562 files changed, 5578 insertions, 2811 deletions
diff --git a/.gitignore b/.gitignore
index 8faa6c02b39e..5d56a3fd0de6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -28,6 +28,7 @@ modules.builtin
 *.gz
 *.bz2
 *.lzma
+*.xz
 *.lzo
 *.patch
 *.gcno
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
 	- info on using the DECnet networking layer in Linux.
 depca.txt
 	- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-	- the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
 	- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
 	- info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
 	- serial IP load balancing
-ethertap.txt
-	- the Ethertap user space packet reception and transmission driver
 ewrk3.txt
 	- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e681804..04ca06325b08 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
 	create dns_resolver foo:* * /usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
 	returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
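The documentation text added above says dns_resolver keys can be read back from userspace with keyctl_read() or the keyctl tool. A minimal userspace sketch of that usage follows; it is not part of the patch, and the hostname, error handling, and choice of the libkeyutils helpers request_key()/keyctl_read_alloc() are illustrative assumptions only:

    /* Illustrative only: fetch and dump a dns_resolver key via libkeyutils. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <keyutils.h>

    int main(void)
    {
    	/* Trigger the dns_resolver upcall; "example.com" is a made-up description. */
    	key_serial_t key = request_key("dns_resolver", "example.com", NULL,
    				       KEY_SPEC_SESSION_KEYRING);
    	if (key < 0) {
    		perror("request_key");
    		return 1;
    	}

    	/* keyctl_read_alloc() wraps keyctl_read() and sizes the buffer itself. */
    	void *payload;
    	long len = keyctl_read_alloc(key, &payload);
    	if (len < 0) {
    		perror("keyctl_read_alloc");
    		return 1;
    	}

    	fwrite(payload, 1, len, stdout);	/* roughly "keyctl pipe <id>" */
    	putchar('\n');
    	free(payload);
    	return 0;
    }

Build with -lkeyutils; the payload printed is whatever the configured upcall program instantiated the key with.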
diff --git a/MAINTAINERS b/MAINTAINERS
index 6f99e1260db8..f1bc3dc6b369 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1010,6 +1010,15 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M: Kyungmin Park <kyungmin.park@samsung.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: arch/arm/mach-s5pv210/mach-aquila.c
+F: arch/arm/mach-s5pv210/mach-goni.c
+F: arch/arm/mach-exynos4/mach-universal_c210.c
+F: arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M: Kyungmin Park <kyungmin.park@samsung.com>
 M: Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F: include/net/bluetooth/
 
 BONDING DRIVER
 M: Jay Vosburgh <fubar@us.ibm.com>
+M: Andy Gospodarek <andy@greyhouse.net>
 L: netdev@vger.kernel.org
 W: http://sourceforge.net/projects/bonding/
 S: Supported
@@ -1692,6 +1702,13 @@ M: Andy Whitcroft <apw@canonical.com>
 S: Supported
 F: scripts/checkpatch.pl
 
+CHINESE DOCUMENTATION
+M: Harry Wei <harryxiyou@gmail.com>
+L: xiyoulinuxkernelgroup@googlegroups.com
+L: linux-kernel@zh-kernel.org (moderated for non-subscribers)
+S: Maintained
+F: Documentation/zh_CN/
+
 CISCO VIC ETHERNET NIC DRIVER
 M: Vasanthy Kolluri <vkolluri@cisco.com>
 M: Roopa Prabhu <roprabhu@cisco.com>
@@ -2026,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
 F: drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L: dccp@vger.kernel.org
 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S: Maintained
@@ -3512,7 +3529,7 @@ F: drivers/hwmon/jc42.c
 F: Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M: Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M: Dave Kleikamp <shaggy@kernel.org>
 L: jfs-discussion@lists.sourceforge.net
 W: http://jfs.sourceforge.net/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -4275,10 +4292,7 @@ S: Maintained
 F: net/sched/sch_netem.c
 
 NETERION 10GbE DRIVERS (s2io/vxge)
-M: Ramkrishna Vepa <ramkrishna.vepa@exar.com>
-M: Sivakumar Subramani <sivakumar.subramani@exar.com>
-M: Sreenivasa Honnur <sreenivasa.honnur@exar.com>
-M: Jon Mason <jon.mason@exar.com>
+M: Jon Mason <jdmason@kudzu.us>
 L: netdev@vger.kernel.org
 W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
 W: http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@ -5164,6 +5178,7 @@ F: drivers/char/random.c
 
 RAPIDIO SUBSYSTEM
 M: Matt Porter <mporter@kernel.crashing.org>
+M: Alexandre Bounine <alexandre.bounine@idt.com>
 S: Maintained
 F: drivers/rapidio/
 
@@ -5266,7 +5281,7 @@ S: Maintained
 F: drivers/net/wireless/rtl818x/rtl8180/
 
 RTL8187 WIRELESS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
 M: Hin-Tak Leung <htl10@users.sourceforge.net>
 M: Larry Finger <Larry.Finger@lwfinger.net>
 L: linux-wireless@vger.kernel.org
@@ -6104,7 +6119,7 @@ S: Maintained
 F: security/tomoyo/
 
 TOPSTAR LAPTOP EXTRAS DRIVER
-M: Herton Ronaldo Krzesinski <herton@mandriva.com.br>
+M: Herton Ronaldo Krzesinski <herton@canonical.com>
 L: platform-driver-x86@vger.kernel.org
 S: Maintained
 F: drivers/platform/x86/topstar-laptop.c
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc6
+EXTRAVERSION =
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..cc31bec2e316 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f48dd8..a19d60082299 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc[irq];
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip;
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+	if (!data)
+		return 1;
+	chip = irq_data_get_irq_chip(data);
+
+	if (!chip->irq_set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(desc->affinity, cpumask_of(cpu));
-	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(data->affinity, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b60939..411ca11d0a18 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-	if (desc) {
-		desc->status |= IRQ_DISABLED;
-		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-			handle_simple_irq, "RTC");
-		setup_irq(RTC_IRQ, &timer_irqaction);
-	}
+	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+		handle_simple_irq, "RTC");
+	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0ed1694..c7cc9813e45f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 	spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	__i8259a_disable_irq(irq);
+	__i8259a_disable_irq(d->irq);
 	spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	spin_lock(&i8259_irq_lock);
 	__i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
 	.name = "XT-PIC",
-	.unmask = i8259a_enable_irq,
-	.mask = i8259a_disable_irq,
-	.mask_ack = i8259a_mask_and_ack_irq,
+	.irq_unmask = i8259a_enable_irq,
+	.irq_mask = i8259a_disable_irq,
+	.irq_mask_ack = i8259a_mask_and_ack_irq,
 };
 
 void __init
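The irq_i8259.c hunks above show the conversion pattern the remaining Alpha interrupt files in this patch repeat: chip callbacks now take a struct irq_data * and read the IRQ number from d->irq, and the irq_chip fields gain an irq_ prefix. A condensed sketch of the new-style shape, using an invented "foo" controller rather than code from the patch:

    #include <linux/irq.h>

    static unsigned long foo_cached_mask;	/* hypothetical shadow of the mask register */

    static void foo_update_hw(unsigned long mask)
    {
    	/* write the mask to the (imaginary) interrupt controller here */
    }

    /* New-style callbacks: struct irq_data * in, IRQ number taken from d->irq. */
    static void foo_irq_mask(struct irq_data *d)
    {
    	foo_update_hw(foo_cached_mask |= 1UL << d->irq);
    }

    static void foo_irq_unmask(struct irq_data *d)
    {
    	foo_update_hw(foo_cached_mask &= ~(1UL << d->irq));
    }

    /* Fields carry the irq_ prefix; the old .mask/.unmask/.mask_ack names are gone. */
    static struct irq_chip foo_irq_chip = {
    	.name = "FOO",
    	.irq_mask = foo_irq_mask,
    	.irq_unmask = foo_irq_unmask,
    	.irq_mask_ack = foo_irq_mask,
    };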
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7386f1..d507a234b05d 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458c853e..b30227fa7f5f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-	unsigned long bit = 1UL << (irq - 16);
+	unsigned long bit = 1UL << (d->irq - 16);
 	unsigned long mask = cached_irq_mask &= ~bit;
 
 	/* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
 	.name = "PYXIS",
-	.mask_ack = pyxis_mask_and_ack_irq,
-	.mask = pyxis_disable_irq,
-	.unmask = pyxis_enable_irq,
+	.irq_mask_ack = pyxis_mask_and_ack_irq,
+	.irq_mask = pyxis_disable_irq,
+	.irq_unmask = pyxis_enable_irq,
 };
 
 void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
 		if ((ignore_mask >> i) & 1)
 			continue;
 		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e828b413..82a47bba41c4 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_ena(irq - 16);
+	cserve_ena(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_dis(irq - 16);
+	cserve_dis(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
 static struct irq_chip srm_irq_type = {
 	.name = "SRM",
-	.unmask = srm_enable_irq,
-	.mask = srm_disable_irq,
-	.mask_ack = srm_disable_irq,
+	.irq_unmask = srm_enable_irq,
+	.irq_mask = srm_disable_irq,
+	.irq_mask_ack = srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
 		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef61768236..88d95e872f55 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-	alcor_disable_irq(irq);
+	alcor_disable_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-	i8259a_mask_and_ack_irq(irq);
+	i8259a_mask_and_ack_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
 	.name = "ALCOR",
-	.unmask = alcor_enable_irq,
-	.mask = alcor_disable_irq,
-	.mask_ack = alcor_mask_and_ack_irq,
+	.irq_unmask = alcor_enable_irq,
+	.irq_mask = alcor_disable_irq,
+	.irq_mask_ack = alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
 		if (i >= 16+20 && i <= 16+30)
 			continue;
 		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
-	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
 	init_i8259a_irqs();
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c916493aea..57eb6307bc27 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
 	.name = "CABRIOLET",
-	.unmask = cabriolet_enable_irq,
-	.mask = cabriolet_disable_irq,
-	.mask_ack = cabriolet_disable_irq,
+	.irq_unmask = cabriolet_enable_irq,
+	.irq_mask = cabriolet_disable_irq,
+	.irq_mask_ack = cabriolet_disable_irq,
 };
 
 static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 		for (i = 16; i < 35; ++i) {
 			set_irq_chip_and_handler(i, &cabriolet_irq_type,
 						 handle_level_irq);
-			irq_to_desc(i)->status |= IRQ_LEVEL;
+			irq_set_status_flags(i, IRQ_LEVEL);
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f759ccd..481df4ecb651 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << irq;
+	cached_irq_mask |= 1UL << d->irq;
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << irq);
+	cached_irq_mask &= ~(1UL << d->irq);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << (irq - 16);
+	cached_irq_mask |= 1UL << (d->irq - 16);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << (irq - 16));
+	cached_irq_mask &= ~(1UL << (d->irq - 16));
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		   bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, *affinity);
+	cpu_set_irq_affinity(d->irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		     bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, *affinity);
+	cpu_set_irq_affinity(d->irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
 	.name = "DP264",
-	.unmask = dp264_enable_irq,
-	.mask = dp264_disable_irq,
-	.mask_ack = dp264_disable_irq,
-	.set_affinity = dp264_set_affinity,
+	.irq_unmask = dp264_enable_irq,
+	.irq_mask = dp264_disable_irq,
+	.irq_mask_ack = dp264_disable_irq,
+	.irq_set_affinity = dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
 	.name = "CLIPPER",
-	.unmask = clipper_enable_irq,
-	.mask = clipper_disable_irq,
-	.mask_ack = clipper_disable_irq,
-	.set_affinity = clipper_set_affinity,
+	.irq_unmask = clipper_enable_irq,
+	.irq_mask = clipper_disable_irq,
+	.irq_mask_ack = clipper_disable_irq,
+	.irq_set_affinity = clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d127b0..402e908ffb3e 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
 	.name = "EB64P",
-	.unmask = eb64p_enable_irq,
-	.mask = eb64p_disable_irq,
-	.mask_ack = eb64p_disable_irq,
+	.irq_unmask = eb64p_enable_irq,
+	.irq_mask = eb64p_disable_irq,
+	.irq_mask_ack = eb64p_disable_irq,
 };
 
 static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
 	setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5c6c6c..0b44a54c1522 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
 	.name = "EIGER",
-	.unmask = eiger_enable_irq,
-	.mask = eiger_disable_irq,
-	.mask_ack = eiger_disable_irq,
+	.irq_unmask = eiger_enable_irq,
+	.irq_mask = eiger_disable_irq,
+	.irq_mask_ack = eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e03d331..00341b75c8b2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_enable_irq(1);
+	if (d->irq == 7)
+		i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_disable_irq(1);
+	if (d->irq == 7)
+		i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen.  */
-	if (irq == 7)
-		i8259a_mask_and_ack_irq(1);
+	if (d->irq == 7)
+		i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
 	.name = "LOCAL",
-	.unmask = jensen_local_enable,
-	.mask = jensen_local_disable,
-	.mask_ack = jensen_local_mask_ack,
+	.irq_unmask = jensen_local_enable,
+	.irq_mask = jensen_local_disable,
+	.irq_mask_ack = jensen_local_mask_ack,
 };
 
 static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1b1ddc..e61910734e41 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-	
+
 	spin_lock(&io7->irq_lock);
 	*ctl |= 1UL << 24;
 	mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-	
+
 	spin_lock(&io7->irq_lock);
 	*ctl &= ~(1UL << 24);
 	mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq)
+marvel_irq_noop(struct irq_data *d)
 {
 	return;
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq)
-{
-	return 0;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name = "LEGACY",
-	.mask = marvel_irq_noop,
-	.unmask = marvel_irq_noop,
+	.irq_mask = marvel_irq_noop,
+	.irq_unmask = marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name = "LSI",
-	.unmask = io7_enable_irq,
-	.mask = io7_disable_irq,
-	.mask_ack = io7_disable_irq,
+	.irq_unmask = io7_enable_irq,
+	.irq_mask = io7_disable_irq,
+	.irq_mask_ack = io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name = "MSI",
-	.unmask = io7_enable_irq,
-	.mask = io7_disable_irq,
-	.ack = marvel_irq_noop,
+	.irq_unmask = io7_enable_irq,
+	.irq_mask = io7_disable_irq,
+	.irq_ack = marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the lsi irqs.  */
 	for (i = 0; i < 128; ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	/* Disable the implemented irqs in hardware.  */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the msi irqs.  */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639e8efb..cf7f43dd3147 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
 	.name = "MIKASA",
-	.unmask = mikasa_enable_irq,
-	.mask = mikasa_disable_irq,
-	.mask_ack = mikasa_disable_irq,
+	.irq_unmask = mikasa_enable_irq,
+	.irq_mask = mikasa_disable_irq,
+	.irq_mask_ack = mikasa_disable_irq,
 };
 
 static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae1260e..92bc188e94a9 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
 	.name = "NORITAKE",
-	.unmask = noritake_enable_irq,
-	.mask = noritake_disable_irq,
-	.mask_ack = noritake_disable_irq,
+	.irq_unmask = noritake_enable_irq,
+	.irq_mask = noritake_disable_irq,
+	.irq_mask_ack = noritake_disable_irq,
 };
 
 static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364dd1cc..936d4140ed5f 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
 	(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned int mask, mask1, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
 	.name = "RAWHIDE",
-	.unmask = rawhide_enable_irq,
-	.mask = rawhide_disable_irq,
-	.mask_ack = rawhide_mask_and_ack_irq,
+	.irq_unmask = rawhide_enable_irq,
+	.irq_mask = rawhide_disable_irq,
+	.irq_mask_ack = rawhide_mask_and_ack_irq,
 };
 
 static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37ec84c..cea22a62913b 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
 	.name = "RX164",
-	.unmask = rx164_enable_irq,
-	.mask = rx164_disable_irq,
-	.mask_ack = rx164_disable_irq,
+	.irq_unmask = rx164_enable_irq,
+	.irq_mask = rx164_disable_irq,
+	.irq_mask_ack = rx164_disable_irq,
 };
 
 static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d1b06c..a349538aabc9 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name = "SABLE/LYNX",
-	.unmask = sable_lynx_enable_irq,
-	.mask = sable_lynx_disable_irq,
-	.mask_ack = sable_lynx_mask_and_ack_irq,
+	.irq_unmask = sable_lynx_enable_irq,
+	.irq_mask = sable_lynx_disable_irq,
+	.irq_mask_ack = sable_lynx_mask_and_ack_irq,
 };
 
 static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a862fb..42a5331f13c4 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
 	.name = "TAKARA",
-	.unmask = takara_enable_irq,
-	.mask = takara_disable_irq,
-	.mask_ack = takara_disable_irq,
+	.irq_unmask = takara_enable_irq,
+	.irq_mask = takara_disable_irq,
+	.irq_mask_ack = takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c index e02494bf5ef3..8c13a0c77830 100644 --- a/arch/alpha/kernel/sys_titan.c +++ b/arch/alpha/kernel/sys_titan.c | |||
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | static inline void | 114 | static inline void |
115 | titan_enable_irq(unsigned int irq) | 115 | titan_enable_irq(struct irq_data *d) |
116 | { | 116 | { |
117 | unsigned int irq = d->irq; | ||
117 | spin_lock(&titan_irq_lock); | 118 | spin_lock(&titan_irq_lock); |
118 | titan_cached_irq_mask |= 1UL << (irq - 16); | 119 | titan_cached_irq_mask |= 1UL << (irq - 16); |
119 | titan_update_irq_hw(titan_cached_irq_mask); | 120 | titan_update_irq_hw(titan_cached_irq_mask); |
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq) | |||
121 | } | 122 | } |
122 | 123 | ||
123 | static inline void | 124 | static inline void |
124 | titan_disable_irq(unsigned int irq) | 125 | titan_disable_irq(struct irq_data *d) |
125 | { | 126 | { |
127 | unsigned int irq = d->irq; | ||
126 | spin_lock(&titan_irq_lock); | 128 | spin_lock(&titan_irq_lock); |
127 | titan_cached_irq_mask &= ~(1UL << (irq - 16)); | 129 | titan_cached_irq_mask &= ~(1UL << (irq - 16)); |
128 | titan_update_irq_hw(titan_cached_irq_mask); | 130 | titan_update_irq_hw(titan_cached_irq_mask); |
@@ -144,8 +146,10 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity) | |||
144 | } | 146 | } |
145 | 147 | ||
146 | static int | 148 | static int |
147 | titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity) | 149 | titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, |
150 | bool force) | ||
148 | { | 151 | { |
152 | unsigned int irq = d->irq; | ||
149 | spin_lock(&titan_irq_lock); | 153 | spin_lock(&titan_irq_lock); |
150 | titan_cpu_set_irq_affinity(irq - 16, *affinity); | 154 | titan_cpu_set_irq_affinity(irq - 16, *affinity); |
151 | titan_update_irq_hw(titan_cached_irq_mask); | 155 | titan_update_irq_hw(titan_cached_irq_mask); |
@@ -175,17 +179,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax) | |||
175 | { | 179 | { |
176 | long i; | 180 | long i; |
177 | for (i = imin; i <= imax; ++i) { | 181 | for (i = imin; i <= imax; ++i) { |
178 | irq_to_desc(i)->status |= IRQ_LEVEL; | ||
179 | set_irq_chip_and_handler(i, ops, handle_level_irq); | 182 | set_irq_chip_and_handler(i, ops, handle_level_irq); |
183 | irq_set_status_flags(i, IRQ_LEVEL); | ||
180 | } | 184 | } |
181 | } | 185 | } |
182 | 186 | ||
183 | static struct irq_chip titan_irq_type = { | 187 | static struct irq_chip titan_irq_type = { |
184 | .name = "TITAN", | 188 | .name = "TITAN", |
185 | .unmask = titan_enable_irq, | 189 | .irq_unmask = titan_enable_irq, |
186 | .mask = titan_disable_irq, | 190 | .irq_mask = titan_disable_irq, |
187 | .mask_ack = titan_disable_irq, | 191 | .irq_mask_ack = titan_disable_irq, |
188 | .set_affinity = titan_set_irq_affinity, | 192 | .irq_set_affinity = titan_set_irq_affinity, |
189 | }; | 193 | }; |
190 | 194 | ||
191 | static irqreturn_t | 195 | static irqreturn_t |
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index eec52594d410..ca60a387ef0a 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
@@ -104,10 +104,12 @@ wildfire_init_irq_hw(void) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static void | 106 | static void |
107 | wildfire_enable_irq(unsigned int irq) | 107 | wildfire_enable_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | unsigned int irq = d->irq; | ||
110 | |||
109 | if (irq < 16) | 111 | if (irq < 16) |
110 | i8259a_enable_irq(irq); | 112 | i8259a_enable_irq(d); |
111 | 113 | ||
112 | spin_lock(&wildfire_irq_lock); | 114 | spin_lock(&wildfire_irq_lock); |
113 | set_bit(irq, &cached_irq_mask); | 115 | set_bit(irq, &cached_irq_mask); |
@@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq) | |||
116 | } | 118 | } |
117 | 119 | ||
118 | static void | 120 | static void |
119 | wildfire_disable_irq(unsigned int irq) | 121 | wildfire_disable_irq(struct irq_data *d) |
120 | { | 122 | { |
123 | unsigned int irq = d->irq; | ||
124 | |||
121 | if (irq < 16) | 125 | if (irq < 16) |
122 | i8259a_disable_irq(irq); | 126 | i8259a_disable_irq(d); |
123 | 127 | ||
124 | spin_lock(&wildfire_irq_lock); | 128 | spin_lock(&wildfire_irq_lock); |
125 | clear_bit(irq, &cached_irq_mask); | 129 | clear_bit(irq, &cached_irq_mask); |
@@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq) | |||
128 | } | 132 | } |
129 | 133 | ||
130 | static void | 134 | static void |
131 | wildfire_mask_and_ack_irq(unsigned int irq) | 135 | wildfire_mask_and_ack_irq(struct irq_data *d) |
132 | { | 136 | { |
137 | unsigned int irq = d->irq; | ||
138 | |||
133 | if (irq < 16) | 139 | if (irq < 16) |
134 | i8259a_mask_and_ack_irq(irq); | 140 | i8259a_mask_and_ack_irq(d); |
135 | 141 | ||
136 | spin_lock(&wildfire_irq_lock); | 142 | spin_lock(&wildfire_irq_lock); |
137 | clear_bit(irq, &cached_irq_mask); | 143 | clear_bit(irq, &cached_irq_mask); |
@@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq) | |||
141 | 147 | ||
142 | static struct irq_chip wildfire_irq_type = { | 148 | static struct irq_chip wildfire_irq_type = { |
143 | .name = "WILDFIRE", | 149 | .name = "WILDFIRE", |
144 | .unmask = wildfire_enable_irq, | 150 | .irq_unmask = wildfire_enable_irq, |
145 | .mask = wildfire_disable_irq, | 151 | .irq_mask = wildfire_disable_irq, |
146 | .mask_ack = wildfire_mask_and_ack_irq, | 152 | .irq_mask_ack = wildfire_mask_and_ack_irq, |
147 | }; | 153 | }; |
148 | 154 | ||
149 | static void __init | 155 | static void __init |
@@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
177 | for (i = 0; i < 16; ++i) { | 183 | for (i = 0; i < 16; ++i) { |
178 | if (i == 2) | 184 | if (i == 2) |
179 | continue; | 185 | continue; |
180 | irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; | ||
181 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 186 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, |
182 | handle_level_irq); | 187 | handle_level_irq); |
188 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
183 | } | 189 | } |
184 | 190 | ||
185 | irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; | ||
186 | set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, | 191 | set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, |
187 | handle_level_irq); | 192 | handle_level_irq); |
193 | irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); | ||
188 | for (i = 40; i < 64; ++i) { | 194 | for (i = 40; i < 64; ++i) { |
189 | irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; | ||
190 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 195 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, |
191 | handle_level_irq); | 196 | handle_level_irq); |
197 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
192 | } | 198 | } |
193 | 199 | ||
194 | setup_irq(32+irq_bias, &isa_enable); | 200 | setup_irq(32+irq_bias, &isa_enable); |
195 | } | 201 | } |
196 | 202 | ||
197 | static void __init | 203 | static void __init |
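[Editor's note] The Alpha hunks above all apply the same genirq conversion: the chip callbacks now take a struct irq_data * and fetch the interrupt number from d->irq, the struct irq_chip members gain an irq_ prefix, and the level flag is set through irq_set_status_flags() instead of writing irq_to_desc(i)->status directly. A minimal sketch of that shape, using made-up foo_* names and a single 64-bit cached mask (this is the pattern only, not code from any of the files above):

#include <linux/init.h>
#include <linux/irq.h>

static unsigned long foo_cached_mask;          /* covers IRQs 0..63 in this sketch */

static void foo_update_hw(unsigned long mask)
{
        /* write the cached mask out to the (hypothetical) interrupt controller */
}

static void foo_irq_mask(struct irq_data *d)
{
        unsigned int irq = d->irq;             /* irq number now comes from irq_data */

        foo_cached_mask |= 1UL << irq;
        foo_update_hw(foo_cached_mask);
}

static void foo_irq_unmask(struct irq_data *d)
{
        unsigned int irq = d->irq;

        foo_cached_mask &= ~(1UL << irq);
        foo_update_hw(foo_cached_mask);
}

static struct irq_chip foo_irq_chip = {
        .name           = "FOO",
        .irq_unmask     = foo_irq_unmask,
        .irq_mask       = foo_irq_mask,
        .irq_mask_ack   = foo_irq_mask,
};

static void __init foo_init_irq(int nr_of_irqs)
{
        int i;

        for (i = 0; i < nr_of_irqs; ++i) {
                set_irq_chip_and_handler(i, &foo_irq_chip, handle_level_irq);
                /* replaces the old irq_to_desc(i)->status |= IRQ_LEVEL */
                irq_set_status_flags(i, IRQ_LEVEL);
        }
}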
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 778655f0257a..ea5ee4d067f3 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig | |||
@@ -6,6 +6,8 @@ config ARM_VIC | |||
6 | 6 | ||
7 | config ARM_VIC_NR | 7 | config ARM_VIC_NR |
8 | int | 8 | int |
9 | default 4 if ARCH_S5PV210 | ||
10 | default 3 if ARCH_S5P6442 || ARCH_S5PC100 | ||
9 | default 2 | 11 | default 2 |
10 | depends on ARM_VIC | 12 | depends on ARM_VIC |
11 | help | 13 | help |
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 3a0893a76a3b..bf13b814c1b8 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h | |||
@@ -15,10 +15,6 @@ struct meminfo; | |||
15 | struct sys_timer; | 15 | struct sys_timer; |
16 | 16 | ||
17 | struct machine_desc { | 17 | struct machine_desc { |
18 | /* | ||
19 | * Note! The first two elements are used | ||
20 | * by assembler code in head.S, head-common.S | ||
21 | */ | ||
22 | unsigned int nr; /* architecture number */ | 18 | unsigned int nr; /* architecture number */ |
23 | const char *name; /* architecture name */ | 19 | const char *name; /* architecture name */ |
24 | unsigned long boot_params; /* tagged list */ | 20 | unsigned long boot_params; /* tagged list */ |
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 9763be04f77e..22de005f159c 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h | |||
@@ -10,6 +10,8 @@ | |||
10 | #ifndef _ASMARM_PGALLOC_H | 10 | #ifndef _ASMARM_PGALLOC_H |
11 | #define _ASMARM_PGALLOC_H | 11 | #define _ASMARM_PGALLOC_H |
12 | 12 | ||
13 | #include <linux/pagemap.h> | ||
14 | |||
13 | #include <asm/domain.h> | 15 | #include <asm/domain.h> |
14 | #include <asm/pgtable-hwdef.h> | 16 | #include <asm/pgtable-hwdef.h> |
15 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index d600bd350704..44b84fe6e1b0 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
@@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
836 | /* | 836 | /* |
837 | * One-time initialisation. | 837 | * One-time initialisation. |
838 | */ | 838 | */ |
839 | static void reset_ctrl_regs(void *unused) | 839 | static void reset_ctrl_regs(void *info) |
840 | { | 840 | { |
841 | int i; | 841 | int i, cpu = smp_processor_id(); |
842 | u32 dbg_power; | ||
843 | cpumask_t *cpumask = info; | ||
842 | 844 | ||
843 | /* | 845 | /* |
844 | * v7 debug contains save and restore registers so that debug state | 846 | * v7 debug contains save and restore registers so that debug state |
@@ -850,6 +852,17 @@ static void reset_ctrl_regs(void *unused) | |||
850 | */ | 852 | */ |
851 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { | 853 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { |
852 | /* | 854 | /* |
855 | * Ensure sticky power-down is clear (i.e. debug logic is | ||
856 | * powered up). | ||
857 | */ | ||
858 | asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); | ||
859 | if ((dbg_power & 0x1) == 0) { | ||
860 | pr_warning("CPU %d debug is powered down!\n", cpu); | ||
861 | cpumask_or(cpumask, cpumask, cpumask_of(cpu)); | ||
862 | return; | ||
863 | } | ||
864 | |||
865 | /* | ||
853 | * Unconditionally clear the lock by writing a value | 866 | * Unconditionally clear the lock by writing a value |
854 | * other than 0xC5ACCE55 to the access register. | 867 | * other than 0xC5ACCE55 to the access register. |
855 | */ | 868 | */ |
@@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { | |||
887 | static int __init arch_hw_breakpoint_init(void) | 900 | static int __init arch_hw_breakpoint_init(void) |
888 | { | 901 | { |
889 | u32 dscr; | 902 | u32 dscr; |
903 | cpumask_t cpumask = { CPU_BITS_NONE }; | ||
890 | 904 | ||
891 | debug_arch = get_debug_arch(); | 905 | debug_arch = get_debug_arch(); |
892 | 906 | ||
@@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void) | |||
911 | * Reset the breakpoint resources. We assume that a halting | 925 | * Reset the breakpoint resources. We assume that a halting |
912 | * debugger will leave the world in a nice state for us. | 926 | * debugger will leave the world in a nice state for us. |
913 | */ | 927 | */ |
914 | on_each_cpu(reset_ctrl_regs, NULL, 1); | 928 | on_each_cpu(reset_ctrl_regs, &cpumask, 1); |
929 | if (!cpumask_empty(&cpumask)) { | ||
930 | core_num_brps = 0; | ||
931 | core_num_reserved_brps = 0; | ||
932 | core_num_wrps = 0; | ||
933 | return 0; | ||
934 | } | ||
915 | 935 | ||
916 | ARM_DBG_READ(c1, 0, dscr); | 936 | ARM_DBG_READ(c1, 0, dscr); |
917 | if (dscr & ARM_DSCR_HDBGEN) { | 937 | if (dscr & ARM_DSCR_HDBGEN) { |
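[Editor's note] The hw_breakpoint change above does two things: reset_ctrl_regs() now reads the v7 debug power status via "mrc p14, 0, %0, c1, c5, 4" and, if the debug logic is powered down, records the offending CPU in a cpumask passed through the info pointer; arch_hw_breakpoint_init() then refuses to advertise any breakpoint or watchpoint slots when that mask is non-empty. The accumulate-failures-per-CPU shape, reduced to a sketch in which a hypothetical cpu_feature_ok() probe stands in for the real register read:

#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/smp.h>

/* hypothetical per-CPU probe; the real code reads the debug power register */
static bool cpu_feature_ok(void)
{
        return true;
}

/* runs on every CPU via on_each_cpu(); failing CPUs are added to *info */
static void check_feature(void *info)
{
        cpumask_t *failed = info;

        if (!cpu_feature_ok())
                cpumask_or(failed, failed, cpumask_of(smp_processor_id()));
}

static int __init feature_init(void)
{
        cpumask_t failed = { CPU_BITS_NONE };

        on_each_cpu(check_feature, &failed, 1);
        if (!cpumask_empty(&failed))
                return 0;       /* leave the feature disabled, as the hunk above does */

        /* ... normal initialisation would continue here ... */
        return 0;
}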
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 19c6816db61e..b13e70f63d71 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
@@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num, | |||
996 | while (!(arch_ctrl.len & 0x1)) | 996 | while (!(arch_ctrl.len & 0x1)) |
997 | arch_ctrl.len >>= 1; | 997 | arch_ctrl.len >>= 1; |
998 | 998 | ||
999 | if (idx & 0x1) | 999 | if (num & 0x1) |
1000 | reg = encode_ctrl_reg(arch_ctrl); | ||
1001 | else | ||
1002 | reg = bp->attr.bp_addr; | 1000 | reg = bp->attr.bp_addr; |
1001 | else | ||
1002 | reg = encode_ctrl_reg(arch_ctrl); | ||
1003 | } | 1003 | } |
1004 | 1004 | ||
1005 | put: | 1005 | put: |
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 343de73161fa..4a68c2b1ec11 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c | |||
@@ -132,7 +132,7 @@ out: | |||
132 | return ret; | 132 | return ret; |
133 | } | 133 | } |
134 | 134 | ||
135 | static int __init davinci_cpu_init(struct cpufreq_policy *policy) | 135 | static int davinci_cpu_init(struct cpufreq_policy *policy) |
136 | { | 136 | { |
137 | int result = 0; | 137 | int result = 0; |
138 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; | 138 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 9eec63070e0c..beda8a4133a0 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
@@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = { | |||
480 | .resource = da850_mcasp_resources, | 480 | .resource = da850_mcasp_resources, |
481 | }; | 481 | }; |
482 | 482 | ||
483 | struct platform_device davinci_pcm_device = { | ||
484 | .name = "davinci-pcm-audio", | ||
485 | .id = -1, | ||
486 | }; | ||
487 | |||
483 | void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) | 488 | void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) |
484 | { | 489 | { |
490 | platform_device_register(&davinci_pcm_device); | ||
491 | |||
485 | /* DA830/OMAP-L137 has 3 instances of McASP */ | 492 | /* DA830/OMAP-L137 has 3 instances of McASP */ |
486 | if (cpu_is_davinci_da830() && id == 1) { | 493 | if (cpu_is_davinci_da830() && id == 1) { |
487 | da830_mcasp1_device.dev.platform_data = pdata; | 494 | da830_mcasp1_device.dev.platform_data = pdata; |
diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c index d10298620e2c..3fa3e2867e19 100644 --- a/arch/arm/mach-davinci/gpio-tnetv107x.c +++ b/arch/arm/mach-davinci/gpio-tnetv107x.c | |||
@@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
58 | 58 | ||
59 | spin_lock_irqsave(&ctlr->lock, flags); | 59 | spin_lock_irqsave(&ctlr->lock, flags); |
60 | 60 | ||
61 | gpio_reg_set_bit(&regs->enable, gpio); | 61 | gpio_reg_set_bit(regs->enable, gpio); |
62 | 62 | ||
63 | spin_unlock_irqrestore(&ctlr->lock, flags); | 63 | spin_unlock_irqrestore(&ctlr->lock, flags); |
64 | 64 | ||
@@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
74 | 74 | ||
75 | spin_lock_irqsave(&ctlr->lock, flags); | 75 | spin_lock_irqsave(&ctlr->lock, flags); |
76 | 76 | ||
77 | gpio_reg_clear_bit(&regs->enable, gpio); | 77 | gpio_reg_clear_bit(regs->enable, gpio); |
78 | 78 | ||
79 | spin_unlock_irqrestore(&ctlr->lock, flags); | 79 | spin_unlock_irqrestore(&ctlr->lock, flags); |
80 | } | 80 | } |
@@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset) | |||
88 | 88 | ||
89 | spin_lock_irqsave(&ctlr->lock, flags); | 89 | spin_lock_irqsave(&ctlr->lock, flags); |
90 | 90 | ||
91 | gpio_reg_set_bit(&regs->direction, gpio); | 91 | gpio_reg_set_bit(regs->direction, gpio); |
92 | 92 | ||
93 | spin_unlock_irqrestore(&ctlr->lock, flags); | 93 | spin_unlock_irqrestore(&ctlr->lock, flags); |
94 | 94 | ||
@@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip, | |||
106 | spin_lock_irqsave(&ctlr->lock, flags); | 106 | spin_lock_irqsave(&ctlr->lock, flags); |
107 | 107 | ||
108 | if (value) | 108 | if (value) |
109 | gpio_reg_set_bit(&regs->data_out, gpio); | 109 | gpio_reg_set_bit(regs->data_out, gpio); |
110 | else | 110 | else |
111 | gpio_reg_clear_bit(&regs->data_out, gpio); | 111 | gpio_reg_clear_bit(regs->data_out, gpio); |
112 | 112 | ||
113 | gpio_reg_clear_bit(&regs->direction, gpio); | 113 | gpio_reg_clear_bit(regs->direction, gpio); |
114 | 114 | ||
115 | spin_unlock_irqrestore(&ctlr->lock, flags); | 115 | spin_unlock_irqrestore(&ctlr->lock, flags); |
116 | 116 | ||
@@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset) | |||
124 | unsigned gpio = chip->base + offset; | 124 | unsigned gpio = chip->base + offset; |
125 | int ret; | 125 | int ret; |
126 | 126 | ||
127 | ret = gpio_reg_get_bit(&regs->data_in, gpio); | 127 | ret = gpio_reg_get_bit(regs->data_in, gpio); |
128 | 128 | ||
129 | return ret ? 1 : 0; | 129 | return ret ? 1 : 0; |
130 | } | 130 | } |
@@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip, | |||
140 | spin_lock_irqsave(&ctlr->lock, flags); | 140 | spin_lock_irqsave(&ctlr->lock, flags); |
141 | 141 | ||
142 | if (value) | 142 | if (value) |
143 | gpio_reg_set_bit(&regs->data_out, gpio); | 143 | gpio_reg_set_bit(regs->data_out, gpio); |
144 | else | 144 | else |
145 | gpio_reg_clear_bit(&regs->data_out, gpio); | 145 | gpio_reg_clear_bit(regs->data_out, gpio); |
146 | 146 | ||
147 | spin_unlock_irqrestore(&ctlr->lock, flags); | 147 | spin_unlock_irqrestore(&ctlr->lock, flags); |
148 | } | 148 | } |
diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h index 730c49d1ebd8..14a504887189 100644 --- a/arch/arm/mach-davinci/include/mach/clkdev.h +++ b/arch/arm/mach-davinci/include/mach/clkdev.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __MACH_CLKDEV_H | 1 | #ifndef __MACH_CLKDEV_H |
2 | #define __MACH_CLKDEV_H | 2 | #define __MACH_CLKDEV_H |
3 | 3 | ||
4 | struct clk; | ||
5 | |||
4 | static inline int __clk_get(struct clk *clk) | 6 | static inline int __clk_get(struct clk *clk) |
5 | { | 7 | { |
6 | return 1; | 8 | return 1; |
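[Editor's note] The clkdev.h hunk only needs struct clk as an opaque type: the inline stub takes a pointer and never dereferences it, so a forward declaration is enough to make the header self-contained for any includer. The underlying C idiom, shown in isolation:

struct clk;                             /* incomplete type: layout is unknown here */

static inline int __clk_get(struct clk *clk)
{
        return 1;                       /* pointers to incomplete types are fine */
}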
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c index 337392c3f549..acb7ae5b0a25 100644 --- a/arch/arm/mach-omap2/clkt_dpll.c +++ b/arch/arm/mach-omap2/clkt_dpll.c | |||
@@ -77,7 +77,7 @@ static int _dpll_test_fint(struct clk *clk, u8 n) | |||
77 | dd = clk->dpll_data; | 77 | dd = clk->dpll_data; |
78 | 78 | ||
79 | /* DPLL divider must result in a valid jitter correction val */ | 79 | /* DPLL divider must result in a valid jitter correction val */ |
80 | fint = clk->parent->rate / (n + 1); | 80 | fint = clk->parent->rate / n; |
81 | if (fint < DPLL_FINT_BAND1_MIN) { | 81 | if (fint < DPLL_FINT_BAND1_MIN) { |
82 | 82 | ||
83 | pr_debug("rejecting n=%d due to Fint failure, " | 83 | pr_debug("rejecting n=%d due to Fint failure, " |
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 394413dc7deb..24b88504df0f 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c | |||
@@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox, | |||
193 | omap_mbox_type_t irq) | 193 | omap_mbox_type_t irq) |
194 | { | 194 | { |
195 | struct omap_mbox2_priv *p = mbox->priv; | 195 | struct omap_mbox2_priv *p = mbox->priv; |
196 | u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; | 196 | u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; |
197 | l = mbox_read_reg(p->irqdisable); | 197 | |
198 | l &= ~bit; | 198 | if (!cpu_is_omap44xx()) |
199 | mbox_write_reg(l, p->irqdisable); | 199 | bit = mbox_read_reg(p->irqdisable) & ~bit; |
200 | |||
201 | mbox_write_reg(bit, p->irqdisable); | ||
200 | } | 202 | } |
201 | 203 | ||
202 | static void omap2_mbox_ack_irq(struct omap_mbox *mbox, | 204 | static void omap2_mbox_ack_irq(struct omap_mbox *mbox, |
@@ -334,7 +336,7 @@ static struct omap_mbox mbox_iva_info = { | |||
334 | .priv = &omap2_mbox_iva_priv, | 336 | .priv = &omap2_mbox_iva_priv, |
335 | }; | 337 | }; |
336 | 338 | ||
337 | struct omap_mbox *omap2_mboxes[] = { &mbox_iva_info, &mbox_dsp_info, NULL }; | 339 | struct omap_mbox *omap2_mboxes[] = { &mbox_dsp_info, &mbox_iva_info, NULL }; |
338 | #endif | 340 | #endif |
339 | 341 | ||
340 | #if defined(CONFIG_ARCH_OMAP4) | 342 | #if defined(CONFIG_ARCH_OMAP4) |
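[Editor's note] The omap2_mbox_disable_irq() change above accounts for two different register semantics: on OMAP4 the irqdisable register is write-1-to-disable, so the bit can be written directly, while earlier OMAPs expose an enable mask that has to be read, have the bit cleared, and be written back. A generic sketch of the two styles, with hypothetical register offsets and plain readl()/writel() accessors (the mailbox code itself goes through its own mbox_read_reg()/mbox_write_reg() helpers):

#include <linux/io.h>
#include <linux/kernel.h>

#define FOO_IRQENABLE   0x10    /* hypothetical: read-modify-write enable mask */
#define FOO_IRQDISABLE  0x14    /* hypothetical: write-1-to-disable register */

static void foo_disable_irq_rmw(void __iomem *base, u32 bit)
{
        u32 val = readl(base + FOO_IRQENABLE);

        writel(val & ~bit, base + FOO_IRQENABLE);       /* clear the enable bit */
}

static void foo_disable_irq_w1d(void __iomem *base, u32 bit)
{
        writel(bit, base + FOO_IRQDISABLE);             /* writing 1 disables the source */
}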
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c index 98148b6c36e9..6c84659cf846 100644 --- a/arch/arm/mach-omap2/mux.c +++ b/arch/arm/mach-omap2/mux.c | |||
@@ -605,7 +605,7 @@ static void __init omap_mux_dbg_create_entry( | |||
605 | list_for_each_entry(e, &partition->muxmodes, node) { | 605 | list_for_each_entry(e, &partition->muxmodes, node) { |
606 | struct omap_mux *m = &e->mux; | 606 | struct omap_mux *m = &e->mux; |
607 | 607 | ||
608 | (void)debugfs_create_file(m->muxnames[0], S_IWUGO, mux_dbg_dir, | 608 | (void)debugfs_create_file(m->muxnames[0], S_IWUSR, mux_dbg_dir, |
609 | m, &omap_mux_dbg_signal_fops); | 609 | m, &omap_mux_dbg_signal_fops); |
610 | } | 610 | } |
611 | } | 611 | } |
diff --git a/arch/arm/mach-omap2/pm-debug.c b/arch/arm/mach-omap2/pm-debug.c index 125f56591fb5..a5a83b358ddd 100644 --- a/arch/arm/mach-omap2/pm-debug.c +++ b/arch/arm/mach-omap2/pm-debug.c | |||
@@ -637,14 +637,14 @@ static int __init pm_dbg_init(void) | |||
637 | 637 | ||
638 | } | 638 | } |
639 | 639 | ||
640 | (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUGO, d, | 640 | (void) debugfs_create_file("enable_off_mode", S_IRUGO | S_IWUSR, d, |
641 | &enable_off_mode, &pm_dbg_option_fops); | 641 | &enable_off_mode, &pm_dbg_option_fops); |
642 | (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUGO, d, | 642 | (void) debugfs_create_file("sleep_while_idle", S_IRUGO | S_IWUSR, d, |
643 | &sleep_while_idle, &pm_dbg_option_fops); | 643 | &sleep_while_idle, &pm_dbg_option_fops); |
644 | (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUGO, d, | 644 | (void) debugfs_create_file("wakeup_timer_seconds", S_IRUGO | S_IWUSR, d, |
645 | &wakeup_timer_seconds, &pm_dbg_option_fops); | 645 | &wakeup_timer_seconds, &pm_dbg_option_fops); |
646 | (void) debugfs_create_file("wakeup_timer_milliseconds", | 646 | (void) debugfs_create_file("wakeup_timer_milliseconds", |
647 | S_IRUGO | S_IWUGO, d, &wakeup_timer_milliseconds, | 647 | S_IRUGO | S_IWUSR, d, &wakeup_timer_milliseconds, |
648 | &pm_dbg_option_fops); | 648 | &pm_dbg_option_fops); |
649 | pm_dbg_init_done = 1; | 649 | pm_dbg_init_done = 1; |
650 | 650 | ||
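[Editor's note] The mux.c and pm-debug.c hunks above (and the smartreflex one below) all drop the group/other write bits from debugfs files: S_IWUGO becomes S_IWUSR, so the files stay world-readable but only the owner (root) can write them. The resulting call shape, as a sketch with hypothetical names and a caller-supplied fops:

#include <linux/debugfs.h>
#include <linux/stat.h>

static u32 foo_option;

static struct dentry *foo_debugfs_add(struct dentry *parent,
                                      const struct file_operations *fops)
{
        /* 0644: owner may write, everyone may read */
        return debugfs_create_file("foo_option", S_IRUGO | S_IWUSR,
                                   parent, &foo_option, fops);
}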
diff --git a/arch/arm/mach-omap2/prcm_mpu44xx.h b/arch/arm/mach-omap2/prcm_mpu44xx.h index 729a644ce852..3300ff6e3cfe 100644 --- a/arch/arm/mach-omap2/prcm_mpu44xx.h +++ b/arch/arm/mach-omap2/prcm_mpu44xx.h | |||
@@ -38,8 +38,8 @@ | |||
38 | #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 | 38 | #define OMAP4430_PRCM_MPU_CPU1_INST 0x0800 |
39 | 39 | ||
40 | /* PRCM_MPU clockdomain register offsets (from instance start) */ | 40 | /* PRCM_MPU clockdomain register offsets (from instance start) */ |
41 | #define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0000 | 41 | #define OMAP4430_PRCM_MPU_CPU0_MPU_CDOFFS 0x0018 |
42 | #define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0000 | 42 | #define OMAP4430_PRCM_MPU_CPU1_MPU_CDOFFS 0x0018 |
43 | 43 | ||
44 | 44 | ||
45 | /* | 45 | /* |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index c37e823266d3..1a777e34d0c2 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
@@ -282,6 +282,7 @@ error: | |||
282 | dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" | 282 | dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" |
283 | "interrupt handler. Smartreflex will" | 283 | "interrupt handler. Smartreflex will" |
284 | "not function as desired\n", __func__); | 284 | "not function as desired\n", __func__); |
285 | kfree(name); | ||
285 | kfree(sr_info); | 286 | kfree(sr_info); |
286 | return ret; | 287 | return ret; |
287 | } | 288 | } |
@@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
879 | ret = sr_late_init(sr_info); | 880 | ret = sr_late_init(sr_info); |
880 | if (ret) { | 881 | if (ret) { |
881 | pr_warning("%s: Error in SR late init\n", __func__); | 882 | pr_warning("%s: Error in SR late init\n", __func__); |
882 | return ret; | 883 | goto err_release_region; |
883 | } | 884 | } |
884 | } | 885 | } |
885 | 886 | ||
@@ -890,17 +891,20 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
890 | * not try to create rest of the debugfs entries. | 891 | * not try to create rest of the debugfs entries. |
891 | */ | 892 | */ |
892 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); | 893 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); |
893 | if (!vdd_dbg_dir) | 894 | if (!vdd_dbg_dir) { |
894 | return -EINVAL; | 895 | ret = -EINVAL; |
896 | goto err_release_region; | ||
897 | } | ||
895 | 898 | ||
896 | dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); | 899 | dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); |
897 | if (IS_ERR(dbg_dir)) { | 900 | if (IS_ERR(dbg_dir)) { |
898 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", | 901 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", |
899 | __func__); | 902 | __func__); |
900 | return PTR_ERR(dbg_dir); | 903 | ret = PTR_ERR(dbg_dir); |
904 | goto err_release_region; | ||
901 | } | 905 | } |
902 | 906 | ||
903 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUGO, dbg_dir, | 907 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, |
904 | (void *)sr_info, &pm_sr_fops); | 908 | (void *)sr_info, &pm_sr_fops); |
905 | (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, | 909 | (void) debugfs_create_x32("errweight", S_IRUGO, dbg_dir, |
906 | &sr_info->err_weight); | 910 | &sr_info->err_weight); |
@@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
913 | if (IS_ERR(nvalue_dir)) { | 917 | if (IS_ERR(nvalue_dir)) { |
914 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" | 918 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" |
915 | "for n-values\n", __func__); | 919 | "for n-values\n", __func__); |
916 | return PTR_ERR(nvalue_dir); | 920 | ret = PTR_ERR(nvalue_dir); |
921 | goto err_release_region; | ||
917 | } | 922 | } |
918 | 923 | ||
919 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); | 924 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); |
@@ -922,24 +927,16 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
922 | " corresponding vdd vdd_%s. Cannot create debugfs" | 927 | " corresponding vdd vdd_%s. Cannot create debugfs" |
923 | "entries for n-values\n", | 928 | "entries for n-values\n", |
924 | __func__, sr_info->voltdm->name); | 929 | __func__, sr_info->voltdm->name); |
925 | return -ENODATA; | 930 | ret = -ENODATA; |
931 | goto err_release_region; | ||
926 | } | 932 | } |
927 | 933 | ||
928 | for (i = 0; i < sr_info->nvalue_count; i++) { | 934 | for (i = 0; i < sr_info->nvalue_count; i++) { |
929 | char *name; | 935 | char name[NVALUE_NAME_LEN + 1]; |
930 | char volt_name[32]; | ||
931 | |||
932 | name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL); | ||
933 | if (!name) { | ||
934 | dev_err(&pdev->dev, "%s: Unable to allocate memory" | ||
935 | " for n-value directory name\n", __func__); | ||
936 | return -ENOMEM; | ||
937 | } | ||
938 | 936 | ||
939 | strcpy(name, "volt_"); | 937 | snprintf(name, sizeof(name), "volt_%d", |
940 | sprintf(volt_name, "%d", volt_data[i].volt_nominal); | 938 | volt_data[i].volt_nominal); |
941 | strcat(name, volt_name); | 939 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, |
942 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUGO, nvalue_dir, | ||
943 | &(sr_info->nvalue_table[i].nvalue)); | 940 | &(sr_info->nvalue_table[i].nvalue)); |
944 | } | 941 | } |
945 | 942 | ||
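[Editor's note] Besides the permission change, the smartreflex probe above is reworked so that every failure funnels through a single err_release_region label (which presumably releases the memory region requested earlier in the probe, outside the hunks shown) instead of returning and leaking it, and the per-voltage debugfs name is built with snprintf() into a fixed-size stack buffer rather than a kzalloc()/strcpy()/strcat() sequence that also had to be error-checked and freed. The naming part, reduced to a sketch (NVALUE_NAME_LEN = 40 is an assumption standing in for the driver's own constant):

#include <linux/kernel.h>
#include <linux/types.h>

#define NVALUE_NAME_LEN 40      /* assumed to match the driver's own definition */

static void foo_nvalue_name(char *buf, size_t len, unsigned int volt_nominal)
{
        /* nothing to allocate, nothing to kfree() on error paths */
        snprintf(buf, len, "volt_%u", volt_nominal);
}

A caller would typically declare char name[NVALUE_NAME_LEN + 1]; and pass sizeof(name) before handing the result to debugfs_create_x32().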
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c index 7b7c2683ae7b..0fc550e7e482 100644 --- a/arch/arm/mach-omap2/timer-gp.c +++ b/arch/arm/mach-omap2/timer-gp.c | |||
@@ -39,6 +39,7 @@ | |||
39 | #include <asm/mach/time.h> | 39 | #include <asm/mach/time.h> |
40 | #include <plat/dmtimer.h> | 40 | #include <plat/dmtimer.h> |
41 | #include <asm/localtimer.h> | 41 | #include <asm/localtimer.h> |
42 | #include <asm/sched_clock.h> | ||
42 | 43 | ||
43 | #include "timer-gp.h" | 44 | #include "timer-gp.h" |
44 | 45 | ||
@@ -190,6 +191,7 @@ static void __init omap2_gp_clocksource_init(void) | |||
190 | /* | 191 | /* |
191 | * clocksource | 192 | * clocksource |
192 | */ | 193 | */ |
194 | static DEFINE_CLOCK_DATA(cd); | ||
193 | static struct omap_dm_timer *gpt_clocksource; | 195 | static struct omap_dm_timer *gpt_clocksource; |
194 | static cycle_t clocksource_read_cycles(struct clocksource *cs) | 196 | static cycle_t clocksource_read_cycles(struct clocksource *cs) |
195 | { | 197 | { |
@@ -204,6 +206,15 @@ static struct clocksource clocksource_gpt = { | |||
204 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | 206 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, |
205 | }; | 207 | }; |
206 | 208 | ||
209 | static void notrace dmtimer_update_sched_clock(void) | ||
210 | { | ||
211 | u32 cyc; | ||
212 | |||
213 | cyc = omap_dm_timer_read_counter(gpt_clocksource); | ||
214 | |||
215 | update_sched_clock(&cd, cyc, (u32)~0); | ||
216 | } | ||
217 | |||
207 | /* Setup free-running counter for clocksource */ | 218 | /* Setup free-running counter for clocksource */ |
208 | static void __init omap2_gp_clocksource_init(void) | 219 | static void __init omap2_gp_clocksource_init(void) |
209 | { | 220 | { |
@@ -224,6 +235,8 @@ static void __init omap2_gp_clocksource_init(void) | |||
224 | 235 | ||
225 | omap_dm_timer_set_load_start(gpt, 1, 0); | 236 | omap_dm_timer_set_load_start(gpt, 1, 0); |
226 | 237 | ||
238 | init_sched_clock(&cd, dmtimer_update_sched_clock, 32, tick_rate); | ||
239 | |||
227 | if (clocksource_register_hz(&clocksource_gpt, tick_rate)) | 240 | if (clocksource_register_hz(&clocksource_gpt, tick_rate)) |
228 | printk(err2, clocksource_gpt.name); | 241 | printk(err2, clocksource_gpt.name); |
229 | } | 242 | } |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fbc5b775f895..b166b1d845d7 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
@@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = { | |||
347 | &pxa25x_device_assp, | 347 | &pxa25x_device_assp, |
348 | &pxa25x_device_pwm0, | 348 | &pxa25x_device_pwm0, |
349 | &pxa25x_device_pwm1, | 349 | &pxa25x_device_pwm1, |
350 | &pxa_device_asoc_platform, | ||
350 | }; | 351 | }; |
351 | 352 | ||
352 | static struct sys_device pxa25x_sysdev[] = { | 353 | static struct sys_device pxa25x_sysdev[] = { |
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index c31e601eb49c..b9b1e5c2b290 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c | |||
@@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev) | |||
81 | goto err_rfk_alloc; | 81 | goto err_rfk_alloc; |
82 | } | 82 | } |
83 | 83 | ||
84 | rfkill_set_led_trigger_name(rfk, "tosa-bt"); | ||
85 | |||
86 | rc = rfkill_register(rfk); | 84 | rc = rfkill_register(rfk); |
87 | if (rc) | 85 | if (rc) |
88 | goto err_rfkill; | 86 | goto err_rfkill; |
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index af152e70cfcf..f2582ec300d9 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
@@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = { | |||
875 | .dev.platform_data = &sharpsl_rom_data, | 875 | .dev.platform_data = &sharpsl_rom_data, |
876 | }; | 876 | }; |
877 | 877 | ||
878 | static struct platform_device wm9712_device = { | ||
879 | .name = "wm9712-codec", | ||
880 | .id = -1, | ||
881 | }; | ||
882 | |||
878 | static struct platform_device *devices[] __initdata = { | 883 | static struct platform_device *devices[] __initdata = { |
879 | &tosascoop_device, | 884 | &tosascoop_device, |
880 | &tosascoop_jc_device, | 885 | &tosascoop_jc_device, |
@@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = { | |||
885 | &tosaled_device, | 890 | &tosaled_device, |
886 | &tosa_bt_device, | 891 | &tosa_bt_device, |
887 | &sharpsl_rom_device, | 892 | &sharpsl_rom_device, |
893 | &wm9712_device, | ||
888 | }; | 894 | }; |
889 | 895 | ||
890 | static void tosa_poweroff(void) | 896 | static void tosa_poweroff(void) |
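[Editor's note] The da8xx and tosa hunks above share a pattern: declare a minimal platform_device for the audio component ("davinci-pcm-audio", "wm9712-codec") and register it from the board/SoC setup code so that the corresponding ASoC driver has a device to bind to. In sketch form, with a hypothetical foo_board_init() hook; real board code registers the device from its machine init or a devices[] table rather than a bare initcall:

#include <linux/init.h>
#include <linux/platform_device.h>

static struct platform_device foo_codec_device = {
        .name   = "wm9712-codec",       /* matched against the codec driver's name */
        .id     = -1,                   /* single instance */
};

static int __init foo_board_init(void)
{
        return platform_device_register(&foo_codec_device);
}
device_initcall(foo_board_init);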
diff --git a/arch/arm/mach-s3c2440/Kconfig b/arch/arm/mach-s3c2440/Kconfig index a0cb2581894f..50825a3f91cc 100644 --- a/arch/arm/mach-s3c2440/Kconfig +++ b/arch/arm/mach-s3c2440/Kconfig | |||
@@ -99,6 +99,7 @@ config MACH_NEO1973_GTA02 | |||
99 | select POWER_SUPPLY | 99 | select POWER_SUPPLY |
100 | select MACH_NEO1973 | 100 | select MACH_NEO1973 |
101 | select S3C2410_PWM | 101 | select S3C2410_PWM |
102 | select S3C_DEV_USB_HOST | ||
102 | help | 103 | help |
103 | Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone | 104 | Say Y here if you are using the Openmoko GTA02 / Freerunner GSM Phone |
104 | 105 | ||
diff --git a/arch/arm/mach-s3c2440/include/mach/gta02.h b/arch/arm/mach-s3c2440/include/mach/gta02.h index 953331d8d56a..3a56a229cac6 100644 --- a/arch/arm/mach-s3c2440/include/mach/gta02.h +++ b/arch/arm/mach-s3c2440/include/mach/gta02.h | |||
@@ -44,19 +44,19 @@ | |||
44 | #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ | 44 | #define GTA02v3_GPIO_nUSB_FLT S3C2410_GPG(10) /* v3 + v4 only */ |
45 | #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ | 45 | #define GTA02v3_GPIO_nGSM_OC S3C2410_GPG(11) /* v3 + v4 only */ |
46 | 46 | ||
47 | #define GTA02_GPIO_AMP_SHUT S3C2440_GPJ1 /* v2 + v3 + v4 only */ | 47 | #define GTA02_GPIO_AMP_SHUT S3C2410_GPJ(1) /* v2 + v3 + v4 only */ |
48 | #define GTA02v1_GPIO_WLAN_GPIO10 S3C2440_GPJ2 | 48 | #define GTA02v1_GPIO_WLAN_GPIO10 S3C2410_GPJ(2) |
49 | #define GTA02_GPIO_HP_IN S3C2440_GPJ2 /* v2 + v3 + v4 only */ | 49 | #define GTA02_GPIO_HP_IN S3C2410_GPJ(2) /* v2 + v3 + v4 only */ |
50 | #define GTA02_GPIO_INT0 S3C2440_GPJ3 /* v2 + v3 + v4 only */ | 50 | #define GTA02_GPIO_INT0 S3C2410_GPJ(3) /* v2 + v3 + v4 only */ |
51 | #define GTA02_GPIO_nGSM_EN S3C2440_GPJ4 | 51 | #define GTA02_GPIO_nGSM_EN S3C2410_GPJ(4) |
52 | #define GTA02_GPIO_3D_RESET S3C2440_GPJ5 | 52 | #define GTA02_GPIO_3D_RESET S3C2410_GPJ(5) |
53 | #define GTA02_GPIO_nDL_GSM S3C2440_GPJ6 /* v4 + v5 only */ | 53 | #define GTA02_GPIO_nDL_GSM S3C2410_GPJ(6) /* v4 + v5 only */ |
54 | #define GTA02_GPIO_WLAN_GPIO0 S3C2440_GPJ7 | 54 | #define GTA02_GPIO_WLAN_GPIO0 S3C2410_GPJ(7) |
55 | #define GTA02v1_GPIO_BAT_ID S3C2440_GPJ8 | 55 | #define GTA02v1_GPIO_BAT_ID S3C2410_GPJ(8) |
56 | #define GTA02_GPIO_KEEPACT S3C2440_GPJ8 | 56 | #define GTA02_GPIO_KEEPACT S3C2410_GPJ(8) |
57 | #define GTA02v1_GPIO_HP_IN S3C2440_GPJ10 | 57 | #define GTA02v1_GPIO_HP_IN S3C2410_GPJ(10) |
58 | #define GTA02_CHIP_PWD S3C2440_GPJ11 /* v2 + v3 + v4 only */ | 58 | #define GTA02_CHIP_PWD S3C2410_GPJ(11) /* v2 + v3 + v4 only */ |
59 | #define GTA02_GPIO_nWLAN_RESET S3C2440_GPJ12 /* v2 + v3 + v4 only */ | 59 | #define GTA02_GPIO_nWLAN_RESET S3C2410_GPJ(12) /* v2 + v3 + v4 only */ |
60 | 60 | ||
61 | #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 | 61 | #define GTA02_IRQ_GSENSOR_1 IRQ_EINT0 |
62 | #define GTA02_IRQ_MODEM IRQ_EINT1 | 62 | #define GTA02_IRQ_MODEM IRQ_EINT1 |
diff --git a/arch/arm/mach-s3c64xx/clock.c b/arch/arm/mach-s3c64xx/clock.c index dd3782064508..fdfc4d5e37a1 100644 --- a/arch/arm/mach-s3c64xx/clock.c +++ b/arch/arm/mach-s3c64xx/clock.c | |||
@@ -151,6 +151,12 @@ static struct clk init_clocks_off[] = { | |||
151 | .enable = s3c64xx_pclk_ctrl, | 151 | .enable = s3c64xx_pclk_ctrl, |
152 | .ctrlbit = S3C_CLKCON_PCLK_IIC, | 152 | .ctrlbit = S3C_CLKCON_PCLK_IIC, |
153 | }, { | 153 | }, { |
154 | .name = "i2c", | ||
155 | .id = 1, | ||
156 | .parent = &clk_p, | ||
157 | .enable = s3c64xx_pclk_ctrl, | ||
158 | .ctrlbit = S3C6410_CLKCON_PCLK_I2C1, | ||
159 | }, { | ||
154 | .name = "iis", | 160 | .name = "iis", |
155 | .id = 0, | 161 | .id = 0, |
156 | .parent = &clk_p, | 162 | .parent = &clk_p, |
diff --git a/arch/arm/mach-s3c64xx/dma.c b/arch/arm/mach-s3c64xx/dma.c index 135db1b41252..c35585cf8c4f 100644 --- a/arch/arm/mach-s3c64xx/dma.c +++ b/arch/arm/mach-s3c64xx/dma.c | |||
@@ -690,12 +690,12 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase, | |||
690 | 690 | ||
691 | regptr = regs + PL080_Cx_BASE(0); | 691 | regptr = regs + PL080_Cx_BASE(0); |
692 | 692 | ||
693 | for (ch = 0; ch < 8; ch++, chno++, chptr++) { | 693 | for (ch = 0; ch < 8; ch++, chptr++) { |
694 | printk(KERN_INFO "%s: registering DMA %d (%p)\n", | 694 | pr_debug("%s: registering DMA %d (%p)\n", |
695 | __func__, chno, regptr); | 695 | __func__, chno + ch, regptr); |
696 | 696 | ||
697 | chptr->bit = 1 << ch; | 697 | chptr->bit = 1 << ch; |
698 | chptr->number = chno; | 698 | chptr->number = chno + ch; |
699 | chptr->dmac = dmac; | 699 | chptr->dmac = dmac; |
700 | chptr->regs = regptr; | 700 | chptr->regs = regptr; |
701 | regptr += PL080_Cx_STRIDE; | 701 | regptr += PL080_Cx_STRIDE; |
@@ -704,7 +704,8 @@ static int s3c64xx_dma_init1(int chno, enum dma_ch chbase, | |||
704 | /* for the moment, permanently enable the controller */ | 704 | /* for the moment, permanently enable the controller */ |
705 | writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); | 705 | writel(PL080_CONFIG_ENABLE, regs + PL080_CONFIG); |
706 | 706 | ||
707 | printk(KERN_INFO "PL080: IRQ %d, at %p\n", irq, regs); | 707 | printk(KERN_INFO "PL080: IRQ %d, at %p, channels %d..%d\n", |
708 | irq, regs, chno, chno+8); | ||
708 | 709 | ||
709 | return 0; | 710 | return 0; |
710 | 711 | ||
diff --git a/arch/arm/mach-s3c64xx/gpiolib.c b/arch/arm/mach-s3c64xx/gpiolib.c index fd99a82e82c4..92b09085caaa 100644 --- a/arch/arm/mach-s3c64xx/gpiolib.c +++ b/arch/arm/mach-s3c64xx/gpiolib.c | |||
@@ -72,7 +72,7 @@ static struct s3c_gpio_cfg gpio_4bit_cfg_eint0011 = { | |||
72 | .get_pull = s3c_gpio_getpull_updown, | 72 | .get_pull = s3c_gpio_getpull_updown, |
73 | }; | 73 | }; |
74 | 74 | ||
75 | int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) | 75 | static int s3c64xx_gpio2int_gpm(struct gpio_chip *chip, unsigned pin) |
76 | { | 76 | { |
77 | return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; | 77 | return pin < 5 ? IRQ_EINT(23) + pin : -ENXIO; |
78 | } | 78 | } |
@@ -138,7 +138,7 @@ static struct s3c_gpio_chip gpio_4bit[] = { | |||
138 | }, | 138 | }, |
139 | }; | 139 | }; |
140 | 140 | ||
141 | int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) | 141 | static int s3c64xx_gpio2int_gpl(struct gpio_chip *chip, unsigned pin) |
142 | { | 142 | { |
143 | return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; | 143 | return pin >= 8 ? IRQ_EINT(16) + pin - 8 : -ENXIO; |
144 | } | 144 | } |
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c index e85192a86fbe..a80a3163dd30 100644 --- a/arch/arm/mach-s3c64xx/mach-smdk6410.c +++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <linux/delay.h> | 28 | #include <linux/delay.h> |
29 | #include <linux/smsc911x.h> | 29 | #include <linux/smsc911x.h> |
30 | #include <linux/regulator/fixed.h> | 30 | #include <linux/regulator/fixed.h> |
31 | #include <linux/regulator/machine.h> | ||
31 | 32 | ||
32 | #ifdef CONFIG_SMDK6410_WM1190_EV1 | 33 | #ifdef CONFIG_SMDK6410_WM1190_EV1 |
33 | #include <linux/mfd/wm8350/core.h> | 34 | #include <linux/mfd/wm8350/core.h> |
@@ -351,7 +352,7 @@ static struct regulator_init_data smdk6410_vddpll = { | |||
351 | /* VDD_UH_MMC, LDO5 on J5 */ | 352 | /* VDD_UH_MMC, LDO5 on J5 */ |
352 | static struct regulator_init_data smdk6410_vdduh_mmc = { | 353 | static struct regulator_init_data smdk6410_vdduh_mmc = { |
353 | .constraints = { | 354 | .constraints = { |
354 | .name = "PVDD_UH/PVDD_MMC", | 355 | .name = "PVDD_UH+PVDD_MMC", |
355 | .always_on = 1, | 356 | .always_on = 1, |
356 | }, | 357 | }, |
357 | }; | 358 | }; |
@@ -417,7 +418,7 @@ static struct regulator_init_data smdk6410_vddaudio = { | |||
417 | /* S3C64xx internal logic & PLL */ | 418 | /* S3C64xx internal logic & PLL */ |
418 | static struct regulator_init_data wm8350_dcdc1_data = { | 419 | static struct regulator_init_data wm8350_dcdc1_data = { |
419 | .constraints = { | 420 | .constraints = { |
420 | .name = "PVDD_INT/PVDD_PLL", | 421 | .name = "PVDD_INT+PVDD_PLL", |
421 | .min_uV = 1200000, | 422 | .min_uV = 1200000, |
422 | .max_uV = 1200000, | 423 | .max_uV = 1200000, |
423 | .always_on = 1, | 424 | .always_on = 1, |
@@ -452,7 +453,7 @@ static struct regulator_consumer_supply wm8350_dcdc4_consumers[] = { | |||
452 | 453 | ||
453 | static struct regulator_init_data wm8350_dcdc4_data = { | 454 | static struct regulator_init_data wm8350_dcdc4_data = { |
454 | .constraints = { | 455 | .constraints = { |
455 | .name = "PVDD_HI/PVDD_EXT/PVDD_SYS/PVCCM2MTV", | 456 | .name = "PVDD_HI+PVDD_EXT+PVDD_SYS+PVCCM2MTV", |
456 | .min_uV = 3000000, | 457 | .min_uV = 3000000, |
457 | .max_uV = 3000000, | 458 | .max_uV = 3000000, |
458 | .always_on = 1, | 459 | .always_on = 1, |
@@ -464,7 +465,7 @@ static struct regulator_init_data wm8350_dcdc4_data = { | |||
464 | /* OTGi/1190-EV1 HPVDD & AVDD */ | 465 | /* OTGi/1190-EV1 HPVDD & AVDD */ |
465 | static struct regulator_init_data wm8350_ldo4_data = { | 466 | static struct regulator_init_data wm8350_ldo4_data = { |
466 | .constraints = { | 467 | .constraints = { |
467 | .name = "PVDD_OTGI/HPVDD/AVDD", | 468 | .name = "PVDD_OTGI+HPVDD+AVDD", |
468 | .min_uV = 1200000, | 469 | .min_uV = 1200000, |
469 | .max_uV = 1200000, | 470 | .max_uV = 1200000, |
470 | .apply_uV = 1, | 471 | .apply_uV = 1, |
@@ -552,7 +553,7 @@ static struct wm831x_backlight_pdata wm1192_backlight_pdata = { | |||
552 | 553 | ||
553 | static struct regulator_init_data wm1192_dcdc3 = { | 554 | static struct regulator_init_data wm1192_dcdc3 = { |
554 | .constraints = { | 555 | .constraints = { |
555 | .name = "PVDD_MEM/PVDD_GPS", | 556 | .name = "PVDD_MEM+PVDD_GPS", |
556 | .always_on = 1, | 557 | .always_on = 1, |
557 | }, | 558 | }, |
558 | }; | 559 | }; |
@@ -563,7 +564,7 @@ static struct regulator_consumer_supply wm1192_ldo1_consumers[] = { | |||
563 | 564 | ||
564 | static struct regulator_init_data wm1192_ldo1 = { | 565 | static struct regulator_init_data wm1192_ldo1 = { |
565 | .constraints = { | 566 | .constraints = { |
566 | .name = "PVDD_LCD/PVDD_EXT", | 567 | .name = "PVDD_LCD+PVDD_EXT", |
567 | .always_on = 1, | 568 | .always_on = 1, |
568 | }, | 569 | }, |
569 | .consumer_supplies = wm1192_ldo1_consumers, | 570 | .consumer_supplies = wm1192_ldo1_consumers, |
diff --git a/arch/arm/mach-s3c64xx/setup-keypad.c b/arch/arm/mach-s3c64xx/setup-keypad.c index f8ed0d22db70..1d4d0ee9e870 100644 --- a/arch/arm/mach-s3c64xx/setup-keypad.c +++ b/arch/arm/mach-s3c64xx/setup-keypad.c | |||
@@ -17,7 +17,7 @@ | |||
17 | void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) | 17 | void samsung_keypad_cfg_gpio(unsigned int rows, unsigned int cols) |
18 | { | 18 | { |
19 | /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ | 19 | /* Set all the necessary GPK pins to special-function 3: KP_ROW[x] */ |
20 | s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), 8 + rows, S3C_GPIO_SFN(3)); | 20 | s3c_gpio_cfgrange_nopull(S3C64XX_GPK(8), rows, S3C_GPIO_SFN(3)); |
21 | 21 | ||
22 | /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ | 22 | /* Set all the necessary GPL pins to special-function 3: KP_COL[x] */ |
23 | s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3)); | 23 | s3c_gpio_cfgrange_nopull(S3C64XX_GPL(0), cols, S3C_GPIO_SFN(3)); |
diff --git a/arch/arm/mach-s3c64xx/setup-sdhci.c b/arch/arm/mach-s3c64xx/setup-sdhci.c index 1a942037c4ef..f344a222bc84 100644 --- a/arch/arm/mach-s3c64xx/setup-sdhci.c +++ b/arch/arm/mach-s3c64xx/setup-sdhci.c | |||
@@ -56,7 +56,7 @@ void s3c6400_setup_sdhci_cfg_card(struct platform_device *dev, | |||
56 | else | 56 | else |
57 | ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); | 57 | ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); |
58 | 58 | ||
59 | printk(KERN_INFO "%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); | 59 | pr_debug("%s: CTRL 2=%08x, 3=%08x\n", __func__, ctrl2, ctrl3); |
60 | writel(ctrl2, r + S3C_SDHCI_CONTROL2); | 60 | writel(ctrl2, r + S3C_SDHCI_CONTROL2); |
61 | writel(ctrl3, r + S3C_SDHCI_CONTROL3); | 61 | writel(ctrl3, r + S3C_SDHCI_CONTROL3); |
62 | } | 62 | } |
diff --git a/arch/arm/mach-s5p64x0/include/mach/gpio.h b/arch/arm/mach-s5p64x0/include/mach/gpio.h index 5486c8f01f1d..adb5f298ead8 100644 --- a/arch/arm/mach-s5p64x0/include/mach/gpio.h +++ b/arch/arm/mach-s5p64x0/include/mach/gpio.h | |||
@@ -23,7 +23,7 @@ | |||
23 | #define S5P6440_GPIO_A_NR (6) | 23 | #define S5P6440_GPIO_A_NR (6) |
24 | #define S5P6440_GPIO_B_NR (7) | 24 | #define S5P6440_GPIO_B_NR (7) |
25 | #define S5P6440_GPIO_C_NR (8) | 25 | #define S5P6440_GPIO_C_NR (8) |
26 | #define S5P6440_GPIO_F_NR (2) | 26 | #define S5P6440_GPIO_F_NR (16) |
27 | #define S5P6440_GPIO_G_NR (7) | 27 | #define S5P6440_GPIO_G_NR (7) |
28 | #define S5P6440_GPIO_H_NR (10) | 28 | #define S5P6440_GPIO_H_NR (10) |
29 | #define S5P6440_GPIO_I_NR (16) | 29 | #define S5P6440_GPIO_I_NR (16) |
@@ -36,7 +36,7 @@ | |||
36 | #define S5P6450_GPIO_B_NR (7) | 36 | #define S5P6450_GPIO_B_NR (7) |
37 | #define S5P6450_GPIO_C_NR (8) | 37 | #define S5P6450_GPIO_C_NR (8) |
38 | #define S5P6450_GPIO_D_NR (8) | 38 | #define S5P6450_GPIO_D_NR (8) |
39 | #define S5P6450_GPIO_F_NR (2) | 39 | #define S5P6450_GPIO_F_NR (16) |
40 | #define S5P6450_GPIO_G_NR (14) | 40 | #define S5P6450_GPIO_G_NR (14) |
41 | #define S5P6450_GPIO_H_NR (10) | 41 | #define S5P6450_GPIO_H_NR (10) |
42 | #define S5P6450_GPIO_I_NR (16) | 42 | #define S5P6450_GPIO_I_NR (16) |
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 2123b96b5638..4303a86e6e38 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c | |||
@@ -454,6 +454,7 @@ static void __init ag5evm_init(void) | |||
454 | gpio_direction_output(GPIO_PORT217, 0); | 454 | gpio_direction_output(GPIO_PORT217, 0); |
455 | mdelay(1); | 455 | mdelay(1); |
456 | gpio_set_value(GPIO_PORT217, 1); | 456 | gpio_set_value(GPIO_PORT217, 1); |
457 | mdelay(100); | ||
457 | 458 | ||
458 | /* LCD backlight controller */ | 459 | /* LCD backlight controller */ |
459 | gpio_request(GPIO_PORT235, NULL); /* RESET */ | 460 | gpio_request(GPIO_PORT235, NULL); /* RESET */ |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 3cf0951caa2d..81d6536552a9 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
@@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void) | |||
1303 | 1303 | ||
1304 | lcdc_info.clock_source = LCDC_CLK_BUS; | 1304 | lcdc_info.clock_source = LCDC_CLK_BUS; |
1305 | lcdc_info.ch[0].interface_type = RGB18; | 1305 | lcdc_info.ch[0].interface_type = RGB18; |
1306 | lcdc_info.ch[0].clock_divider = 2; | 1306 | lcdc_info.ch[0].clock_divider = 3; |
1307 | lcdc_info.ch[0].flags = 0; | 1307 | lcdc_info.ch[0].flags = 0; |
1308 | lcdc_info.ch[0].lcd_size_cfg.width = 152; | 1308 | lcdc_info.ch[0].lcd_size_cfg.width = 152; |
1309 | lcdc_info.ch[0].lcd_size_cfg.height = 91; | 1309 | lcdc_info.ch[0].lcd_size_cfg.height = 91; |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index fb4213a4e15a..1657eac5dde2 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
@@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = { | |||
303 | .lcd_cfg = mackerel_lcdc_modes, | 303 | .lcd_cfg = mackerel_lcdc_modes, |
304 | .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), | 304 | .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), |
305 | .interface_type = RGB24, | 305 | .interface_type = RGB24, |
306 | .clock_divider = 2, | 306 | .clock_divider = 3, |
307 | .flags = 0, | 307 | .flags = 0, |
308 | .lcd_size_cfg.width = 152, | 308 | .lcd_size_cfg.width = 152, |
309 | .lcd_size_cfg.height = 91, | 309 | .lcd_size_cfg.height = 91, |
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index ddd4a1b775f0..7e58904c1c8c 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c | |||
@@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = { | |||
263 | }; | 263 | }; |
264 | 264 | ||
265 | enum { MSTP001, | 265 | enum { MSTP001, |
266 | MSTP125, MSTP118, MSTP116, MSTP100, | 266 | MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, |
267 | MSTP219, | 267 | MSTP219, |
268 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, | 268 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, |
269 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, | 269 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, |
@@ -275,6 +275,10 @@ enum { MSTP001, | |||
275 | 275 | ||
276 | static struct clk mstp_clks[MSTP_NR] = { | 276 | static struct clk mstp_clks[MSTP_NR] = { |
277 | [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ | 277 | [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ |
278 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ | ||
279 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ | ||
280 | [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ | ||
281 | [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ | ||
278 | [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ | 282 | [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ |
279 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ | 283 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ |
280 | [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ | 284 | [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ |
@@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = { | |||
306 | CLKDEV_CON_ID("r_clk", &r_clk), | 310 | CLKDEV_CON_ID("r_clk", &r_clk), |
307 | 311 | ||
308 | /* DIV6 clocks */ | 312 | /* DIV6 clocks */ |
313 | CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), | ||
314 | CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), | ||
315 | CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), | ||
309 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), | 316 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), |
310 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), | 317 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), |
311 | CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), | 318 | CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), |
@@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = { | |||
313 | 320 | ||
314 | /* MSTP32 clocks */ | 321 | /* MSTP32 clocks */ |
315 | CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ | 322 | CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ |
316 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ | 323 | CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ |
324 | CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ | ||
325 | CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ | ||
326 | CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ | ||
317 | CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ | 327 | CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ |
318 | CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ | 328 | CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ |
319 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ | ||
320 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ | 329 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ |
330 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ | ||
331 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ | ||
321 | CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ | 332 | CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ |
322 | CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ | 333 | CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ |
323 | CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ | 334 | CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ |
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt +++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt | |||
@@ -6,13 +6,10 @@ LIST "RWT Setting" | |||
6 | EW 0xE6020004, 0xA500 | 6 | EW 0xE6020004, 0xA500 |
7 | EW 0xE6030004, 0xA500 | 7 | EW 0xE6030004, 0xA500 |
8 | 8 | ||
9 | DD 0x01001000, 0x01001000 | ||
10 | |||
11 | LIST "GPIO Setting" | 9 | LIST "GPIO Setting" |
12 | EB 0xE6051013, 0xA2 | 10 | EB 0xE6051013, 0xA2 |
13 | 11 | ||
14 | LIST "CPG" | 12 | LIST "CPG" |
15 | ED 0xE6150080, 0x00000180 | ||
16 | ED 0xE61500C0, 0x00000002 | 13 | ED 0xE61500C0, 0x00000002 |
17 | 14 | ||
18 | WAIT 1, 0xFE40009C | 15 | WAIT 1, 0xFE40009C |
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 | |||
37 | 34 | ||
38 | WAIT 1, 0xFE40009C | 35 | WAIT 1, 0xFE40009C |
39 | 36 | ||
37 | LIST "SUB/USBClk" | ||
38 | ED 0xE6150080, 0x00000180 | ||
39 | |||
40 | LIST "BSC" | 40 | LIST "BSC" |
41 | ED 0xFEC10000, 0x00E0001B | 41 | ED 0xFEC10000, 0x00E0001B |
42 | 42 | ||
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 | |||
53 | ED 0xFE40004C, 0x00110209 | 53 | ED 0xFE40004C, 0x00110209 |
54 | ED 0xFE400010, 0x00000087 | 54 | ED 0xFE400010, 0x00000087 |
55 | 55 | ||
56 | WAIT 10, 0xFE40009C | 56 | WAIT 30, 0xFE40009C |
57 | 57 | ||
58 | ED 0xFE400084, 0x0000003F | 58 | ED 0xFE400084, 0x0000003F |
59 | EB 0xFE500000, 0x00 | 59 | EB 0xFE500000, 0x00 |
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 | |||
84 | 84 | ||
85 | WAIT 1, 0xFE40009C | 85 | WAIT 1, 0xFE40009C |
86 | 86 | ||
87 | ED 0xE6150354, 0x00000002 | 87 | ED 0xFE400354, 0x01AD8002 |
88 | 88 | ||
89 | LIST "SCIF0 - Serial port for earlyprintk" | 89 | LIST "SCIF0 - Serial port for earlyprintk" |
90 | EB 0xE6053098, 0x11 | 90 | EB 0xE6053098, 0x11 |
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt +++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt | |||
@@ -6,13 +6,10 @@ LIST "RWT Setting" | |||
6 | EW 0xE6020004, 0xA500 | 6 | EW 0xE6020004, 0xA500 |
7 | EW 0xE6030004, 0xA500 | 7 | EW 0xE6030004, 0xA500 |
8 | 8 | ||
9 | DD 0x01001000, 0x01001000 | ||
10 | |||
11 | LIST "GPIO Setting" | 9 | LIST "GPIO Setting" |
12 | EB 0xE6051013, 0xA2 | 10 | EB 0xE6051013, 0xA2 |
13 | 11 | ||
14 | LIST "CPG" | 12 | LIST "CPG" |
15 | ED 0xE6150080, 0x00000180 | ||
16 | ED 0xE61500C0, 0x00000002 | 13 | ED 0xE61500C0, 0x00000002 |
17 | 14 | ||
18 | WAIT 1, 0xFE40009C | 15 | WAIT 1, 0xFE40009C |
@@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 | |||
37 | 34 | ||
38 | WAIT 1, 0xFE40009C | 35 | WAIT 1, 0xFE40009C |
39 | 36 | ||
37 | LIST "SUB/USBClk" | ||
38 | ED 0xE6150080, 0x00000180 | ||
39 | |||
40 | LIST "BSC" | 40 | LIST "BSC" |
41 | ED 0xFEC10000, 0x00E0001B | 41 | ED 0xFEC10000, 0x00E0001B |
42 | 42 | ||
@@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 | |||
53 | ED 0xFE40004C, 0x00110209 | 53 | ED 0xFE40004C, 0x00110209 |
54 | ED 0xFE400010, 0x00000087 | 54 | ED 0xFE400010, 0x00000087 |
55 | 55 | ||
56 | WAIT 10, 0xFE40009C | 56 | WAIT 30, 0xFE40009C |
57 | 57 | ||
58 | ED 0xFE400084, 0x0000003F | 58 | ED 0xFE400084, 0x0000003F |
59 | EB 0xFE500000, 0x00 | 59 | EB 0xFE500000, 0x00 |
@@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 | |||
84 | 84 | ||
85 | WAIT 1, 0xFE40009C | 85 | WAIT 1, 0xFE40009C |
86 | 86 | ||
87 | ED 0xE6150354, 0x00000002 | 87 | ED 0xFE400354, 0x01AD8002 |
88 | 88 | ||
89 | LIST "SCIF0 - Serial port for earlyprintk" | 89 | LIST "SCIF0 - Serial port for earlyprintk" |
90 | EB 0xE6053098, 0x11 | 90 | EB 0xE6053098, 0x11 |
diff --git a/arch/arm/mach-tegra/include/mach/kbc.h b/arch/arm/mach-tegra/include/mach/kbc.h index 66ad2760c621..04c779832c78 100644 --- a/arch/arm/mach-tegra/include/mach/kbc.h +++ b/arch/arm/mach-tegra/include/mach/kbc.h | |||
@@ -57,5 +57,6 @@ struct tegra_kbc_platform_data { | |||
57 | const struct matrix_keymap_data *keymap_data; | 57 | const struct matrix_keymap_data *keymap_data; |
58 | 58 | ||
59 | bool wakeup; | 59 | bool wakeup; |
60 | bool use_fn_map; | ||
60 | }; | 61 | }; |
61 | #endif | 62 | #endif |
diff --git a/arch/arm/plat-omap/mailbox.c b/arch/arm/plat-omap/mailbox.c index 459b319a9fad..49d3208793e5 100644 --- a/arch/arm/plat-omap/mailbox.c +++ b/arch/arm/plat-omap/mailbox.c | |||
@@ -322,15 +322,18 @@ static void omap_mbox_fini(struct omap_mbox *mbox) | |||
322 | 322 | ||
323 | struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) | 323 | struct omap_mbox *omap_mbox_get(const char *name, struct notifier_block *nb) |
324 | { | 324 | { |
325 | struct omap_mbox *mbox; | 325 | struct omap_mbox *_mbox, *mbox = NULL; |
326 | int ret; | 326 | int i, ret; |
327 | 327 | ||
328 | if (!mboxes) | 328 | if (!mboxes) |
329 | return ERR_PTR(-EINVAL); | 329 | return ERR_PTR(-EINVAL); |
330 | 330 | ||
331 | for (mbox = *mboxes; mbox; mbox++) | 331 | for (i = 0; (_mbox = mboxes[i]); i++) { |
332 | if (!strcmp(mbox->name, name)) | 332 | if (!strcmp(_mbox->name, name)) { |
333 | mbox = _mbox; | ||
333 | break; | 334 | break; |
335 | } | ||
336 | } | ||
334 | 337 | ||
335 | if (!mbox) | 338 | if (!mbox) |
336 | return ERR_PTR(-ENOENT); | 339 | return ERR_PTR(-ENOENT); |
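The omap_mbox_get() change above stops stepping a pointer across the mailbox objects themselves and instead walks 'mboxes' as a NULL-terminated array of pointers, which is how the registration side actually lays it out. A minimal userspace sketch of that lookup pattern follows; every name in it is illustrative, not the OMAP mailbox API.

#include <stdio.h>
#include <string.h>

struct box { const char *name; };

/* Hypothetical registry: an array of pointers terminated by NULL,
 * mirroring how 'mboxes' is laid out after this patch. */
static struct box a = { "dsp" }, b = { "iva2" };
static struct box *boxes[] = { &a, &b, NULL };

static struct box *box_get(const char *name)
{
	struct box *_box, *box = NULL;
	int i;

	/* Walk the pointer table until the NULL sentinel. */
	for (i = 0; (_box = boxes[i]); i++) {
		if (!strcmp(_box->name, name)) {
			box = _box;
			break;
		}
	}
	return box;	/* NULL means "not found", like -ENOENT above */
}

int main(void)
{
	printf("%s\n", box_get("iva2") ? "found" : "missing");
	return 0;
}

The key design point is the sentinel: iterating an array of pointers with `boxes[i]` stops cleanly at NULL, whereas incrementing a pointer to the objects themselves (the old code) walks off the end.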
diff --git a/arch/arm/plat-samsung/dev-uart.c b/arch/arm/plat-samsung/dev-uart.c index 3776cd952450..5928105490fa 100644 --- a/arch/arm/plat-samsung/dev-uart.c +++ b/arch/arm/plat-samsung/dev-uart.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | 17 | ||
18 | #include <plat/devs.h> | ||
19 | |||
18 | /* uart devices */ | 20 | /* uart devices */ |
19 | 21 | ||
20 | static struct platform_device s3c24xx_uart_device0 = { | 22 | static struct platform_device s3c24xx_uart_device0 = { |
diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S index 250f4d4b9436..06a5e674401f 100644 --- a/arch/blackfin/lib/outs.S +++ b/arch/blackfin/lib/outs.S | |||
@@ -13,6 +13,8 @@ | |||
13 | .align 2 | 13 | .align 2 |
14 | 14 | ||
15 | ENTRY(_outsl) | 15 | ENTRY(_outsl) |
16 | CC = R2 == 0; | ||
17 | IF CC JUMP 1f; | ||
16 | P0 = R0; /* P0 = port */ | 18 | P0 = R0; /* P0 = port */ |
17 | P1 = R1; /* P1 = address */ | 19 | P1 = R1; /* P1 = address */ |
18 | P2 = R2; /* P2 = count */ | 20 | P2 = R2; /* P2 = count */ |
@@ -20,10 +22,12 @@ ENTRY(_outsl) | |||
20 | LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; | 22 | LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; |
21 | .Llong_loop_s: R0 = [P1++]; | 23 | .Llong_loop_s: R0 = [P1++]; |
22 | .Llong_loop_e: [P0] = R0; | 24 | .Llong_loop_e: [P0] = R0; |
23 | RTS; | 25 | 1: RTS; |
24 | ENDPROC(_outsl) | 26 | ENDPROC(_outsl) |
25 | 27 | ||
26 | ENTRY(_outsw) | 28 | ENTRY(_outsw) |
29 | CC = R2 == 0; | ||
30 | IF CC JUMP 1f; | ||
27 | P0 = R0; /* P0 = port */ | 31 | P0 = R0; /* P0 = port */ |
28 | P1 = R1; /* P1 = address */ | 32 | P1 = R1; /* P1 = address */ |
29 | P2 = R2; /* P2 = count */ | 33 | P2 = R2; /* P2 = count */ |
@@ -31,10 +35,12 @@ ENTRY(_outsw) | |||
31 | LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; | 35 | LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; |
32 | .Lword_loop_s: R0 = W[P1++]; | 36 | .Lword_loop_s: R0 = W[P1++]; |
33 | .Lword_loop_e: W[P0] = R0; | 37 | .Lword_loop_e: W[P0] = R0; |
34 | RTS; | 38 | 1: RTS; |
35 | ENDPROC(_outsw) | 39 | ENDPROC(_outsw) |
36 | 40 | ||
37 | ENTRY(_outsb) | 41 | ENTRY(_outsb) |
42 | CC = R2 == 0; | ||
43 | IF CC JUMP 1f; | ||
38 | P0 = R0; /* P0 = port */ | 44 | P0 = R0; /* P0 = port */ |
39 | P1 = R1; /* P1 = address */ | 45 | P1 = R1; /* P1 = address */ |
40 | P2 = R2; /* P2 = count */ | 46 | P2 = R2; /* P2 = count */ |
@@ -42,10 +48,12 @@ ENTRY(_outsb) | |||
42 | LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; | 48 | LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; |
43 | .Lbyte_loop_s: R0 = B[P1++]; | 49 | .Lbyte_loop_s: R0 = B[P1++]; |
44 | .Lbyte_loop_e: B[P0] = R0; | 50 | .Lbyte_loop_e: B[P0] = R0; |
45 | RTS; | 51 | 1: RTS; |
46 | ENDPROC(_outsb) | 52 | ENDPROC(_outsb) |
47 | 53 | ||
48 | ENTRY(_outsw_8) | 54 | ENTRY(_outsw_8) |
55 | CC = R2 == 0; | ||
56 | IF CC JUMP 1f; | ||
49 | P0 = R0; /* P0 = port */ | 57 | P0 = R0; /* P0 = port */ |
50 | P1 = R1; /* P1 = address */ | 58 | P1 = R1; /* P1 = address */ |
51 | P2 = R2; /* P2 = count */ | 59 | P2 = R2; /* P2 = count */ |
@@ -56,5 +64,5 @@ ENTRY(_outsw_8) | |||
56 | R0 = R0 << 8; | 64 | R0 = R0 << 8; |
57 | R0 = R0 + R1; | 65 | R0 = R0 + R1; |
58 | .Lword8_loop_e: W[P0] = R0; | 66 | .Lword8_loop_e: W[P0] = R0; |
59 | RTS; | 67 | 1: RTS; |
60 | ENDPROC(_outsw_8) | 68 | ENDPROC(_outsw_8) |
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S index 790c767ca95a..ab4a925a443e 100644 --- a/arch/blackfin/mach-common/cache.S +++ b/arch/blackfin/mach-common/cache.S | |||
@@ -58,6 +58,8 @@ | |||
58 | 1: | 58 | 1: |
59 | .ifeqs "\flushins", BROK_FLUSH_INST | 59 | .ifeqs "\flushins", BROK_FLUSH_INST |
60 | \flushins [P0++]; | 60 | \flushins [P0++]; |
61 | nop; | ||
62 | nop; | ||
61 | 2: nop; | 63 | 2: nop; |
62 | .else | 64 | .else |
63 | 2: \flushins [P0++]; | 65 | 2: \flushins [P0++]; |
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index 442218980db0..c49be845f96a 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S | |||
@@ -72,11 +72,6 @@ SECTIONS | |||
72 | INIT_TEXT_SECTION(PAGE_SIZE) | 72 | INIT_TEXT_SECTION(PAGE_SIZE) |
73 | .init.data : { INIT_DATA } | 73 | .init.data : { INIT_DATA } |
74 | .init.setup : { INIT_SETUP(16) } | 74 | .init.setup : { INIT_SETUP(16) } |
75 | #ifdef CONFIG_ETRAX_ARCH_V32 | ||
76 | __start___param = .; | ||
77 | __param : { *(__param) } | ||
78 | __stop___param = .; | ||
79 | #endif | ||
80 | .initcall.init : { | 75 | .initcall.init : { |
81 | INIT_CALLS | 76 | INIT_CALLS |
82 | } | 77 | } |
diff --git a/arch/ia64/include/asm/xen/hypercall.h b/arch/ia64/include/asm/xen/hypercall.h index 96fc62366aa4..ed28bcd5bb85 100644 --- a/arch/ia64/include/asm/xen/hypercall.h +++ b/arch/ia64/include/asm/xen/hypercall.h | |||
@@ -107,7 +107,7 @@ extern unsigned long __hypercall(unsigned long a1, unsigned long a2, | |||
107 | static inline int | 107 | static inline int |
108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) | 108 | xencomm_arch_hypercall_sched_op(int cmd, struct xencomm_handle *arg) |
109 | { | 109 | { |
110 | return _hypercall2(int, sched_op_new, cmd, arg); | 110 | return _hypercall2(int, sched_op, cmd, arg); |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline long | 113 | static inline long |
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index f5ecc0566bc2..d88983516e26 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -4,6 +4,7 @@ config MIPS | |||
4 | select HAVE_GENERIC_DMA_COHERENT | 4 | select HAVE_GENERIC_DMA_COHERENT |
5 | select HAVE_IDE | 5 | select HAVE_IDE |
6 | select HAVE_OPROFILE | 6 | select HAVE_OPROFILE |
7 | select HAVE_IRQ_WORK | ||
7 | select HAVE_PERF_EVENTS | 8 | select HAVE_PERF_EVENTS |
8 | select PERF_USE_VMALLOC | 9 | select PERF_USE_VMALLOC |
9 | select HAVE_ARCH_KGDB | 10 | select HAVE_ARCH_KGDB |
@@ -208,6 +209,7 @@ config MACH_JZ4740 | |||
208 | select ARCH_REQUIRE_GPIOLIB | 209 | select ARCH_REQUIRE_GPIOLIB |
209 | select SYS_HAS_EARLY_PRINTK | 210 | select SYS_HAS_EARLY_PRINTK |
210 | select HAVE_PWM | 211 | select HAVE_PWM |
212 | select HAVE_CLK | ||
211 | 213 | ||
212 | config LASAT | 214 | config LASAT |
213 | bool "LASAT Networks platforms" | 215 | bool "LASAT Networks platforms" |
@@ -333,6 +335,8 @@ config PNX8550_STB810 | |||
333 | config PMC_MSP | 335 | config PMC_MSP |
334 | bool "PMC-Sierra MSP chipsets" | 336 | bool "PMC-Sierra MSP chipsets" |
335 | depends on EXPERIMENTAL | 337 | depends on EXPERIMENTAL |
338 | select CEVT_R4K | ||
339 | select CSRC_R4K | ||
336 | select DMA_NONCOHERENT | 340 | select DMA_NONCOHERENT |
337 | select SWAP_IO_SPACE | 341 | select SWAP_IO_SPACE |
338 | select NO_EXCEPT_FILL | 342 | select NO_EXCEPT_FILL |
diff --git a/arch/mips/alchemy/mtx-1/board_setup.c b/arch/mips/alchemy/mtx-1/board_setup.c index 6398fa95905c..40b84b991191 100644 --- a/arch/mips/alchemy/mtx-1/board_setup.c +++ b/arch/mips/alchemy/mtx-1/board_setup.c | |||
@@ -54,8 +54,8 @@ int mtx1_pci_idsel(unsigned int devsel, int assert); | |||
54 | 54 | ||
55 | static void mtx1_reset(char *c) | 55 | static void mtx1_reset(char *c) |
56 | { | 56 | { |
57 | /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ | 57 | /* Jump to the reset vector */ |
58 | au_writel(0x00000000, 0xAE00001C); | 58 | __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void mtx1_power_off(void) | 61 | static void mtx1_power_off(void) |
diff --git a/arch/mips/alchemy/mtx-1/platform.c b/arch/mips/alchemy/mtx-1/platform.c index e30e42add697..956f946218c5 100644 --- a/arch/mips/alchemy/mtx-1/platform.c +++ b/arch/mips/alchemy/mtx-1/platform.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <linux/mtd/physmap.h> | 28 | #include <linux/mtd/physmap.h> |
29 | #include <mtd/mtd-abi.h> | 29 | #include <mtd/mtd-abi.h> |
30 | 30 | ||
31 | #include <asm/mach-au1x00/au1xxx_eth.h> | ||
32 | |||
31 | static struct gpio_keys_button mtx1_gpio_button[] = { | 33 | static struct gpio_keys_button mtx1_gpio_button[] = { |
32 | { | 34 | { |
33 | .gpio = 207, | 35 | .gpio = 207, |
@@ -140,10 +142,17 @@ static struct __initdata platform_device * mtx1_devs[] = { | |||
140 | &mtx1_mtd, | 142 | &mtx1_mtd, |
141 | }; | 143 | }; |
142 | 144 | ||
145 | static struct au1000_eth_platform_data mtx1_au1000_eth0_pdata = { | ||
146 | .phy_search_highest_addr = 1, | ||
147 | .phy1_search_mac0 = 1, | ||
148 | }; | ||
149 | |||
143 | static int __init mtx1_register_devices(void) | 150 | static int __init mtx1_register_devices(void) |
144 | { | 151 | { |
145 | int rc; | 152 | int rc; |
146 | 153 | ||
154 | au1xxx_override_eth_cfg(0, &mtx1_au1000_eth0_pdata); | ||
155 | |||
147 | rc = gpio_request(mtx1_gpio_button[0].gpio, | 156 | rc = gpio_request(mtx1_gpio_button[0].gpio, |
148 | mtx1_gpio_button[0].desc); | 157 | mtx1_gpio_button[0].desc); |
149 | if (rc < 0) { | 158 | if (rc < 0) { |
diff --git a/arch/mips/alchemy/xxs1500/board_setup.c b/arch/mips/alchemy/xxs1500/board_setup.c index b43c918925d3..80c521e5290d 100644 --- a/arch/mips/alchemy/xxs1500/board_setup.c +++ b/arch/mips/alchemy/xxs1500/board_setup.c | |||
@@ -36,8 +36,8 @@ | |||
36 | 36 | ||
37 | static void xxs1500_reset(char *c) | 37 | static void xxs1500_reset(char *c) |
38 | { | 38 | { |
39 | /* Hit BCSR.SYSTEM_CONTROL[SW_RST] */ | 39 | /* Jump to the reset vector */ |
40 | au_writel(0x00000000, 0xAE00001C); | 40 | __asm__ __volatile__("jr\t%0"::"r"(0xbfc00000)); |
41 | } | 41 | } |
42 | 42 | ||
43 | static void xxs1500_power_off(void) | 43 | static void xxs1500_power_off(void) |
diff --git a/arch/mips/include/asm/perf_event.h b/arch/mips/include/asm/perf_event.h index e00007cf8162..d0c77496c728 100644 --- a/arch/mips/include/asm/perf_event.h +++ b/arch/mips/include/asm/perf_event.h | |||
@@ -11,15 +11,5 @@ | |||
11 | 11 | ||
12 | #ifndef __MIPS_PERF_EVENT_H__ | 12 | #ifndef __MIPS_PERF_EVENT_H__ |
13 | #define __MIPS_PERF_EVENT_H__ | 13 | #define __MIPS_PERF_EVENT_H__ |
14 | 14 | /* Leave it empty here. The file is required by linux/perf_event.h */ | |
15 | /* | ||
16 | * MIPS performance counters do not raise NMI upon overflow, a regular | ||
17 | * interrupt will be signaled. Hence we can do the pending perf event | ||
18 | * work at the tail of the irq handler. | ||
19 | */ | ||
20 | static inline void | ||
21 | set_perf_event_pending(void) | ||
22 | { | ||
23 | } | ||
24 | |||
25 | #endif /* __MIPS_PERF_EVENT_H__ */ | 15 | #endif /* __MIPS_PERF_EVENT_H__ */ |
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c index 5a84a1f11231..94ca2b018af7 100644 --- a/arch/mips/kernel/ftrace.c +++ b/arch/mips/kernel/ftrace.c | |||
@@ -17,29 +17,13 @@ | |||
17 | #include <asm/cacheflush.h> | 17 | #include <asm/cacheflush.h> |
18 | #include <asm/uasm.h> | 18 | #include <asm/uasm.h> |
19 | 19 | ||
20 | /* | 20 | #include <asm-generic/sections.h> |
21 | * If the Instruction Pointer is in module space (0xc0000000), return true; | ||
22 | * otherwise, it is in kernel space (0x80000000), return false. | ||
23 | * | ||
24 | * FIXME: This will not work when the kernel space and module space are the | ||
25 | * same. If they are the same, we need to modify scripts/recordmcount.pl, | ||
26 | * ftrace_make_nop/call() and the other related parts to ensure the | ||
27 | * enabling/disabling of the calling site to _mcount is right for both kernel | ||
28 | * and module. | ||
29 | */ | ||
30 | |||
31 | static inline int in_module(unsigned long ip) | ||
32 | { | ||
33 | return ip & 0x40000000; | ||
34 | } | ||
35 | 21 | ||
36 | #ifdef CONFIG_DYNAMIC_FTRACE | 22 | #ifdef CONFIG_DYNAMIC_FTRACE |
37 | 23 | ||
38 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | 24 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ |
39 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | 25 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ |
40 | 26 | ||
41 | #define INSN_B_1F_4 0x10000004 /* b 1f; offset = 4 */ | ||
42 | #define INSN_B_1F_5 0x10000005 /* b 1f; offset = 5 */ | ||
43 | #define INSN_NOP 0x00000000 /* nop */ | 27 | #define INSN_NOP 0x00000000 /* nop */ |
44 | #define INSN_JAL(addr) \ | 28 | #define INSN_JAL(addr) \ |
45 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) | 29 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) |
@@ -69,6 +53,20 @@ static inline void ftrace_dyn_arch_init_insns(void) | |||
69 | #endif | 53 | #endif |
70 | } | 54 | } |
71 | 55 | ||
56 | /* | ||
57 | * Check if the address is in kernel space | ||
58 | * | ||
59 | * Clone core_kernel_text() from kernel/extable.c, but doesn't call | ||
60 | * init_kernel_text() for Ftrace doesn't trace functions in init sections. | ||
61 | */ | ||
62 | static inline int in_kernel_space(unsigned long ip) | ||
63 | { | ||
64 | if (ip >= (unsigned long)_stext && | ||
65 | ip <= (unsigned long)_etext) | ||
66 | return 1; | ||
67 | return 0; | ||
68 | } | ||
69 | |||
72 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | 70 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) |
73 | { | 71 | { |
74 | int faulted; | 72 | int faulted; |
@@ -84,6 +82,42 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | |||
84 | return 0; | 82 | return 0; |
85 | } | 83 | } |
86 | 84 | ||
85 | /* | ||
86 | * The details about the calling site of mcount on MIPS | ||
87 | * | ||
88 | * 1. For kernel: | ||
89 | * | ||
90 | * move at, ra | ||
91 | * jal _mcount --> nop | ||
92 | * | ||
93 | * 2. For modules: | ||
94 | * | ||
95 | * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT | ||
96 | * | ||
97 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
98 | * addiu v1, v1, low_16bit_of_mcount | ||
99 | * move at, ra | ||
100 | * move $12, ra_address | ||
101 | * jalr v1 | ||
102 | * sub sp, sp, 8 | ||
103 | * 1: offset = 5 instructions | ||
104 | * 2.2 For the Other situations | ||
105 | * | ||
106 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
107 | * addiu v1, v1, low_16bit_of_mcount | ||
108 | * move at, ra | ||
109 | * jalr v1 | ||
110 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
111 | * 1: offset = 4 instructions | ||
112 | */ | ||
113 | |||
114 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | ||
115 | #define MCOUNT_OFFSET_INSNS 5 | ||
116 | #else | ||
117 | #define MCOUNT_OFFSET_INSNS 4 | ||
118 | #endif | ||
119 | #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) | ||
120 | |||
87 | int ftrace_make_nop(struct module *mod, | 121 | int ftrace_make_nop(struct module *mod, |
88 | struct dyn_ftrace *rec, unsigned long addr) | 122 | struct dyn_ftrace *rec, unsigned long addr) |
89 | { | 123 | { |
@@ -91,39 +125,11 @@ int ftrace_make_nop(struct module *mod, | |||
91 | unsigned long ip = rec->ip; | 125 | unsigned long ip = rec->ip; |
92 | 126 | ||
93 | /* | 127 | /* |
94 | * We have compiled module with -mlong-calls, but compiled the kernel | 128 | * If ip is in kernel space, no long call, otherwise, long call is |
95 | * without it, we need to cope with them respectively. | 129 | * needed. |
96 | */ | 130 | */ |
97 | if (in_module(ip)) { | 131 | new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; |
98 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | 132 | |
99 | /* | ||
100 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
101 | * addiu v1, v1, low_16bit_of_mcount | ||
102 | * move at, ra | ||
103 | * move $12, ra_address | ||
104 | * jalr v1 | ||
105 | * sub sp, sp, 8 | ||
106 | * 1: offset = 5 instructions | ||
107 | */ | ||
108 | new = INSN_B_1F_5; | ||
109 | #else | ||
110 | /* | ||
111 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
112 | * addiu v1, v1, low_16bit_of_mcount | ||
113 | * move at, ra | ||
114 | * jalr v1 | ||
115 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
116 | * 1: offset = 4 instructions | ||
117 | */ | ||
118 | new = INSN_B_1F_4; | ||
119 | #endif | ||
120 | } else { | ||
121 | /* | ||
122 | * move at, ra | ||
123 | * jal _mcount --> nop | ||
124 | */ | ||
125 | new = INSN_NOP; | ||
126 | } | ||
127 | return ftrace_modify_code(ip, new); | 133 | return ftrace_modify_code(ip, new); |
128 | } | 134 | } |
129 | 135 | ||
@@ -132,8 +138,8 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | |||
132 | unsigned int new; | 138 | unsigned int new; |
133 | unsigned long ip = rec->ip; | 139 | unsigned long ip = rec->ip; |
134 | 140 | ||
135 | /* ip, module: 0xc0000000, kernel: 0x80000000 */ | 141 | new = in_kernel_space(ip) ? insn_jal_ftrace_caller : |
136 | new = in_module(ip) ? insn_lui_v1_hi16_mcount : insn_jal_ftrace_caller; | 142 | insn_lui_v1_hi16_mcount; |
137 | 143 | ||
138 | return ftrace_modify_code(ip, new); | 144 | return ftrace_modify_code(ip, new); |
139 | } | 145 | } |
@@ -190,29 +196,25 @@ int ftrace_disable_ftrace_graph_caller(void) | |||
190 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ | 196 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ |
191 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ | 197 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ |
192 | 198 | ||
193 | unsigned long ftrace_get_parent_addr(unsigned long self_addr, | 199 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long |
194 | unsigned long parent, | 200 | old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) |
195 | unsigned long parent_addr, | ||
196 | unsigned long fp) | ||
197 | { | 201 | { |
198 | unsigned long sp, ip, ra; | 202 | unsigned long sp, ip, tmp; |
199 | unsigned int code; | 203 | unsigned int code; |
200 | int faulted; | 204 | int faulted; |
201 | 205 | ||
202 | /* | 206 | /* |
203 | * For module, move the ip from calling site of mcount to the | 207 | * For module, move the ip from the return address after the |
204 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 20), but for | 208 | * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for |
205 | * kernel, move to the instruction "move ra, at"(offset is 12) | 209 | * kernel, move after the instruction "move ra, at"(offset is 16) |
206 | */ | 210 | */ |
207 | ip = self_addr - (in_module(self_addr) ? 20 : 12); | 211 | ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24); |
208 | 212 | ||
209 | /* | 213 | /* |
210 | * search the text until finding the non-store instruction or "s{d,w} | 214 | * search the text until finding the non-store instruction or "s{d,w} |
211 | * ra, offset(sp)" instruction | 215 | * ra, offset(sp)" instruction |
212 | */ | 216 | */ |
213 | do { | 217 | do { |
214 | ip -= 4; | ||
215 | |||
216 | /* get the code at "ip": code = *(unsigned int *)ip; */ | 218 | /* get the code at "ip": code = *(unsigned int *)ip; */ |
217 | safe_load_code(code, ip, faulted); | 219 | safe_load_code(code, ip, faulted); |
218 | 220 | ||
@@ -224,18 +226,20 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
224 | * store the ra on the stack | 226 | * store the ra on the stack |
225 | */ | 227 | */ |
226 | if ((code & S_R_SP) != S_R_SP) | 228 | if ((code & S_R_SP) != S_R_SP) |
227 | return parent_addr; | 229 | return parent_ra_addr; |
228 | 230 | ||
229 | } while (((code & S_RA_SP) != S_RA_SP)); | 231 | /* Move to the next instruction */ |
232 | ip -= 4; | ||
233 | } while ((code & S_RA_SP) != S_RA_SP); | ||
230 | 234 | ||
231 | sp = fp + (code & OFFSET_MASK); | 235 | sp = fp + (code & OFFSET_MASK); |
232 | 236 | ||
233 | /* ra = *(unsigned long *)sp; */ | 237 | /* tmp = *(unsigned long *)sp; */ |
234 | safe_load_stack(ra, sp, faulted); | 238 | safe_load_stack(tmp, sp, faulted); |
235 | if (unlikely(faulted)) | 239 | if (unlikely(faulted)) |
236 | return 0; | 240 | return 0; |
237 | 241 | ||
238 | if (ra == parent) | 242 | if (tmp == old_parent_ra) |
239 | return sp; | 243 | return sp; |
240 | return 0; | 244 | return 0; |
241 | } | 245 | } |
@@ -246,21 +250,21 @@ unsigned long ftrace_get_parent_addr(unsigned long self_addr, | |||
246 | * Hook the return address and push it in the stack of return addrs | 250 | * Hook the return address and push it in the stack of return addrs |
247 | * in current thread info. | 251 | * in current thread info. |
248 | */ | 252 | */ |
249 | void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | 253 | void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, |
250 | unsigned long fp) | 254 | unsigned long fp) |
251 | { | 255 | { |
252 | unsigned long old; | 256 | unsigned long old_parent_ra; |
253 | struct ftrace_graph_ent trace; | 257 | struct ftrace_graph_ent trace; |
254 | unsigned long return_hooker = (unsigned long) | 258 | unsigned long return_hooker = (unsigned long) |
255 | &return_to_handler; | 259 | &return_to_handler; |
256 | int faulted; | 260 | int faulted, insns; |
257 | 261 | ||
258 | if (unlikely(atomic_read(&current->tracing_graph_pause))) | 262 | if (unlikely(atomic_read(&current->tracing_graph_pause))) |
259 | return; | 263 | return; |
260 | 264 | ||
261 | /* | 265 | /* |
262 | * "parent" is the stack address saved the return address of the caller | 266 | * "parent_ra_addr" is the stack address saved the return address of |
263 | * of _mcount. | 267 | * the caller of _mcount. |
264 | * | 268 | * |
265 | * if the gcc < 4.5, a leaf function does not save the return address | 269 | * if the gcc < 4.5, a leaf function does not save the return address |
266 | * in the stack address, so, we "emulate" one in _mcount's stack space, | 270 | * in the stack address, so, we "emulate" one in _mcount's stack space, |
@@ -275,37 +279,44 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, | |||
275 | * do it in ftrace_graph_caller of mcount.S. | 279 | * do it in ftrace_graph_caller of mcount.S. |
276 | */ | 280 | */ |
277 | 281 | ||
278 | /* old = *parent; */ | 282 | /* old_parent_ra = *parent_ra_addr; */ |
279 | safe_load_stack(old, parent, faulted); | 283 | safe_load_stack(old_parent_ra, parent_ra_addr, faulted); |
280 | if (unlikely(faulted)) | 284 | if (unlikely(faulted)) |
281 | goto out; | 285 | goto out; |
282 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | 286 | #ifndef KBUILD_MCOUNT_RA_ADDRESS |
283 | parent = (unsigned long *)ftrace_get_parent_addr(self_addr, old, | 287 | parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, |
284 | (unsigned long)parent, fp); | 288 | old_parent_ra, (unsigned long)parent_ra_addr, fp); |
285 | /* | 289 | /* |
286 | * If fails when getting the stack address of the non-leaf function's | 290 | * If fails when getting the stack address of the non-leaf function's |
287 | * ra, stop function graph tracer and return | 291 | * ra, stop function graph tracer and return |
288 | */ | 292 | */ |
289 | if (parent == 0) | 293 | if (parent_ra_addr == 0) |
290 | goto out; | 294 | goto out; |
291 | #endif | 295 | #endif |
292 | /* *parent = return_hooker; */ | 296 | /* *parent_ra_addr = return_hooker; */ |
293 | safe_store_stack(return_hooker, parent, faulted); | 297 | safe_store_stack(return_hooker, parent_ra_addr, faulted); |
294 | if (unlikely(faulted)) | 298 | if (unlikely(faulted)) |
295 | goto out; | 299 | goto out; |
296 | 300 | ||
297 | if (ftrace_push_return_trace(old, self_addr, &trace.depth, fp) == | 301 | if (ftrace_push_return_trace(old_parent_ra, self_ra, &trace.depth, fp) |
298 | -EBUSY) { | 302 | == -EBUSY) { |
299 | *parent = old; | 303 | *parent_ra_addr = old_parent_ra; |
300 | return; | 304 | return; |
301 | } | 305 | } |
302 | 306 | ||
303 | trace.func = self_addr; | 307 | /* |
308 | * Get the recorded ip of the current mcount calling site in the | ||
309 | * __mcount_loc section, which will be used to filter the function | ||
310 | * entries configured through the tracing/set_graph_function interface. | ||
311 | */ | ||
312 | |||
313 | insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | ||
314 | trace.func = self_ra - (MCOUNT_INSN_SIZE * insns); | ||
304 | 315 | ||
305 | /* Only trace if the calling function expects to */ | 316 | /* Only trace if the calling function expects to */ |
306 | if (!ftrace_graph_entry(&trace)) { | 317 | if (!ftrace_graph_entry(&trace)) { |
307 | current->curr_ret_stack--; | 318 | current->curr_ret_stack--; |
308 | *parent = old; | 319 | *parent_ra_addr = old_parent_ra; |
309 | } | 320 | } |
310 | return; | 321 | return; |
311 | out: | 322 | out: |
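The ftrace rework above keys everything off in_kernel_space(): kernel call sites are patched to a plain nop, while module call sites (built with -mlong-calls) are patched to a short forward branch that skips the whole mcount call sequence, with the instruction offset folded into INSN_B_1F. Below is a standalone sketch of that selection logic; the _stext/_etext bounds and the KBUILD_MCOUNT_RA_ADDRESS_32BIT symbol standing in for the real Kconfig test are made up for illustration.

#include <stdio.h>

/* Mirrors the patch above: the low bits of the MIPS "b 1f" constant are
 * simply the number of instructions to skip, so the replacement opcode
 * is 0x10000000 | offset. */
#define INSN_NOP	0x00000000
#ifdef KBUILD_MCOUNT_RA_ADDRESS_32BIT	/* stand-in for the real Kconfig test */
#define MCOUNT_OFFSET_INSNS 5
#else
#define MCOUNT_OFFSET_INSNS 4
#endif
#define INSN_B_1F	(0x10000000 | MCOUNT_OFFSET_INSNS)

/* Hypothetical kernel text window standing in for _stext/_etext. */
static unsigned long text_start = 0x80100000UL, text_end = 0x80800000UL;

static int in_kernel_space(unsigned long ip)
{
	return ip >= text_start && ip <= text_end;
}

int main(void)
{
	unsigned long kernel_ip = 0x80234560UL, module_ip = 0xc0012340UL;

	/* Kernel call sites get a plain nop, module sites get the branch. */
	printf("kernel site patched to: %#x\n",
	       (unsigned)(in_kernel_space(kernel_ip) ? INSN_NOP : INSN_B_1F));
	printf("module site patched to: %#x\n",
	       (unsigned)(in_kernel_space(module_ip) ? INSN_NOP : INSN_B_1F));
	return 0;
}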
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c index 2b7f3f703b83..a8244854d3dc 100644 --- a/arch/mips/kernel/perf_event.c +++ b/arch/mips/kernel/perf_event.c | |||
@@ -161,41 +161,6 @@ mipspmu_event_set_period(struct perf_event *event, | |||
161 | return ret; | 161 | return ret; |
162 | } | 162 | } |
163 | 163 | ||
164 | static int mipspmu_enable(struct perf_event *event) | ||
165 | { | ||
166 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
167 | struct hw_perf_event *hwc = &event->hw; | ||
168 | int idx; | ||
169 | int err = 0; | ||
170 | |||
171 | /* To look for a free counter for this event. */ | ||
172 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
173 | if (idx < 0) { | ||
174 | err = idx; | ||
175 | goto out; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * If there is an event in the counter we are going to use then | ||
180 | * make sure it is disabled. | ||
181 | */ | ||
182 | event->hw.idx = idx; | ||
183 | mipspmu->disable_event(idx); | ||
184 | cpuc->events[idx] = event; | ||
185 | |||
186 | /* Set the period for the event. */ | ||
187 | mipspmu_event_set_period(event, hwc, idx); | ||
188 | |||
189 | /* Enable the event. */ | ||
190 | mipspmu->enable_event(hwc, idx); | ||
191 | |||
192 | /* Propagate our changes to the userspace mapping. */ | ||
193 | perf_event_update_userpage(event); | ||
194 | |||
195 | out: | ||
196 | return err; | ||
197 | } | ||
198 | |||
199 | static void mipspmu_event_update(struct perf_event *event, | 164 | static void mipspmu_event_update(struct perf_event *event, |
200 | struct hw_perf_event *hwc, | 165 | struct hw_perf_event *hwc, |
201 | int idx) | 166 | int idx) |
@@ -204,7 +169,7 @@ static void mipspmu_event_update(struct perf_event *event, | |||
204 | unsigned long flags; | 169 | unsigned long flags; |
205 | int shift = 64 - TOTAL_BITS; | 170 | int shift = 64 - TOTAL_BITS; |
206 | s64 prev_raw_count, new_raw_count; | 171 | s64 prev_raw_count, new_raw_count; |
207 | s64 delta; | 172 | u64 delta; |
208 | 173 | ||
209 | again: | 174 | again: |
210 | prev_raw_count = local64_read(&hwc->prev_count); | 175 | prev_raw_count = local64_read(&hwc->prev_count); |
@@ -231,32 +196,90 @@ again: | |||
231 | return; | 196 | return; |
232 | } | 197 | } |
233 | 198 | ||
234 | static void mipspmu_disable(struct perf_event *event) | 199 | static void mipspmu_start(struct perf_event *event, int flags) |
200 | { | ||
201 | struct hw_perf_event *hwc = &event->hw; | ||
202 | |||
203 | if (!mipspmu) | ||
204 | return; | ||
205 | |||
206 | if (flags & PERF_EF_RELOAD) | ||
207 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
208 | |||
209 | hwc->state = 0; | ||
210 | |||
211 | /* Set the period for the event. */ | ||
212 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
213 | |||
214 | /* Enable the event. */ | ||
215 | mipspmu->enable_event(hwc, hwc->idx); | ||
216 | } | ||
217 | |||
218 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
219 | { | ||
220 | struct hw_perf_event *hwc = &event->hw; | ||
221 | |||
222 | if (!mipspmu) | ||
223 | return; | ||
224 | |||
225 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
226 | /* We are working on a local event. */ | ||
227 | mipspmu->disable_event(hwc->idx); | ||
228 | barrier(); | ||
229 | mipspmu_event_update(event, hwc, hwc->idx); | ||
230 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static int mipspmu_add(struct perf_event *event, int flags) | ||
235 | { | 235 | { |
236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 236 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); |
237 | struct hw_perf_event *hwc = &event->hw; | 237 | struct hw_perf_event *hwc = &event->hw; |
238 | int idx = hwc->idx; | 238 | int idx; |
239 | int err = 0; | ||
239 | 240 | ||
241 | perf_pmu_disable(event->pmu); | ||
240 | 242 | ||
241 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); | 243 | /* To look for a free counter for this event. */ |
244 | idx = mipspmu->alloc_counter(cpuc, hwc); | ||
245 | if (idx < 0) { | ||
246 | err = idx; | ||
247 | goto out; | ||
248 | } | ||
242 | 249 | ||
243 | /* We are working on a local event. */ | 250 | /* |
251 | * If there is an event in the counter we are going to use then | ||
252 | * make sure it is disabled. | ||
253 | */ | ||
254 | event->hw.idx = idx; | ||
244 | mipspmu->disable_event(idx); | 255 | mipspmu->disable_event(idx); |
256 | cpuc->events[idx] = event; | ||
245 | 257 | ||
246 | barrier(); | 258 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
247 | 259 | if (flags & PERF_EF_START) | |
248 | mipspmu_event_update(event, hwc, idx); | 260 | mipspmu_start(event, PERF_EF_RELOAD); |
249 | cpuc->events[idx] = NULL; | ||
250 | clear_bit(idx, cpuc->used_mask); | ||
251 | 261 | ||
262 | /* Propagate our changes to the userspace mapping. */ | ||
252 | perf_event_update_userpage(event); | 263 | perf_event_update_userpage(event); |
264 | |||
265 | out: | ||
266 | perf_pmu_enable(event->pmu); | ||
267 | return err; | ||
253 | } | 268 | } |
254 | 269 | ||
255 | static void mipspmu_unthrottle(struct perf_event *event) | 270 | static void mipspmu_del(struct perf_event *event, int flags) |
256 | { | 271 | { |
272 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
257 | struct hw_perf_event *hwc = &event->hw; | 273 | struct hw_perf_event *hwc = &event->hw; |
274 | int idx = hwc->idx; | ||
258 | 275 | ||
259 | mipspmu->enable_event(hwc, hwc->idx); | 276 | WARN_ON(idx < 0 || idx >= mipspmu->num_counters); |
277 | |||
278 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
279 | cpuc->events[idx] = NULL; | ||
280 | clear_bit(idx, cpuc->used_mask); | ||
281 | |||
282 | perf_event_update_userpage(event); | ||
260 | } | 283 | } |
261 | 284 | ||
262 | static void mipspmu_read(struct perf_event *event) | 285 | static void mipspmu_read(struct perf_event *event) |
@@ -270,12 +293,17 @@ static void mipspmu_read(struct perf_event *event) | |||
270 | mipspmu_event_update(event, hwc, hwc->idx); | 293 | mipspmu_event_update(event, hwc, hwc->idx); |
271 | } | 294 | } |
272 | 295 | ||
273 | static struct pmu pmu = { | 296 | static void mipspmu_enable(struct pmu *pmu) |
274 | .enable = mipspmu_enable, | 297 | { |
275 | .disable = mipspmu_disable, | 298 | if (mipspmu) |
276 | .unthrottle = mipspmu_unthrottle, | 299 | mipspmu->start(); |
277 | .read = mipspmu_read, | 300 | } |
278 | }; | 301 | |
302 | static void mipspmu_disable(struct pmu *pmu) | ||
303 | { | ||
304 | if (mipspmu) | ||
305 | mipspmu->stop(); | ||
306 | } | ||
279 | 307 | ||
280 | static atomic_t active_events = ATOMIC_INIT(0); | 308 | static atomic_t active_events = ATOMIC_INIT(0); |
281 | static DEFINE_MUTEX(pmu_reserve_mutex); | 309 | static DEFINE_MUTEX(pmu_reserve_mutex); |
@@ -318,6 +346,82 @@ static void mipspmu_free_irq(void) | |||
318 | perf_irq = save_perf_irq; | 346 | perf_irq = save_perf_irq; |
319 | } | 347 | } |
320 | 348 | ||
349 | /* | ||
350 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
351 | * specific low-level init routines. | ||
352 | */ | ||
353 | static void reset_counters(void *arg); | ||
354 | static int __hw_perf_event_init(struct perf_event *event); | ||
355 | |||
356 | static void hw_perf_event_destroy(struct perf_event *event) | ||
357 | { | ||
358 | if (atomic_dec_and_mutex_lock(&active_events, | ||
359 | &pmu_reserve_mutex)) { | ||
360 | /* | ||
361 | * We must not call the destroy function with interrupts | ||
362 | * disabled. | ||
363 | */ | ||
364 | on_each_cpu(reset_counters, | ||
365 | (void *)(long)mipspmu->num_counters, 1); | ||
366 | mipspmu_free_irq(); | ||
367 | mutex_unlock(&pmu_reserve_mutex); | ||
368 | } | ||
369 | } | ||
370 | |||
371 | static int mipspmu_event_init(struct perf_event *event) | ||
372 | { | ||
373 | int err = 0; | ||
374 | |||
375 | switch (event->attr.type) { | ||
376 | case PERF_TYPE_RAW: | ||
377 | case PERF_TYPE_HARDWARE: | ||
378 | case PERF_TYPE_HW_CACHE: | ||
379 | break; | ||
380 | |||
381 | default: | ||
382 | return -ENOENT; | ||
383 | } | ||
384 | |||
385 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
386 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
387 | return -ENODEV; | ||
388 | |||
389 | if (!atomic_inc_not_zero(&active_events)) { | ||
390 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
391 | atomic_dec(&active_events); | ||
392 | return -ENOSPC; | ||
393 | } | ||
394 | |||
395 | mutex_lock(&pmu_reserve_mutex); | ||
396 | if (atomic_read(&active_events) == 0) | ||
397 | err = mipspmu_get_irq(); | ||
398 | |||
399 | if (!err) | ||
400 | atomic_inc(&active_events); | ||
401 | mutex_unlock(&pmu_reserve_mutex); | ||
402 | } | ||
403 | |||
404 | if (err) | ||
405 | return err; | ||
406 | |||
407 | err = __hw_perf_event_init(event); | ||
408 | if (err) | ||
409 | hw_perf_event_destroy(event); | ||
410 | |||
411 | return err; | ||
412 | } | ||
413 | |||
414 | static struct pmu pmu = { | ||
415 | .pmu_enable = mipspmu_enable, | ||
416 | .pmu_disable = mipspmu_disable, | ||
417 | .event_init = mipspmu_event_init, | ||
418 | .add = mipspmu_add, | ||
419 | .del = mipspmu_del, | ||
420 | .start = mipspmu_start, | ||
421 | .stop = mipspmu_stop, | ||
422 | .read = mipspmu_read, | ||
423 | }; | ||
424 | |||
321 | static inline unsigned int | 425 | static inline unsigned int |
322 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) | 426 | mipspmu_perf_event_encode(const struct mips_perf_event *pev) |
323 | { | 427 | { |
@@ -382,8 +486,9 @@ static int validate_event(struct cpu_hw_events *cpuc, | |||
382 | { | 486 | { |
383 | struct hw_perf_event fake_hwc = event->hw; | 487 | struct hw_perf_event fake_hwc = event->hw; |
384 | 488 | ||
385 | if (event->pmu && event->pmu != &pmu) | 489 | /* Allow mixed event group. So return 1 to pass validation. */ |
386 | return 0; | 490 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) |
491 | return 1; | ||
387 | 492 | ||
388 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; | 493 | return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0; |
389 | } | 494 | } |
@@ -409,73 +514,6 @@ static int validate_group(struct perf_event *event) | |||
409 | return 0; | 514 | return 0; |
410 | } | 515 | } |
411 | 516 | ||
412 | /* | ||
413 | * mipsxx/rm9000/loongson2 have different performance counters, they have | ||
414 | * specific low-level init routines. | ||
415 | */ | ||
416 | static void reset_counters(void *arg); | ||
417 | static int __hw_perf_event_init(struct perf_event *event); | ||
418 | |||
419 | static void hw_perf_event_destroy(struct perf_event *event) | ||
420 | { | ||
421 | if (atomic_dec_and_mutex_lock(&active_events, | ||
422 | &pmu_reserve_mutex)) { | ||
423 | /* | ||
424 | * We must not call the destroy function with interrupts | ||
425 | * disabled. | ||
426 | */ | ||
427 | on_each_cpu(reset_counters, | ||
428 | (void *)(long)mipspmu->num_counters, 1); | ||
429 | mipspmu_free_irq(); | ||
430 | mutex_unlock(&pmu_reserve_mutex); | ||
431 | } | ||
432 | } | ||
433 | |||
434 | const struct pmu *hw_perf_event_init(struct perf_event *event) | ||
435 | { | ||
436 | int err = 0; | ||
437 | |||
438 | if (!mipspmu || event->cpu >= nr_cpumask_bits || | ||
439 | (event->cpu >= 0 && !cpu_online(event->cpu))) | ||
440 | return ERR_PTR(-ENODEV); | ||
441 | |||
442 | if (!atomic_inc_not_zero(&active_events)) { | ||
443 | if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) { | ||
444 | atomic_dec(&active_events); | ||
445 | return ERR_PTR(-ENOSPC); | ||
446 | } | ||
447 | |||
448 | mutex_lock(&pmu_reserve_mutex); | ||
449 | if (atomic_read(&active_events) == 0) | ||
450 | err = mipspmu_get_irq(); | ||
451 | |||
452 | if (!err) | ||
453 | atomic_inc(&active_events); | ||
454 | mutex_unlock(&pmu_reserve_mutex); | ||
455 | } | ||
456 | |||
457 | if (err) | ||
458 | return ERR_PTR(err); | ||
459 | |||
460 | err = __hw_perf_event_init(event); | ||
461 | if (err) | ||
462 | hw_perf_event_destroy(event); | ||
463 | |||
464 | return err ? ERR_PTR(err) : &pmu; | ||
465 | } | ||
466 | |||
467 | void hw_perf_enable(void) | ||
468 | { | ||
469 | if (mipspmu) | ||
470 | mipspmu->start(); | ||
471 | } | ||
472 | |||
473 | void hw_perf_disable(void) | ||
474 | { | ||
475 | if (mipspmu) | ||
476 | mipspmu->stop(); | ||
477 | } | ||
478 | |||
479 | /* This is needed by specific irq handlers in perf_event_*.c */ | 517 | /* This is needed by specific irq handlers in perf_event_*.c */ |
480 | static void | 518 | static void |
481 | handle_associated_event(struct cpu_hw_events *cpuc, | 519 | handle_associated_event(struct cpu_hw_events *cpuc, |
@@ -496,21 +534,13 @@ handle_associated_event(struct cpu_hw_events *cpuc, | |||
496 | #include "perf_event_mipsxx.c" | 534 | #include "perf_event_mipsxx.c" |
497 | 535 | ||
498 | /* Callchain handling code. */ | 536 | /* Callchain handling code. */ |
499 | static inline void | ||
500 | callchain_store(struct perf_callchain_entry *entry, | ||
501 | u64 ip) | ||
502 | { | ||
503 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
504 | entry->ip[entry->nr++] = ip; | ||
505 | } | ||
506 | 537 | ||
507 | /* | 538 | /* |
508 | * Leave userspace callchain empty for now. When we find a way to trace | 539 | * Leave userspace callchain empty for now. When we find a way to trace |
509 | * the user stack callchains, we add here. | 540 | * the user stack callchains, we add here. |
510 | */ | 541 | */ |
511 | static void | 542 | void perf_callchain_user(struct perf_callchain_entry *entry, |
512 | perf_callchain_user(struct pt_regs *regs, | 543 | struct pt_regs *regs) |
513 | struct perf_callchain_entry *entry) | ||
514 | { | 544 | { |
515 | } | 545 | } |
516 | 546 | ||
@@ -523,23 +553,21 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry, | |||
523 | while (!kstack_end(sp)) { | 553 | while (!kstack_end(sp)) { |
524 | addr = *sp++; | 554 | addr = *sp++; |
525 | if (__kernel_text_address(addr)) { | 555 | if (__kernel_text_address(addr)) { |
526 | callchain_store(entry, addr); | 556 | perf_callchain_store(entry, addr); |
527 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 557 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
528 | break; | 558 | break; |
529 | } | 559 | } |
530 | } | 560 | } |
531 | } | 561 | } |
532 | 562 | ||
533 | static void | 563 | void perf_callchain_kernel(struct perf_callchain_entry *entry, |
534 | perf_callchain_kernel(struct pt_regs *regs, | 564 | struct pt_regs *regs) |
535 | struct perf_callchain_entry *entry) | ||
536 | { | 565 | { |
537 | unsigned long sp = regs->regs[29]; | 566 | unsigned long sp = regs->regs[29]; |
538 | #ifdef CONFIG_KALLSYMS | 567 | #ifdef CONFIG_KALLSYMS |
539 | unsigned long ra = regs->regs[31]; | 568 | unsigned long ra = regs->regs[31]; |
540 | unsigned long pc = regs->cp0_epc; | 569 | unsigned long pc = regs->cp0_epc; |
541 | 570 | ||
542 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
543 | if (raw_show_trace || !__kernel_text_address(pc)) { | 571 | if (raw_show_trace || !__kernel_text_address(pc)) { |
544 | unsigned long stack_page = | 572 | unsigned long stack_page = |
545 | (unsigned long)task_stack_page(current); | 573 | (unsigned long)task_stack_page(current); |
@@ -549,53 +577,12 @@ perf_callchain_kernel(struct pt_regs *regs, | |||
549 | return; | 577 | return; |
550 | } | 578 | } |
551 | do { | 579 | do { |
552 | callchain_store(entry, pc); | 580 | perf_callchain_store(entry, pc); |
553 | if (entry->nr >= PERF_MAX_STACK_DEPTH) | 581 | if (entry->nr >= PERF_MAX_STACK_DEPTH) |
554 | break; | 582 | break; |
555 | pc = unwind_stack(current, &sp, pc, &ra); | 583 | pc = unwind_stack(current, &sp, pc, &ra); |
556 | } while (pc); | 584 | } while (pc); |
557 | #else | 585 | #else |
558 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
559 | save_raw_perf_callchain(entry, sp); | 586 | save_raw_perf_callchain(entry, sp); |
560 | #endif | 587 | #endif |
561 | } | 588 | } |
562 | |||
563 | static void | ||
564 | perf_do_callchain(struct pt_regs *regs, | ||
565 | struct perf_callchain_entry *entry) | ||
566 | { | ||
567 | int is_user; | ||
568 | |||
569 | if (!regs) | ||
570 | return; | ||
571 | |||
572 | is_user = user_mode(regs); | ||
573 | |||
574 | if (!current || !current->pid) | ||
575 | return; | ||
576 | |||
577 | if (is_user && current->state != TASK_RUNNING) | ||
578 | return; | ||
579 | |||
580 | if (!is_user) { | ||
581 | perf_callchain_kernel(regs, entry); | ||
582 | if (current->mm) | ||
583 | regs = task_pt_regs(current); | ||
584 | else | ||
585 | regs = NULL; | ||
586 | } | ||
587 | if (regs) | ||
588 | perf_callchain_user(regs, entry); | ||
589 | } | ||
590 | |||
591 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
592 | |||
593 | struct perf_callchain_entry * | ||
594 | perf_callchain(struct pt_regs *regs) | ||
595 | { | ||
596 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
597 | |||
598 | entry->nr = 0; | ||
599 | perf_do_callchain(regs, entry); | ||
600 | return entry; | ||
601 | } | ||
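The perf_event.c conversion above moves from the old enable/disable/unthrottle hooks to the add/del/start/stop callback model, in which an event parked by add() carries PERF_HES_STOPPED | PERF_HES_UPTODATE until start() arms the counter. The toy model below only mimics that state handling with an integer "counter"; it is not the kernel perf API and all names are illustrative.

#include <stdio.h>

#define HES_STOPPED	0x1
#define HES_UPTODATE	0x2

struct toy_event { int state; int armed; long count; };

static void toy_start(struct toy_event *e)
{
	e->state = 0;
	e->armed = 1;			/* stands in for enable_event() */
}

static void toy_stop(struct toy_event *e)
{
	if (!(e->state & HES_STOPPED)) {
		e->armed = 0;		/* stands in for disable_event() */
		e->count += 42;		/* pretend we folded in the hw count */
		e->state |= HES_STOPPED | HES_UPTODATE;
	}
}

static int toy_add(struct toy_event *e, int start_now)
{
	/* add() parks the event stopped and up to date ... */
	e->state = HES_STOPPED | HES_UPTODATE;
	/* ... and only arms it if the caller asked for PERF_EF_START. */
	if (start_now)
		toy_start(e);
	return 0;
}

int main(void)
{
	struct toy_event ev = { 0, 0, 0 };

	toy_add(&ev, 1);
	toy_stop(&ev);
	printf("count=%ld stopped=%d\n", ev.count, !!(ev.state & HES_STOPPED));
	return 0;
}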
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c index 183e0d226669..d9a7db78ed62 100644 --- a/arch/mips/kernel/perf_event_mipsxx.c +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -696,7 +696,7 @@ static int mipsxx_pmu_handle_shared_irq(void) | |||
696 | * interrupt, not NMI. | 696 | * interrupt, not NMI. |
697 | */ | 697 | */ |
698 | if (handled == IRQ_HANDLED) | 698 | if (handled == IRQ_HANDLED) |
699 | perf_event_do_pending(); | 699 | irq_work_run(); |
700 | 700 | ||
701 | #ifdef CONFIG_MIPS_MT_SMP | 701 | #ifdef CONFIG_MIPS_MT_SMP |
702 | read_unlock(&pmuint_rwlock); | 702 | read_unlock(&pmuint_rwlock); |
@@ -1045,6 +1045,8 @@ init_hw_perf_events(void) | |||
1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, | 1045 | "CPU, irq %d%s\n", mipspmu->name, counters, irq, |
1046 | irq < 0 ? " (share with timer interrupt)" : ""); | 1046 | irq < 0 ? " (share with timer interrupt)" : ""); |
1047 | 1047 | ||
1048 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
1049 | |||
1048 | return 0; | 1050 | return 0; |
1049 | } | 1051 | } |
1050 | early_initcall(init_hw_perf_events); | 1052 | early_initcall(init_hw_perf_events); |
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index 5922342bca39..dbbe0ce48d89 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -84,7 +84,7 @@ static int protected_save_fp_context(struct sigcontext __user *sc) | |||
84 | 84 | ||
85 | static int protected_restore_fp_context(struct sigcontext __user *sc) | 85 | static int protected_restore_fp_context(struct sigcontext __user *sc) |
86 | { | 86 | { |
87 | int err, tmp; | 87 | int err, tmp __maybe_unused; |
88 | while (1) { | 88 | while (1) { |
89 | lock_fpu_owner(); | 89 | lock_fpu_owner(); |
90 | own_fpu_inatomic(0); | 90 | own_fpu_inatomic(0); |
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c index a0ed0e052b2e..aae986613795 100644 --- a/arch/mips/kernel/signal32.c +++ b/arch/mips/kernel/signal32.c | |||
@@ -115,7 +115,7 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc) | |||
115 | 115 | ||
116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) | 116 | static int protected_restore_fp_context32(struct sigcontext32 __user *sc) |
117 | { | 117 | { |
118 | int err, tmp; | 118 | int err, tmp __maybe_unused; |
119 | while (1) { | 119 | while (1) { |
120 | lock_fpu_owner(); | 120 | lock_fpu_owner(); |
121 | own_fpu_inatomic(0); | 121 | own_fpu_inatomic(0); |
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c index 383aeb95cb49..32a256101082 100644 --- a/arch/mips/kernel/smp.c +++ b/arch/mips/kernel/smp.c | |||
@@ -193,6 +193,22 @@ void __devinit smp_prepare_boot_cpu(void) | |||
193 | */ | 193 | */ |
194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; | 194 | static struct task_struct *cpu_idle_thread[NR_CPUS]; |
195 | 195 | ||
196 | struct create_idle { | ||
197 | struct work_struct work; | ||
198 | struct task_struct *idle; | ||
199 | struct completion done; | ||
200 | int cpu; | ||
201 | }; | ||
202 | |||
203 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
204 | { | ||
205 | struct create_idle *c_idle = | ||
206 | container_of(work, struct create_idle, work); | ||
207 | |||
208 | c_idle->idle = fork_idle(c_idle->cpu); | ||
209 | complete(&c_idle->done); | ||
210 | } | ||
211 | |||
196 | int __cpuinit __cpu_up(unsigned int cpu) | 212 | int __cpuinit __cpu_up(unsigned int cpu) |
197 | { | 213 | { |
198 | struct task_struct *idle; | 214 | struct task_struct *idle; |
@@ -203,8 +219,19 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
203 | * Linux can schedule processes on this slave. | 219 | * Linux can schedule processes on this slave. |
204 | */ | 220 | */ |
205 | if (!cpu_idle_thread[cpu]) { | 221 | if (!cpu_idle_thread[cpu]) { |
206 | idle = fork_idle(cpu); | 222 | /* |
207 | cpu_idle_thread[cpu] = idle; | 223 | * Schedule work item to avoid forking user task |
224 | * Ported from arch/x86/kernel/smpboot.c | ||
225 | */ | ||
226 | struct create_idle c_idle = { | ||
227 | .cpu = cpu, | ||
228 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
229 | }; | ||
230 | |||
231 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
232 | schedule_work(&c_idle.work); | ||
233 | wait_for_completion(&c_idle.done); | ||
234 | idle = cpu_idle_thread[cpu] = c_idle.idle; | ||
208 | 235 | ||
209 | if (IS_ERR(idle)) | 236 | if (IS_ERR(idle)) |
210 | panic(KERN_ERR "Fork failed for CPU %d", cpu); | 237 | panic(KERN_ERR "Fork failed for CPU %d", cpu); |
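The smp.c hunk above hands fork_idle() off to a workqueue item and waits on an on-stack completion, so __cpu_up() never forks the idle thread directly from a user task's context. A rough userspace analogue of that hand-off-and-wait pattern is sketched below with pthreads standing in for schedule_work()/wait_for_completion(); all names are illustrative.

#include <pthread.h>
#include <stdio.h>

struct create_idle {
	pthread_t worker;
	int cpu;
	long idle;			/* stands in for the task_struct pointer */
	int done;
	pthread_mutex_t lock;
	pthread_cond_t cond;
};

static void *do_fork_idle(void *arg)
{
	struct create_idle *c = arg;

	pthread_mutex_lock(&c->lock);
	c->idle = 1000 + c->cpu;	/* pretend this is fork_idle(cpu) */
	c->done = 1;
	pthread_cond_signal(&c->cond);	/* like complete(&c_idle.done) */
	pthread_mutex_unlock(&c->lock);
	return NULL;
}

int main(void)
{
	struct create_idle c = { .cpu = 3, .lock = PTHREAD_MUTEX_INITIALIZER,
				 .cond = PTHREAD_COND_INITIALIZER };

	pthread_create(&c.worker, NULL, do_fork_idle, &c);	/* schedule_work() */
	pthread_mutex_lock(&c.lock);
	while (!c.done)					/* wait_for_completion() */
		pthread_cond_wait(&c.cond, &c.lock);
	pthread_mutex_unlock(&c.lock);
	pthread_join(c.worker, NULL);
	printf("idle thread handle: %ld\n", c.idle);
	return 0;
}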
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index 1dc6edff45e0..58beabf50b3c 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c | |||
@@ -383,12 +383,11 @@ save_static_function(sys_sysmips); | |||
383 | static int __used noinline | 383 | static int __used noinline |
384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) | 384 | _sys_sysmips(nabi_no_regargs struct pt_regs regs) |
385 | { | 385 | { |
386 | long cmd, arg1, arg2, arg3; | 386 | long cmd, arg1, arg2; |
387 | 387 | ||
388 | cmd = regs.regs[4]; | 388 | cmd = regs.regs[4]; |
389 | arg1 = regs.regs[5]; | 389 | arg1 = regs.regs[5]; |
390 | arg2 = regs.regs[6]; | 390 | arg2 = regs.regs[6]; |
391 | arg3 = regs.regs[7]; | ||
392 | 391 | ||
393 | switch (cmd) { | 392 | switch (cmd) { |
394 | case MIPS_ATOMIC_SET: | 393 | case MIPS_ATOMIC_SET: |
@@ -405,7 +404,7 @@ _sys_sysmips(nabi_no_regargs struct pt_regs regs) | |||
405 | if (arg1 & 2) | 404 | if (arg1 & 2) |
406 | set_thread_flag(TIF_LOGADE); | 405 | set_thread_flag(TIF_LOGADE); |
407 | else | 406 | else |
408 | clear_thread_flag(TIF_FIXADE); | 407 | clear_thread_flag(TIF_LOGADE); |
409 | 408 | ||
410 | return 0; | 409 | return 0; |
411 | 410 | ||
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 6a1fdfef8fde..ab52b7cf3b6b 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -148,9 +148,9 @@ struct { | |||
148 | spinlock_t tc_list_lock; | 148 | spinlock_t tc_list_lock; |
149 | struct list_head tc_list; /* Thread contexts */ | 149 | struct list_head tc_list; /* Thread contexts */ |
150 | } vpecontrol = { | 150 | } vpecontrol = { |
151 | .vpe_list_lock = SPIN_LOCK_UNLOCKED, | 151 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), |
152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), | 152 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), |
153 | .tc_list_lock = SPIN_LOCK_UNLOCKED, | 153 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), |
154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) | 154 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) |
155 | }; | 155 | }; |
156 | 156 | ||
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig index 6e1b77fec7ea..aca93eed8779 100644 --- a/arch/mips/loongson/Kconfig +++ b/arch/mips/loongson/Kconfig | |||
@@ -1,6 +1,7 @@ | |||
1 | if MACH_LOONGSON | ||
2 | |||
1 | choice | 3 | choice |
2 | prompt "Machine Type" | 4 | prompt "Machine Type" |
3 | depends on MACH_LOONGSON | ||
4 | 5 | ||
5 | config LEMOTE_FULOONG2E | 6 | config LEMOTE_FULOONG2E |
6 | bool "Lemote Fuloong(2e) mini-PC" | 7 | bool "Lemote Fuloong(2e) mini-PC" |
@@ -87,3 +88,5 @@ config LOONGSON_UART_BASE | |||
87 | config LOONGSON_MC146818 | 88 | config LOONGSON_MC146818 |
88 | bool | 89 | bool |
89 | default n | 90 | default n |
91 | |||
92 | endif # MACH_LOONGSON | ||
diff --git a/arch/mips/loongson/common/cmdline.c b/arch/mips/loongson/common/cmdline.c index 1a06defc4f7f..353e1d2e41a5 100644 --- a/arch/mips/loongson/common/cmdline.c +++ b/arch/mips/loongson/common/cmdline.c | |||
@@ -44,10 +44,5 @@ void __init prom_init_cmdline(void) | |||
44 | strcat(arcs_cmdline, " "); | 44 | strcat(arcs_cmdline, " "); |
45 | } | 45 | } |
46 | 46 | ||
47 | if ((strstr(arcs_cmdline, "console=")) == NULL) | ||
48 | strcat(arcs_cmdline, " console=ttyS0,115200"); | ||
49 | if ((strstr(arcs_cmdline, "root=")) == NULL) | ||
50 | strcat(arcs_cmdline, " root=/dev/hda1"); | ||
51 | |||
52 | prom_init_machtype(); | 47 | prom_init_machtype(); |
53 | } | 48 | } |
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c index 81fbe6b73f91..2efd5d9dee27 100644 --- a/arch/mips/loongson/common/machtype.c +++ b/arch/mips/loongson/common/machtype.c | |||
@@ -41,7 +41,7 @@ void __weak __init mach_prom_init_machtype(void) | |||
41 | 41 | ||
42 | void __init prom_init_machtype(void) | 42 | void __init prom_init_machtype(void) |
43 | { | 43 | { |
44 | char *p, str[MACHTYPE_LEN]; | 44 | char *p, str[MACHTYPE_LEN + 1]; |
45 | int machtype = MACH_LEMOTE_FL2E; | 45 | int machtype = MACH_LEMOTE_FL2E; |
46 | 46 | ||
47 | mips_machtype = LOONGSON_MACHTYPE; | 47 | mips_machtype = LOONGSON_MACHTYPE; |
@@ -53,6 +53,7 @@ void __init prom_init_machtype(void) | |||
53 | } | 53 | } |
54 | p += strlen("machtype="); | 54 | p += strlen("machtype="); |
55 | strncpy(str, p, MACHTYPE_LEN); | 55 | strncpy(str, p, MACHTYPE_LEN); |
56 | str[MACHTYPE_LEN] = '\0'; | ||
56 | p = strstr(str, " "); | 57 | p = strstr(str, " "); |
57 | if (p) | 58 | if (p) |
58 | *p = '\0'; | 59 | *p = '\0'; |
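The machtype.c fix above works because strncpy() does not NUL-terminate the destination when the source is at least MACHTYPE_LEN bytes long, so the buffer is grown by one byte and terminated explicitly. A self-contained illustration follows; MACHTYPE_LEN is shortened here so the truncation actually triggers, and both the length and the machtype string are illustrative rather than the real Loongson values.

#include <stdio.h>
#include <string.h>

#define MACHTYPE_LEN 20		/* illustrative; the real value lives in the Loongson headers */

int main(void)
{
	const char *p = "machtype=lemote-yeeloong-2f-8.9inches console=tty";
	char str[MACHTYPE_LEN + 1];
	char *sp;

	/* strncpy() stops after MACHTYPE_LEN bytes and appends no '\0'
	 * when the source is that long or longer, so terminate by hand,
	 * exactly as the patch above does. */
	strncpy(str, p + strlen("machtype="), MACHTYPE_LEN);
	str[MACHTYPE_LEN] = '\0';

	sp = strstr(str, " ");
	if (sp)
		*sp = '\0';

	printf("machtype: %s\n", str);
	return 0;
}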
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h index 2701d9500959..2a7d43f4f161 100644 --- a/arch/mips/math-emu/ieee754int.h +++ b/arch/mips/math-emu/ieee754int.h | |||
@@ -70,7 +70,7 @@ | |||
70 | 70 | ||
71 | 71 | ||
72 | #define COMPXSP \ | 72 | #define COMPXSP \ |
73 | unsigned xm; int xe; int xs; int xc | 73 | unsigned xm; int xe; int xs __maybe_unused; int xc |
74 | 74 | ||
75 | #define COMPYSP \ | 75 | #define COMPYSP \ |
76 | unsigned ym; int ye; int ys; int yc | 76 | unsigned ym; int ye; int ys; int yc |
@@ -104,7 +104,7 @@ | |||
104 | 104 | ||
105 | 105 | ||
106 | #define COMPXDP \ | 106 | #define COMPXDP \ |
107 | u64 xm; int xe; int xs; int xc | 107 | u64 xm; int xe; int xs __maybe_unused; int xc |
108 | 108 | ||
109 | #define COMPYDP \ | 109 | #define COMPYDP \ |
110 | u64 ym; int ye; int ys; int yc | 110 | u64 ym; int ye; int ys; int yc |
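Several hunks above (signal.c, signal32.c, ieee754int.h and, further down, mm/init.c) add __maybe_unused to variables that may go unreferenced in some configurations; in the kernel this annotation wraps GCC's unused attribute. A small sketch of what it suppresses, assuming only that mapping:

#include <stdio.h>

/* Assumed expansion of the kernel's __maybe_unused: tell the compiler
 * not to warn when the variable is only referenced in some builds
 * (here, only when DEBUG_PATH is defined). */
#define __maybe_unused __attribute__((unused))

static int compute(int x)
{
	int tmp __maybe_unused = x * 2;

#ifdef DEBUG_PATH
	printf("tmp = %d\n", tmp);
#endif
	return x + 1;
}

int main(void)
{
	printf("%d\n", compute(20));
	return 0;
}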
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 2efcbd24c82f..279599e9a779 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -324,7 +324,7 @@ int page_is_ram(unsigned long pagenr) | |||
324 | void __init paging_init(void) | 324 | void __init paging_init(void) |
325 | { | 325 | { |
326 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | 326 | unsigned long max_zone_pfns[MAX_NR_ZONES]; |
327 | unsigned long lastpfn; | 327 | unsigned long lastpfn __maybe_unused; |
328 | 328 | ||
329 | pagetable_init(); | 329 | pagetable_init(); |
330 | 330 | ||
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index 083d3412d0bc..04f9e17db9d0 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -109,6 +109,8 @@ static bool scratchpad_available(void) | |||
109 | static int scratchpad_offset(int i) | 109 | static int scratchpad_offset(int i) |
110 | { | 110 | { |
111 | BUG(); | 111 | BUG(); |
112 | /* Really unreachable, but evidently some GCC want this. */ | ||
113 | return 0; | ||
112 | } | 114 | } |
113 | #endif | 115 | #endif |
114 | /* | 116 | /* |
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c index b7c03d80c88c..68798f869c0f 100644 --- a/arch/mips/pci/ops-pmcmsp.c +++ b/arch/mips/pci/ops-pmcmsp.c | |||
@@ -308,7 +308,7 @@ static struct resource pci_mem_resource = { | |||
308 | * RETURNS: PCIBIOS_SUCCESSFUL - success | 308 | * RETURNS: PCIBIOS_SUCCESSFUL - success |
309 | * | 309 | * |
310 | ****************************************************************************/ | 310 | ****************************************************************************/ |
311 | static int bpci_interrupt(int irq, void *dev_id) | 311 | static irqreturn_t bpci_interrupt(int irq, void *dev_id) |
312 | { | 312 | { |
313 | struct msp_pci_regs *preg = (void *)PCI_BASE_REG; | 313 | struct msp_pci_regs *preg = (void *)PCI_BASE_REG; |
314 | unsigned int stat = preg->if_status; | 314 | unsigned int stat = preg->if_status; |
@@ -326,7 +326,7 @@ static int bpci_interrupt(int irq, void *dev_id) | |||
326 | /* write to clear all asserted interrupts */ | 326 | /* write to clear all asserted interrupts */ |
327 | preg->if_status = stat; | 327 | preg->if_status = stat; |
328 | 328 | ||
329 | return PCIBIOS_SUCCESSFUL; | 329 | return IRQ_HANDLED; |
330 | } | 330 | } |
331 | 331 | ||
332 | /***************************************************************************** | 332 | /***************************************************************************** |
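The ops-pmcmsp.c change matters because the IRQ core compares a handler's return value against IRQ_NONE to decide whether anyone serviced the interrupt, and PCIBIOS_SUCCESSFUL is 0, the same value, so the old handler made every interrupt look spurious. The toy model below illustrates that contract; it is not the kernel's genirq code and the names are illustrative.

#include <stdio.h>

typedef enum { IRQ_NONE = 0, IRQ_HANDLED = 1 } irqreturn_t;

static irqreturn_t toy_bpci_interrupt(int irq, void *dev_id)
{
	/* pretend we read and cleared the controller's status register */
	return IRQ_HANDLED;
}

static void toy_irq_dispatch(int irq)
{
	/* The core only knows the interrupt was handled if the handler
	 * returns something other than IRQ_NONE. */
	if (toy_bpci_interrupt(irq, NULL) == IRQ_NONE)
		printf("irq %d: nobody cared\n", irq);
	else
		printf("irq %d: handled\n", irq);
}

int main(void)
{
	toy_irq_dispatch(17);
	return 0;
}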
diff --git a/arch/mips/pmc-sierra/Kconfig b/arch/mips/pmc-sierra/Kconfig index c139988bb85d..8d798497c614 100644 --- a/arch/mips/pmc-sierra/Kconfig +++ b/arch/mips/pmc-sierra/Kconfig | |||
@@ -4,15 +4,11 @@ choice | |||
4 | 4 | ||
5 | config PMC_MSP4200_EVAL | 5 | config PMC_MSP4200_EVAL |
6 | bool "PMC-Sierra MSP4200 Eval Board" | 6 | bool "PMC-Sierra MSP4200 Eval Board" |
7 | select CEVT_R4K | ||
8 | select CSRC_R4K | ||
9 | select IRQ_MSP_SLP | 7 | select IRQ_MSP_SLP |
10 | select HW_HAS_PCI | 8 | select HW_HAS_PCI |
11 | 9 | ||
12 | config PMC_MSP4200_GW | 10 | config PMC_MSP4200_GW |
13 | bool "PMC-Sierra MSP4200 VoIP Gateway" | 11 | bool "PMC-Sierra MSP4200 VoIP Gateway" |
14 | select CEVT_R4K | ||
15 | select CSRC_R4K | ||
16 | select IRQ_MSP_SLP | 12 | select IRQ_MSP_SLP |
17 | select HW_HAS_PCI | 13 | select HW_HAS_PCI |
18 | 14 | ||
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c index cca64e15f57f..01df84ce31e2 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_time.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c | |||
@@ -81,7 +81,7 @@ void __init plat_time_init(void) | |||
81 | mips_hpt_frequency = cpu_rate/2; | 81 | mips_hpt_frequency = cpu_rate/2; |
82 | } | 82 | } |
83 | 83 | ||
84 | unsigned int __init get_c0_compare_int(void) | 84 | unsigned int __cpuinit get_c0_compare_int(void) |
85 | { | 85 | { |
86 | return MSP_INT_VPE0_TIMER; | 86 | return MSP_INT_VPE0_TIMER; |
87 | } | 87 | } |
diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 92d2f9298e38..9d773a639513 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h | |||
@@ -139,7 +139,7 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m, | |||
139 | * Atomically reads the value of @v. Note that the guaranteed | 139 | * Atomically reads the value of @v. Note that the guaranteed |
140 | * useful range of an atomic_t is only 24 bits. | 140 | * useful range of an atomic_t is only 24 bits. |
141 | */ | 141 | */ |
142 | #define atomic_read(v) ((v)->counter) | 142 | #define atomic_read(v) (ACCESS_ONCE((v)->counter)) |
143 | 143 | ||
144 | /** | 144 | /** |
145 | * atomic_set - set atomic variable | 145 | * atomic_set - set atomic variable |
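ACCESS_ONCE() wraps the read in a volatile cast, so the compiler emits exactly one load and can neither cache the value across a loop nor re-fetch it. A minimal sketch of the kind of loop this protects; the flag variable and function are illustrative only:

    #include <linux/compiler.h>     /* ACCESS_ONCE() */

    static int flag;                /* hypothetical variable written by another CPU */

    static void wait_for_flag(void)
    {
            /*
             * With a plain "while (!flag)" the compiler may hoist the load
             * out of the loop and spin forever on a stale value; the
             * volatile access forces a fresh load on each iteration.
             */
            while (!ACCESS_ONCE(flag))
                    ;               /* a real caller would cpu_relax() here */
    }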
diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 679dee0bbd08..3d6e60dad9d9 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h | |||
@@ -160,9 +160,10 @@ struct __large_struct { unsigned long buf[100]; }; | |||
160 | 160 | ||
161 | #define __get_user_check(x, ptr, size) \ | 161 | #define __get_user_check(x, ptr, size) \ |
162 | ({ \ | 162 | ({ \ |
163 | const __typeof__(ptr) __guc_ptr = (ptr); \ | ||
163 | int _e; \ | 164 | int _e; \ |
164 | if (likely(__access_ok((unsigned long) (ptr), (size)))) \ | 165 | if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ |
165 | _e = __get_user_nocheck((x), (ptr), (size)); \ | 166 | _e = __get_user_nocheck((x), __guc_ptr, (size)); \ |
166 | else { \ | 167 | else { \ |
167 | _e = -EFAULT; \ | 168 | _e = -EFAULT; \ |
168 | (x) = (__typeof__(x))0; \ | 169 | (x) = (__typeof__(x))0; \ |
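Capturing the pointer in __guc_ptr makes the macro evaluate its argument exactly once, which matters if a caller passes an expression with side effects. A user-space sketch of the same single-evaluation idiom, with made-up names (check_range is hypothetical):

    int check_range(const void *p);          /* hypothetical validity check */

    /* Double evaluation: get_val_bad(x, buf++) advances buf twice. */
    #define get_val_bad(x, p)  ((x) = *(p), check_range(p))

    /* Single evaluation: capture the argument once and reuse the copy,
     * as the kernel macro now does with __guc_ptr. */
    #define get_val(x, p)                           \
    ({                                              \
            __typeof__(p) __gv_p = (p);             \
            (x) = *__gv_p;                          \
            check_range(__gv_p);                    \
    })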
diff --git a/arch/mn10300/mm/cache-inv-icache.c b/arch/mn10300/mm/cache-inv-icache.c index a8933a60b2d4..a6b63dde603d 100644 --- a/arch/mn10300/mm/cache-inv-icache.c +++ b/arch/mn10300/mm/cache-inv-icache.c | |||
@@ -69,7 +69,7 @@ static void flush_icache_page_range(unsigned long start, unsigned long end) | |||
69 | 69 | ||
70 | /* invalidate the icache coverage on that region */ | 70 | /* invalidate the icache coverage on that region */ |
71 | mn10300_local_icache_inv_range2(addr + off, size); | 71 | mn10300_local_icache_inv_range2(addr + off, size); |
72 | smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); | 72 | smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); |
73 | } | 73 | } |
74 | 74 | ||
75 | /** | 75 | /** |
@@ -101,7 +101,7 @@ void flush_icache_range(unsigned long start, unsigned long end) | |||
101 | * directly */ | 101 | * directly */ |
102 | start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; | 102 | start_page = (start >= 0x80000000UL) ? start : 0x80000000UL; |
103 | mn10300_icache_inv_range(start_page, end); | 103 | mn10300_icache_inv_range(start_page, end); |
104 | smp_cache_call(SMP_ICACHE_INV_FLUSH_RANGE, start, end); | 104 | smp_cache_call(SMP_ICACHE_INV_RANGE, start, end); |
105 | if (start_page == start) | 105 | if (start_page == start) |
106 | goto done; | 106 | goto done; |
107 | end = start_page; | 107 | end = start_page; |
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 380d48bacd16..26b8c807f8f1 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h | |||
@@ -33,9 +33,25 @@ | |||
33 | // | 33 | // |
34 | //---------------------------------------------------------------------------- | 34 | //---------------------------------------------------------------------------- |
35 | #include <linux/cache.h> | 35 | #include <linux/cache.h> |
36 | #include <linux/threads.h> | ||
36 | #include <asm/types.h> | 37 | #include <asm/types.h> |
37 | #include <asm/mmu.h> | 38 | #include <asm/mmu.h> |
38 | 39 | ||
40 | /* | ||
41 | * We only have to have statically allocated lppaca structs on | ||
42 | * legacy iSeries, which supports at most 64 cpus. | ||
43 | */ | ||
44 | #ifdef CONFIG_PPC_ISERIES | ||
45 | #if NR_CPUS < 64 | ||
46 | #define NR_LPPACAS NR_CPUS | ||
47 | #else | ||
48 | #define NR_LPPACAS 64 | ||
49 | #endif | ||
50 | #else /* not iSeries */ | ||
51 | #define NR_LPPACAS 1 | ||
52 | #endif | ||
53 | |||
54 | |||
39 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k | 55 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k |
40 | * alignment is sufficient to prevent this */ | 56 | * alignment is sufficient to prevent this */ |
41 | struct lppaca { | 57 | struct lppaca { |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 991d5998d6be..fe56a23e1ff0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -240,6 +240,12 @@ struct machdep_calls { | |||
240 | * claims to support kexec. | 240 | * claims to support kexec. |
241 | */ | 241 | */ |
242 | int (*machine_kexec_prepare)(struct kimage *image); | 242 | int (*machine_kexec_prepare)(struct kimage *image); |
243 | |||
244 | /* Called to perform the _real_ kexec. | ||
245 | * Do NOT allocate memory or fail here. We are past the point of | ||
246 | * no return. | ||
247 | */ | ||
248 | void (*machine_kexec)(struct kimage *image); | ||
243 | #endif /* CONFIG_KEXEC */ | 249 | #endif /* CONFIG_KEXEC */ |
244 | 250 | ||
245 | #ifdef CONFIG_SUSPEND | 251 | #ifdef CONFIG_SUSPEND |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 49a170af8145..a5f8672eeff3 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image) | |||
87 | 87 | ||
88 | save_ftrace_enabled = __ftrace_enabled_save(); | 88 | save_ftrace_enabled = __ftrace_enabled_save(); |
89 | 89 | ||
90 | default_machine_kexec(image); | 90 | if (ppc_md.machine_kexec) |
91 | ppc_md.machine_kexec(image); | ||
92 | else | ||
93 | default_machine_kexec(image); | ||
91 | 94 | ||
92 | __ftrace_enabled_restore(save_ftrace_enabled); | 95 | __ftrace_enabled_restore(save_ftrace_enabled); |
93 | 96 | ||
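Taken together, these two hunks add a standard optional platform hook: a board can install its own machine_kexec in its machdep_calls, and the generic code falls back to default_machine_kexec() when the pointer is NULL. A reduced sketch of the dispatch (the structure below is abbreviated for illustration, not the full ppc_md definition):

    struct kimage;                                  /* opaque here */

    struct machdep_calls_sketch {
            /* optional override; NULL means "use the default path" */
            void (*machine_kexec)(struct kimage *image);
    };

    extern struct machdep_calls_sketch ppc_md_sketch;
    void default_machine_kexec(struct kimage *image);

    static void do_kexec(struct kimage *image)
    {
            if (ppc_md_sketch.machine_kexec)
                    ppc_md_sketch.machine_kexec(image);
            else
                    default_machine_kexec(image);
    }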
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index ebf9846f3c3b..f4adf89d7614 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -27,20 +27,6 @@ extern unsigned long __toc_start; | |||
27 | #ifdef CONFIG_PPC_BOOK3S | 27 | #ifdef CONFIG_PPC_BOOK3S |
28 | 28 | ||
29 | /* | 29 | /* |
30 | * We only have to have statically allocated lppaca structs on | ||
31 | * legacy iSeries, which supports at most 64 cpus. | ||
32 | */ | ||
33 | #ifdef CONFIG_PPC_ISERIES | ||
34 | #if NR_CPUS < 64 | ||
35 | #define NR_LPPACAS NR_CPUS | ||
36 | #else | ||
37 | #define NR_LPPACAS 64 | ||
38 | #endif | ||
39 | #else /* not iSeries */ | ||
40 | #define NR_LPPACAS 1 | ||
41 | #endif | ||
42 | |||
43 | /* | ||
44 | * The structure which the hypervisor knows about - this structure | 30 | * The structure which the hypervisor knows about - this structure |
45 | * should not cross a page boundary. The vpa_init/register_vpa call | 31 | * should not cross a page boundary. The vpa_init/register_vpa call |
46 | * is now known to fail if the lppaca structure crosses a page | 32 | * is now known to fail if the lppaca structure crosses a page |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7a1d5cb76932..8303a6c65ef7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) | |||
353 | prime_debug_regs(new_thread); | 353 | prime_debug_regs(new_thread); |
354 | } | 354 | } |
355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
356 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
356 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
357 | { | 358 | { |
358 | if (thread->dabr) { | 359 | if (thread->dabr) { |
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
360 | set_dabr(0); | 361 | set_dabr(0); |
361 | } | 362 | } |
362 | } | 363 | } |
364 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
363 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 365 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
364 | 366 | ||
365 | int set_dabr(unsigned long dabr) | 367 | int set_dabr(unsigned long dabr) |
@@ -670,11 +672,11 @@ void flush_thread(void) | |||
670 | { | 672 | { |
671 | discard_lazy_cpu_state(); | 673 | discard_lazy_cpu_state(); |
672 | 674 | ||
673 | #ifdef CONFIG_HAVE_HW_BREAKPOINTS | 675 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
674 | flush_ptrace_hw_breakpoint(current); | 676 | flush_ptrace_hw_breakpoint(current); |
675 | #else /* CONFIG_HAVE_HW_BREAKPOINTS */ | 677 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
676 | set_debug_reg_defaults(¤t->thread); | 678 | set_debug_reg_defaults(¤t->thread); |
677 | #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ | 679 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
678 | } | 680 | } |
679 | 681 | ||
680 | void | 682 | void |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index fd4812329570..0dc95c0aa3be 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -1516,7 +1516,8 @@ int start_topology_update(void) | |||
1516 | { | 1516 | { |
1517 | int rc = 0; | 1517 | int rc = 0; |
1518 | 1518 | ||
1519 | if (firmware_has_feature(FW_FEATURE_VPHN) && | 1519 | /* Disabled until races with load balancing are fixed */ |
1520 | if (0 && firmware_has_feature(FW_FEATURE_VPHN) && | ||
1520 | get_lppaca()->shared_proc) { | 1521 | get_lppaca()->shared_proc) { |
1521 | vphn_enabled = 1; | 1522 | vphn_enabled = 1; |
1522 | setup_cpu_associativity_change_counters(); | 1523 | setup_cpu_associativity_change_counters(); |
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 1ec06576f619..c14d09f614f3 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | |||
38 | * needs to be flushed. This function will either perform the flush | 38 | * needs to be flushed. This function will either perform the flush |
39 | * immediately or will batch it up if the current CPU has an active | 39 | * immediately or will batch it up if the current CPU has an active |
40 | * batch on it. | 40 | * batch on it. |
41 | * | ||
42 | * Must be called from within some kind of spinlock/non-preempt region... | ||
43 | */ | 41 | */ |
44 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | 42 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, |
45 | pte_t *ptep, unsigned long pte, int huge) | 43 | pte_t *ptep, unsigned long pte, int huge) |
46 | { | 44 | { |
47 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 45 | struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); |
48 | unsigned long vsid, vaddr; | 46 | unsigned long vsid, vaddr; |
49 | unsigned int psize; | 47 | unsigned int psize; |
50 | int ssize; | 48 | int ssize; |
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
99 | */ | 97 | */ |
100 | if (!batch->active) { | 98 | if (!batch->active) { |
101 | flush_hash_page(vaddr, rpte, psize, ssize, 0); | 99 | flush_hash_page(vaddr, rpte, psize, ssize, 0); |
100 | put_cpu_var(ppc64_tlb_batch); | ||
102 | return; | 101 | return; |
103 | } | 102 | } |
104 | 103 | ||
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
127 | batch->index = ++i; | 126 | batch->index = ++i; |
128 | if (i >= PPC64_TLB_BATCH_NR) | 127 | if (i >= PPC64_TLB_BATCH_NR) |
129 | __flush_tlb_pending(batch); | 128 | __flush_tlb_pending(batch); |
129 | put_cpu_var(ppc64_tlb_batch); | ||
130 | } | 130 | } |
131 | 131 | ||
132 | /* | 132 | /* |
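__get_cpu_var() assumes the caller has already disabled preemption; get_cpu_var() takes that guarantee itself and must be balanced by put_cpu_var() on every exit path, which is why the early return also gains one. A minimal sketch of the pairing, using a made-up per-CPU counter:

    #include <linux/percpu.h>

    static DEFINE_PER_CPU(unsigned long, sketch_count);

    static void bump_local_count(void)
    {
            /* get_cpu_var() disables preemption and returns this CPU's
             * instance; it must be balanced on every path out. */
            unsigned long *cnt = &get_cpu_var(sketch_count);

            (*cnt)++;
            put_cpu_var(sketch_count);      /* re-enable preemption */
    }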
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index fdb7384c0c4f..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c | |||
@@ -242,8 +242,8 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ | 242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ |
243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); | 243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); |
244 | 244 | ||
245 | for (i = 0; i < NR_CPUS; i++) { | 245 | for (i = 0; i < NR_LPPACAS; i++) { |
246 | if (lppaca_of(i).dyn_proc_status >= 2) | 246 | if (lppaca[i].dyn_proc_status >= 2) |
247 | continue; | 247 | continue; |
248 | 248 | ||
249 | snprintf(p, 32 - (p - buf), "@%d", i); | 249 | snprintf(p, 32 - (p - buf), "@%d", i); |
@@ -251,7 +251,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
251 | 251 | ||
252 | dt_prop_str(dt, "device_type", device_type_cpu); | 252 | dt_prop_str(dt, "device_type", device_type_cpu); |
253 | 253 | ||
254 | index = lppaca_of(i).dyn_hv_phys_proc_index; | 254 | index = lppaca[i].dyn_hv_phys_proc_index; |
255 | d = &xIoHriProcessorVpd[index]; | 255 | d = &xIoHriProcessorVpd[index]; |
256 | 256 | ||
257 | dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); | 257 | dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); |
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..2946ae10fbfd 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -680,6 +680,7 @@ void * __init iSeries_early_setup(void) | |||
680 | * on but calling this function multiple times is fine. | 680 | * on but calling this function multiple times is fine. |
681 | */ | 681 | */ |
682 | identify_cpu(0, mfspr(SPRN_PVR)); | 682 | identify_cpu(0, mfspr(SPRN_PVR)); |
683 | initialise_paca(&boot_paca, 0); | ||
683 | 684 | ||
684 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 685 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
685 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 686 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index a78701da775b..4a5350037c8f 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
5 | 5 | ||
6 | extern void __nosave_begin, __nosave_end; | 6 | extern long __nosave_begin, __nosave_end; |
7 | extern long __machvec_start, __machvec_end; | 7 | extern long __machvec_start, __machvec_end; |
8 | extern char __uncached_start, __uncached_end; | 8 | extern char __uncached_start, __uncached_end; |
9 | extern char _ebss[]; | 9 | extern char _ebss[]; |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 672944f5b19c..e53b4b38bd11 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
15 | #include <linux/sh_timer.h> | 15 | #include <linux/sh_timer.h> |
16 | #include <linux/serial_sci.h> | 16 | #include <linux/serial_sci.h> |
17 | #include <asm/machtypes.h> | 17 | #include <generated/machtypes.h> |
18 | 18 | ||
19 | static struct resource rtc_resources[] = { | 19 | static struct resource rtc_resources[] = { |
20 | [0] = { | 20 | [0] = { |
@@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = { | |||
255 | 255 | ||
256 | void __init plat_early_device_setup(void) | 256 | void __init plat_early_device_setup(void) |
257 | { | 257 | { |
258 | struct platform_device *dev[1]; | ||
259 | |||
258 | if (mach_is_rts7751r2d()) { | 260 | if (mach_is_rts7751r2d()) { |
259 | scif_platform_data.scscr |= SCSCR_CKE1; | 261 | scif_platform_data.scscr |= SCSCR_CKE1; |
260 | early_platform_add_devices(&scif_device, 1); | 262 | dev[0] = &scif_device; |
263 | early_platform_add_devices(dev, 1); | ||
261 | } else { | 264 | } else { |
262 | early_platform_add_devices(&sci_device, 1); | 265 | dev[0] = &sci_device; |
263 | early_platform_add_devices(&scif_device, 1); | 266 | early_platform_add_devices(dev, 1); |
267 | dev[0] = &scif_device; | ||
268 | early_platform_add_devices(dev, 1); | ||
264 | } | 269 | } |
265 | 270 | ||
266 | early_platform_add_devices(sh7750_early_devices, | 271 | early_platform_add_devices(sh7750_early_devices, |
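early_platform_add_devices() expects an array of platform_device pointers; passing &scif_device hands it the address of the device structure itself, which is the wrong pointer type, so the fix wraps the single device in a one-element pointer array. A minimal sketch of the distinction, with shortened made-up names:

    struct pdev { int id; };

    void add_devices(struct pdev **devs, int n);    /* wants a pointer array */

    static struct pdev scif = { .id = 1 };

    static void register_scif(void)
    {
            struct pdev *dev[1];

            /* add_devices(&scif, 1) would pass a struct pdev *, not a
             * struct pdev **; wrap the single device in an array instead. */
            dev[0] = &scif;
            add_devices(dev, 1);
    }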
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c index faa8f86c0db4..0901b2f14e15 100644 --- a/arch/sh/lib/delay.c +++ b/arch/sh/lib/delay.c | |||
@@ -10,6 +10,16 @@ | |||
10 | void __delay(unsigned long loops) | 10 | void __delay(unsigned long loops) |
11 | { | 11 | { |
12 | __asm__ __volatile__( | 12 | __asm__ __volatile__( |
13 | /* | ||
14 | * ST40-300 appears to have an issue with this code, | ||
15 | * normally taking two cycles each loop, as with all | ||
16 | * other SH variants. If however the branch and the | ||
17 | * delay slot straddle an 8 byte boundary, this increases | ||
18 | * to 3 cycles. | ||
19 | * This align directive ensures this doesn't occur. | ||
20 | */ | ||
21 | ".balign 8\n\t" | ||
22 | |||
13 | "tst %0, %0\n\t" | 23 | "tst %0, %0\n\t" |
14 | "1:\t" | 24 | "1:\t" |
15 | "bf/s 1b\n\t" | 25 | "bf/s 1b\n\t" |
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 88d3dc3d30d5..5a580ea04429 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
@@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
108 | kunmap_atomic(vfrom, KM_USER0); | 108 | kunmap_atomic(vfrom, KM_USER0); |
109 | } | 109 | } |
110 | 110 | ||
111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | 111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || |
112 | (vma->vm_flags & VM_EXEC)) | ||
112 | __flush_purge_region(vto, PAGE_SIZE); | 113 | __flush_purge_region(vto, PAGE_SIZE); |
113 | 114 | ||
114 | kunmap_atomic(vto, KM_USER1); | 115 | kunmap_atomic(vto, KM_USER1); |
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c index 646aa78ba5fd..46a823882437 100644 --- a/arch/x86/boot/compressed/mkpiggy.c +++ b/arch/x86/boot/compressed/mkpiggy.c | |||
@@ -62,7 +62,12 @@ int main(int argc, char *argv[]) | |||
62 | if (fseek(f, -4L, SEEK_END)) { | 62 | if (fseek(f, -4L, SEEK_END)) { |
63 | perror(argv[1]); | 63 | perror(argv[1]); |
64 | } | 64 | } |
65 | fread(&olen, sizeof olen, 1, f); | 65 | |
66 | if (fread(&olen, sizeof(olen), 1, f) != 1) { | ||
67 | perror(argv[1]); | ||
68 | return 1; | ||
69 | } | ||
70 | |||
66 | ilen = ftell(f); | 71 | ilen = ftell(f); |
67 | olen = getle32(&olen); | 72 | olen = getle32(&olen); |
68 | fclose(f); | 73 | fclose(f); |
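fread() reports the number of complete items read; ignoring it can leave olen uninitialized on a short read and trips -Wunused-result builds. A stand-alone sketch of the checked read, with an arbitrary file path and function name:

    #include <stdio.h>
    #include <stdint.h>

    int read_trailer(const char *path, uint32_t *olen)
    {
            FILE *f = fopen(path, "rb");

            if (!f || fseek(f, -4L, SEEK_END)) {
                    perror(path);
                    goto fail;
            }
            /* fread() returns the number of items read: 1 on success. */
            if (fread(olen, sizeof(*olen), 1, f) != 1) {
                    perror(path);
                    goto fail;
            }
            fclose(f);
            return 0;
    fail:
            if (f)
                    fclose(f);
            return 1;
    }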
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h index 211ca3f7fd16..4ea15ca89b2b 100644 --- a/arch/x86/include/asm/acpi.h +++ b/arch/x86/include/asm/acpi.h | |||
@@ -88,6 +88,7 @@ extern int acpi_disabled; | |||
88 | extern int acpi_pci_disabled; | 88 | extern int acpi_pci_disabled; |
89 | extern int acpi_skip_timer_override; | 89 | extern int acpi_skip_timer_override; |
90 | extern int acpi_use_timer_override; | 90 | extern int acpi_use_timer_override; |
91 | extern int acpi_fix_pin2_polarity; | ||
91 | 92 | ||
92 | extern u8 acpi_sci_flags; | 93 | extern u8 acpi_sci_flags; |
93 | extern int acpi_sci_override_gsi; | 94 | extern int acpi_sci_override_gsi; |
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h new file mode 100644 index 000000000000..e656ad8c0a2e --- /dev/null +++ b/arch/x86/include/asm/ce4100.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _ASM_CE4100_H_ | ||
2 | #define _ASM_CE4100_H_ | ||
3 | |||
4 | int ce4100_pci_init(void); | ||
5 | |||
6 | #endif | ||
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4d0dfa0d998e..43a18c77676d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
@@ -36,6 +36,11 @@ | |||
36 | #define MSR_IA32_PERFCTR1 0x000000c2 | 36 | #define MSR_IA32_PERFCTR1 0x000000c2 |
37 | #define MSR_FSB_FREQ 0x000000cd | 37 | #define MSR_FSB_FREQ 0x000000cd |
38 | 38 | ||
39 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | ||
40 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) | ||
41 | #define NHM_C1_AUTO_DEMOTE (1UL << 26) | ||
42 | #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) | ||
43 | |||
39 | #define MSR_MTRRcap 0x000000fe | 44 | #define MSR_MTRRcap 0x000000fe |
40 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 45 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
41 | 46 | ||
diff --git a/arch/x86/include/asm/perf_event_p4.h b/arch/x86/include/asm/perf_event_p4.h index e2f6a99f14ab..cc29086e30cd 100644 --- a/arch/x86/include/asm/perf_event_p4.h +++ b/arch/x86/include/asm/perf_event_p4.h | |||
@@ -22,6 +22,7 @@ | |||
22 | 22 | ||
23 | #define ARCH_P4_CNTRVAL_BITS (40) | 23 | #define ARCH_P4_CNTRVAL_BITS (40) |
24 | #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) | 24 | #define ARCH_P4_CNTRVAL_MASK ((1ULL << ARCH_P4_CNTRVAL_BITS) - 1) |
25 | #define ARCH_P4_UNFLAGGED_BIT ((1ULL) << (ARCH_P4_CNTRVAL_BITS - 1)) | ||
25 | 26 | ||
26 | #define P4_ESCR_EVENT_MASK 0x7e000000U | 27 | #define P4_ESCR_EVENT_MASK 0x7e000000U |
27 | #define P4_ESCR_EVENT_SHIFT 25 | 28 | #define P4_ESCR_EVENT_SHIFT 25 |
diff --git a/arch/x86/include/asm/smpboot_hooks.h b/arch/x86/include/asm/smpboot_hooks.h index 6c22bf353f26..725b77831993 100644 --- a/arch/x86/include/asm/smpboot_hooks.h +++ b/arch/x86/include/asm/smpboot_hooks.h | |||
@@ -34,7 +34,7 @@ static inline void smpboot_restore_warm_reset_vector(void) | |||
34 | */ | 34 | */ |
35 | CMOS_WRITE(0, 0xf); | 35 | CMOS_WRITE(0, 0xf); |
36 | 36 | ||
37 | *((volatile long *)phys_to_virt(apic->trampoline_phys_low)) = 0; | 37 | *((volatile u32 *)phys_to_virt(apic->trampoline_phys_low)) = 0; |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void __init smpboot_setup_io_apic(void) | 40 | static inline void __init smpboot_setup_io_apic(void) |
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h index ce1d54c8a433..3e094af443c3 100644 --- a/arch/x86/include/asm/uv/uv_bau.h +++ b/arch/x86/include/asm/uv/uv_bau.h | |||
@@ -176,7 +176,7 @@ struct bau_msg_payload { | |||
176 | struct bau_msg_header { | 176 | struct bau_msg_header { |
177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ | 177 | unsigned int dest_subnodeid:6; /* must be 0x10, for the LB */ |
178 | /* bits 5:0 */ | 178 | /* bits 5:0 */ |
179 | unsigned int base_dest_nodeid:15; /* nasid (pnode<<1) of */ | 179 | unsigned int base_dest_nodeid:15; /* nasid of the */ |
180 | /* bits 20:6 */ /* first bit in uvhub map */ | 180 | /* bits 20:6 */ /* first bit in uvhub map */ |
181 | unsigned int command:8; /* message type */ | 181 | unsigned int command:8; /* message type */ |
182 | /* bits 28:21 */ | 182 | /* bits 28:21 */ |
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h index f25bdf238a33..c61934fbf22a 100644 --- a/arch/x86/include/asm/xen/page.h +++ b/arch/x86/include/asm/xen/page.h | |||
@@ -29,8 +29,10 @@ typedef struct xpaddr { | |||
29 | 29 | ||
30 | /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ | 30 | /**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/ |
31 | #define INVALID_P2M_ENTRY (~0UL) | 31 | #define INVALID_P2M_ENTRY (~0UL) |
32 | #define FOREIGN_FRAME_BIT (1UL<<31) | 32 | #define FOREIGN_FRAME_BIT (1UL<<(BITS_PER_LONG-1)) |
33 | #define IDENTITY_FRAME_BIT (1UL<<(BITS_PER_LONG-2)) | ||
33 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) | 34 | #define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT) |
35 | #define IDENTITY_FRAME(m) ((m) | IDENTITY_FRAME_BIT) | ||
34 | 36 | ||
35 | /* Maximum amount of memory we can handle in a domain in pages */ | 37 | /* Maximum amount of memory we can handle in a domain in pages */ |
36 | #define MAX_DOMAIN_PAGES \ | 38 | #define MAX_DOMAIN_PAGES \ |
@@ -41,12 +43,18 @@ extern unsigned int machine_to_phys_order; | |||
41 | 43 | ||
42 | extern unsigned long get_phys_to_machine(unsigned long pfn); | 44 | extern unsigned long get_phys_to_machine(unsigned long pfn); |
43 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); | 45 | extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); |
46 | extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | ||
47 | extern unsigned long set_phys_range_identity(unsigned long pfn_s, | ||
48 | unsigned long pfn_e); | ||
44 | 49 | ||
45 | extern int m2p_add_override(unsigned long mfn, struct page *page); | 50 | extern int m2p_add_override(unsigned long mfn, struct page *page); |
46 | extern int m2p_remove_override(struct page *page); | 51 | extern int m2p_remove_override(struct page *page); |
47 | extern struct page *m2p_find_override(unsigned long mfn); | 52 | extern struct page *m2p_find_override(unsigned long mfn); |
48 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); | 53 | extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn); |
49 | 54 | ||
55 | #ifdef CONFIG_XEN_DEBUG_FS | ||
56 | extern int p2m_dump_show(struct seq_file *m, void *v); | ||
57 | #endif | ||
50 | static inline unsigned long pfn_to_mfn(unsigned long pfn) | 58 | static inline unsigned long pfn_to_mfn(unsigned long pfn) |
51 | { | 59 | { |
52 | unsigned long mfn; | 60 | unsigned long mfn; |
@@ -57,7 +65,7 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) | |||
57 | mfn = get_phys_to_machine(pfn); | 65 | mfn = get_phys_to_machine(pfn); |
58 | 66 | ||
59 | if (mfn != INVALID_P2M_ENTRY) | 67 | if (mfn != INVALID_P2M_ENTRY) |
60 | mfn &= ~FOREIGN_FRAME_BIT; | 68 | mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); |
61 | 69 | ||
62 | return mfn; | 70 | return mfn; |
63 | } | 71 | } |
@@ -73,25 +81,44 @@ static inline int phys_to_machine_mapping_valid(unsigned long pfn) | |||
73 | static inline unsigned long mfn_to_pfn(unsigned long mfn) | 81 | static inline unsigned long mfn_to_pfn(unsigned long mfn) |
74 | { | 82 | { |
75 | unsigned long pfn; | 83 | unsigned long pfn; |
84 | int ret = 0; | ||
76 | 85 | ||
77 | if (xen_feature(XENFEAT_auto_translated_physmap)) | 86 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
78 | return mfn; | 87 | return mfn; |
79 | 88 | ||
89 | if (unlikely((mfn >> machine_to_phys_order) != 0)) { | ||
90 | pfn = ~0; | ||
91 | goto try_override; | ||
92 | } | ||
80 | pfn = 0; | 93 | pfn = 0; |
81 | /* | 94 | /* |
82 | * The array access can fail (e.g., device space beyond end of RAM). | 95 | * The array access can fail (e.g., device space beyond end of RAM). |
83 | * In such cases it doesn't matter what we return (we return garbage), | 96 | * In such cases it doesn't matter what we return (we return garbage), |
84 | * but we must handle the fault without crashing! | 97 | * but we must handle the fault without crashing! |
85 | */ | 98 | */ |
86 | __get_user(pfn, &machine_to_phys_mapping[mfn]); | 99 | ret = __get_user(pfn, &machine_to_phys_mapping[mfn]); |
87 | 100 | try_override: | |
88 | /* | 101 | /* ret might be < 0 if there are no entries in the m2p for mfn */ |
89 | * If this appears to be a foreign mfn (because the pfn | 102 | if (ret < 0) |
90 | * doesn't map back to the mfn), then check the local override | 103 | pfn = ~0; |
91 | * table to see if there's a better pfn to use. | 104 | else if (get_phys_to_machine(pfn) != mfn) |
105 | /* | ||
106 | * If this appears to be a foreign mfn (because the pfn | ||
107 | * doesn't map back to the mfn), then check the local override | ||
108 | * table to see if there's a better pfn to use. | ||
109 | * | ||
110 | * m2p_find_override_pfn returns ~0 if it doesn't find anything. | ||
111 | */ | ||
112 | pfn = m2p_find_override_pfn(mfn, ~0); | ||
113 | |||
114 | /* | ||
115 | * pfn is ~0 if there are no entries in the m2p for mfn or if the | ||
116 | * entry doesn't map back to the mfn and m2p_override doesn't have a | ||
117 | * valid entry for it. | ||
92 | */ | 118 | */ |
93 | if (get_phys_to_machine(pfn) != mfn) | 119 | if (pfn == ~0 && |
94 | pfn = m2p_find_override_pfn(mfn, pfn); | 120 | get_phys_to_machine(mfn) == IDENTITY_FRAME(mfn)) |
121 | pfn = mfn; | ||
95 | 122 | ||
96 | return pfn; | 123 | return pfn; |
97 | } | 124 | } |
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h index 2329b3eaf8d3..aa8620989162 100644 --- a/arch/x86/include/asm/xen/pci.h +++ b/arch/x86/include/asm/xen/pci.h | |||
@@ -27,16 +27,16 @@ static inline void __init xen_setup_pirqs(void) | |||
27 | * its own functions. | 27 | * its own functions. |
28 | */ | 28 | */ |
29 | struct xen_pci_frontend_ops { | 29 | struct xen_pci_frontend_ops { |
30 | int (*enable_msi)(struct pci_dev *dev, int **vectors); | 30 | int (*enable_msi)(struct pci_dev *dev, int vectors[]); |
31 | void (*disable_msi)(struct pci_dev *dev); | 31 | void (*disable_msi)(struct pci_dev *dev); |
32 | int (*enable_msix)(struct pci_dev *dev, int **vectors, int nvec); | 32 | int (*enable_msix)(struct pci_dev *dev, int vectors[], int nvec); |
33 | void (*disable_msix)(struct pci_dev *dev); | 33 | void (*disable_msix)(struct pci_dev *dev); |
34 | }; | 34 | }; |
35 | 35 | ||
36 | extern struct xen_pci_frontend_ops *xen_pci_frontend; | 36 | extern struct xen_pci_frontend_ops *xen_pci_frontend; |
37 | 37 | ||
38 | static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, | 38 | static inline int xen_pci_frontend_enable_msi(struct pci_dev *dev, |
39 | int **vectors) | 39 | int vectors[]) |
40 | { | 40 | { |
41 | if (xen_pci_frontend && xen_pci_frontend->enable_msi) | 41 | if (xen_pci_frontend && xen_pci_frontend->enable_msi) |
42 | return xen_pci_frontend->enable_msi(dev, vectors); | 42 | return xen_pci_frontend->enable_msi(dev, vectors); |
@@ -48,7 +48,7 @@ static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev) | |||
48 | xen_pci_frontend->disable_msi(dev); | 48 | xen_pci_frontend->disable_msi(dev); |
49 | } | 49 | } |
50 | static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, | 50 | static inline int xen_pci_frontend_enable_msix(struct pci_dev *dev, |
51 | int **vectors, int nvec) | 51 | int vectors[], int nvec) |
52 | { | 52 | { |
53 | if (xen_pci_frontend && xen_pci_frontend->enable_msix) | 53 | if (xen_pci_frontend && xen_pci_frontend->enable_msix) |
54 | return xen_pci_frontend->enable_msix(dev, vectors, nvec); | 54 | return xen_pci_frontend->enable_msix(dev, vectors, nvec); |
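Switching the parameter from int **vectors to int vectors[] means the caller allocates the vector array and the frontend merely fills it in; in a parameter list, int vectors[] and int *vectors are the same type. A small stand-alone sketch of that calling convention (fill_vectors and the values are hypothetical):

    #include <stdio.h>

    /* The callee writes into storage the caller already owns. */
    static int fill_vectors(int vectors[], int nvec)
    {
            int i;

            for (i = 0; i < nvec; i++)
                    vectors[i] = 100 + i;   /* made-up pirq numbers */
            return 0;
    }

    int main(void)
    {
            int v[3];

            fill_vectors(v, 3);             /* no &v, no allocation in the callee */
            printf("%d %d %d\n", v[0], v[1], v[2]);
            return 0;
    }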
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index b3a71137983a..3e6e2d68f761 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -72,6 +72,7 @@ u8 acpi_sci_flags __initdata; | |||
72 | int acpi_sci_override_gsi __initdata; | 72 | int acpi_sci_override_gsi __initdata; |
73 | int acpi_skip_timer_override __initdata; | 73 | int acpi_skip_timer_override __initdata; |
74 | int acpi_use_timer_override __initdata; | 74 | int acpi_use_timer_override __initdata; |
75 | int acpi_fix_pin2_polarity __initdata; | ||
75 | 76 | ||
76 | #ifdef CONFIG_X86_LOCAL_APIC | 77 | #ifdef CONFIG_X86_LOCAL_APIC |
77 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; | 78 | static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE; |
@@ -415,10 +416,15 @@ acpi_parse_int_src_ovr(struct acpi_subtable_header * header, | |||
415 | return 0; | 416 | return 0; |
416 | } | 417 | } |
417 | 418 | ||
418 | if (acpi_skip_timer_override && | 419 | if (intsrc->source_irq == 0 && intsrc->global_irq == 2) { |
419 | intsrc->source_irq == 0 && intsrc->global_irq == 2) { | 420 | if (acpi_skip_timer_override) { |
420 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); | 421 | printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n"); |
421 | return 0; | 422 | return 0; |
423 | } | ||
424 | if (acpi_fix_pin2_polarity && (intsrc->inti_flags & ACPI_MADT_POLARITY_MASK)) { | ||
425 | intsrc->inti_flags &= ~ACPI_MADT_POLARITY_MASK; | ||
426 | printk(PREFIX "BIOS IRQ0 pin2 override: forcing polarity to high active.\n"); | ||
427 | } | ||
422 | } | 428 | } |
423 | 429 | ||
424 | mp_override_legacy_irq(intsrc->source_irq, | 430 | mp_override_legacy_irq(intsrc->source_irq, |
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c index 51ef31a89be9..51d4e1663066 100644 --- a/arch/x86/kernel/apb_timer.c +++ b/arch/x86/kernel/apb_timer.c | |||
@@ -284,7 +284,7 @@ static int __init apbt_clockevent_register(void) | |||
284 | memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); | 284 | memcpy(&adev->evt, &apbt_clockevent, sizeof(struct clock_event_device)); |
285 | 285 | ||
286 | if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { | 286 | if (mrst_timer_options == MRST_TIMER_LAPIC_APBT) { |
287 | apbt_clockevent.rating = APBT_CLOCKEVENT_RATING - 100; | 287 | adev->evt.rating = APBT_CLOCKEVENT_RATING - 100; |
288 | global_clock_event = &adev->evt; | 288 | global_clock_event = &adev->evt; |
289 | printk(KERN_DEBUG "%s clockevent registered as global\n", | 289 | printk(KERN_DEBUG "%s clockevent registered as global\n", |
290 | global_clock_event->name); | 290 | global_clock_event->name); |
diff --git a/arch/x86/kernel/check.c b/arch/x86/kernel/check.c index 13a389179514..452932d34730 100644 --- a/arch/x86/kernel/check.c +++ b/arch/x86/kernel/check.c | |||
@@ -106,8 +106,8 @@ void __init setup_bios_corruption_check(void) | |||
106 | addr += size; | 106 | addr += size; |
107 | } | 107 | } |
108 | 108 | ||
109 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", | 109 | if (num_scan_areas) |
110 | num_scan_areas); | 110 | printk(KERN_INFO "Scanning %d areas for low memory corruption\n", num_scan_areas); |
111 | } | 111 | } |
112 | 112 | ||
113 | 113 | ||
@@ -143,12 +143,12 @@ static void check_corruption(struct work_struct *dummy) | |||
143 | { | 143 | { |
144 | check_for_bios_corruption(); | 144 | check_for_bios_corruption(); |
145 | schedule_delayed_work(&bios_check_work, | 145 | schedule_delayed_work(&bios_check_work, |
146 | round_jiffies_relative(corruption_check_period*HZ)); | 146 | round_jiffies_relative(corruption_check_period*HZ)); |
147 | } | 147 | } |
148 | 148 | ||
149 | static int start_periodic_check_for_corruption(void) | 149 | static int start_periodic_check_for_corruption(void) |
150 | { | 150 | { |
151 | if (!memory_corruption_check || corruption_check_period == 0) | 151 | if (!num_scan_areas || !memory_corruption_check || corruption_check_period == 0) |
152 | return 0; | 152 | return 0; |
153 | 153 | ||
154 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", | 154 | printk(KERN_INFO "Scanning for low memory corruption every %d seconds\n", |
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index bd1cac747f67..52c93648e492 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
@@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | |||
158 | { | 158 | { |
159 | if (c->x86 == 0x06) { | 159 | if (c->x86 == 0x06) { |
160 | if (cpu_has(c, X86_FEATURE_EST)) | 160 | if (cpu_has(c, X86_FEATURE_EST)) |
161 | printk(KERN_WARNING PFX "Warning: EST-capable CPU " | 161 | printk_once(KERN_WARNING PFX "Warning: EST-capable " |
162 | "detected. The acpi-cpufreq module offers " | 162 | "CPU detected. The acpi-cpufreq module offers " |
163 | "voltage scaling in addition of frequency " | 163 | "voltage scaling in addition to frequency " |
164 | "scaling. You should use that instead of " | 164 | "scaling. You should use that instead of " |
165 | "p4-clockmod, if possible.\n"); | 165 | "p4-clockmod, if possible.\n"); |
166 | switch (c->x86_model) { | 166 | switch (c->x86_model) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c index 4f6f679f2799..4a5a42b842ad 100644 --- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c | |||
@@ -195,7 +195,7 @@ static unsigned int pcc_get_freq(unsigned int cpu) | |||
195 | cmd_incomplete: | 195 | cmd_incomplete: |
196 | iowrite16(0, &pcch_hdr->status); | 196 | iowrite16(0, &pcch_hdr->status); |
197 | spin_unlock(&pcc_lock); | 197 | spin_unlock(&pcc_lock); |
198 | return -EINVAL; | 198 | return 0; |
199 | } | 199 | } |
200 | 200 | ||
201 | static int pcc_cpufreq_target(struct cpufreq_policy *policy, | 201 | static int pcc_cpufreq_target(struct cpufreq_policy *policy, |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 35c7e65e59be..c567dec854f6 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = { | |||
1537 | static int __cpuinit powernowk8_init(void) | 1537 | static int __cpuinit powernowk8_init(void) |
1538 | { | 1538 | { |
1539 | unsigned int i, supported_cpus = 0, cpu; | 1539 | unsigned int i, supported_cpus = 0, cpu; |
1540 | int rv; | ||
1540 | 1541 | ||
1541 | for_each_online_cpu(i) { | 1542 | for_each_online_cpu(i) { |
1542 | int rc; | 1543 | int rc; |
@@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void) | |||
1555 | 1556 | ||
1556 | cpb_capable = true; | 1557 | cpb_capable = true; |
1557 | 1558 | ||
1558 | register_cpu_notifier(&cpb_nb); | ||
1559 | |||
1560 | msrs = msrs_alloc(); | 1559 | msrs = msrs_alloc(); |
1561 | if (!msrs) { | 1560 | if (!msrs) { |
1562 | printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); | 1561 | printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); |
1563 | return -ENOMEM; | 1562 | return -ENOMEM; |
1564 | } | 1563 | } |
1565 | 1564 | ||
1565 | register_cpu_notifier(&cpb_nb); | ||
1566 | |||
1566 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); | 1567 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); |
1567 | 1568 | ||
1568 | for_each_cpu(cpu, cpu_online_mask) { | 1569 | for_each_cpu(cpu, cpu_online_mask) { |
@@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void) | |||
1574 | (cpb_enabled ? "on" : "off")); | 1575 | (cpb_enabled ? "on" : "off")); |
1575 | } | 1576 | } |
1576 | 1577 | ||
1577 | return cpufreq_register_driver(&cpufreq_amd64_driver); | 1578 | rv = cpufreq_register_driver(&cpufreq_amd64_driver); |
1579 | if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { | ||
1580 | unregister_cpu_notifier(&cpb_nb); | ||
1581 | msrs_free(msrs); | ||
1582 | msrs = NULL; | ||
1583 | } | ||
1584 | return rv; | ||
1578 | } | 1585 | } |
1579 | 1586 | ||
1580 | /* driver entry point for term */ | 1587 | /* driver entry point for term */ |
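The reorder makes teardown mirror setup: the CPU notifier is registered only after msrs_alloc() succeeds, and both are undone if cpufreq_register_driver() fails, so a failed module init leaves nothing dangling. A generic sketch of that unwind pattern, with placeholder setup/teardown pairs standing in for the real calls:

    int  alloc_resources(void);             /* hypothetical: msrs_alloc()            */
    void free_resources(void);              /* hypothetical: msrs_free()             */
    int  register_notifier(void);           /* hypothetical: register_cpu_notifier() */
    void unregister_notifier(void);
    int  register_driver(void);             /* hypothetical: cpufreq_register_driver() */

    static int module_init_sketch(void)
    {
            int rv;

            rv = alloc_resources();
            if (rv)
                    return rv;

            rv = register_notifier();
            if (rv)
                    goto out_free;

            rv = register_driver();
            if (rv)
                    goto out_notifier;

            return 0;

    out_notifier:
            unregister_notifier();
    out_free:
            free_resources();
            return rv;
    }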
diff --git a/arch/x86/kernel/cpu/perf_event_p4.c b/arch/x86/kernel/cpu/perf_event_p4.c index f7a0993c1e7c..ff751a9f182b 100644 --- a/arch/x86/kernel/cpu/perf_event_p4.c +++ b/arch/x86/kernel/cpu/perf_event_p4.c | |||
@@ -770,9 +770,14 @@ static inline int p4_pmu_clear_cccr_ovf(struct hw_perf_event *hwc) | |||
770 | return 1; | 770 | return 1; |
771 | } | 771 | } |
772 | 772 | ||
773 | /* it might be unflagged overflow */ | 773 | /* |
774 | rdmsrl(hwc->event_base + hwc->idx, v); | 774 | * In some circumstances the overflow might issue an NMI but did |
775 | if (!(v & ARCH_P4_CNTRVAL_MASK)) | 775 | * not set the P4_CCCR_OVF bit. Because a counter holds a negative value |
776 | * we simply check for the high bit being set; if it's cleared it means |
777 | * the counter has reached zero value and continued counting before | ||
778 | * real NMI signal was received: | ||
779 | */ | ||
780 | if (!(v & ARCH_P4_UNFLAGGED_BIT)) | ||
776 | return 1; | 781 | return 1; |
777 | 782 | ||
778 | return 0; | 783 | return 0; |
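The counter is armed with a negative value, so while it is still counting down the top bit of the 40-bit field stays set; a cleared ARCH_P4_UNFLAGGED_BIT means the counter already wrapped through zero even though P4_CCCR_OVF was not raised. A small arithmetic sketch of the check, with illustrative values:

    #include <stdint.h>
    #include <stdio.h>

    #define CNTRVAL_BITS   40
    #define CNTRVAL_MASK   ((1ULL << CNTRVAL_BITS) - 1)
    #define UNFLAGGED_BIT  (1ULL << (CNTRVAL_BITS - 1))

    int main(void)
    {
            /* counter armed with -1000: bit 39 of the 40-bit value is set */
            uint64_t armed   = (uint64_t)(-1000LL) & CNTRVAL_MASK;
            /* counter that wrapped through zero and kept counting */
            uint64_t wrapped = 5 & CNTRVAL_MASK;

            printf("armed:   overflowed=%d\n", !(armed & UNFLAGGED_BIT));
            printf("wrapped: overflowed=%d\n", !(wrapped & UNFLAGGED_BIT));
            return 0;
    }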
diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 76b8cd953dee..9efbdcc56425 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c | |||
@@ -143,15 +143,10 @@ static void __init ati_bugs(int num, int slot, int func) | |||
143 | 143 | ||
144 | static u32 __init ati_sbx00_rev(int num, int slot, int func) | 144 | static u32 __init ati_sbx00_rev(int num, int slot, int func) |
145 | { | 145 | { |
146 | u32 old, d; | 146 | u32 d; |
147 | 147 | ||
148 | d = read_pci_config(num, slot, func, 0x70); | ||
149 | old = d; | ||
150 | d &= ~(1<<8); | ||
151 | write_pci_config(num, slot, func, 0x70, d); | ||
152 | d = read_pci_config(num, slot, func, 0x8); | 148 | d = read_pci_config(num, slot, func, 0x8); |
153 | d &= 0xff; | 149 | d &= 0xff; |
154 | write_pci_config(num, slot, func, 0x70, old); | ||
155 | 150 | ||
156 | return d; | 151 | return d; |
157 | } | 152 | } |
@@ -160,13 +155,16 @@ static void __init ati_bugs_contd(int num, int slot, int func) | |||
160 | { | 155 | { |
161 | u32 d, rev; | 156 | u32 d, rev; |
162 | 157 | ||
163 | if (acpi_use_timer_override) | ||
164 | return; | ||
165 | |||
166 | rev = ati_sbx00_rev(num, slot, func); | 158 | rev = ati_sbx00_rev(num, slot, func); |
159 | if (rev >= 0x40) | ||
160 | acpi_fix_pin2_polarity = 1; | ||
161 | |||
167 | if (rev > 0x13) | 162 | if (rev > 0x13) |
168 | return; | 163 | return; |
169 | 164 | ||
165 | if (acpi_use_timer_override) | ||
166 | return; | ||
167 | |||
170 | /* check for IRQ0 interrupt swap */ | 168 | /* check for IRQ0 interrupt swap */ |
171 | d = read_pci_config(num, slot, func, 0x64); | 169 | d = read_pci_config(num, slot, func, 0x64); |
172 | if (!(d & (1<<14))) | 170 | if (!(d & (1<<14))) |
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index fc7aae1e2bc7..715037caeb43 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -285,6 +285,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { | |||
285 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), | 285 | DMI_MATCH(DMI_BOARD_NAME, "P4S800"), |
286 | }, | 286 | }, |
287 | }, | 287 | }, |
288 | { /* Handle problems with rebooting on VersaLogic Menlow boards */ | ||
289 | .callback = set_bios_reboot, | ||
290 | .ident = "VersaLogic Menlow based board", | ||
291 | .matches = { | ||
292 | DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"), | ||
293 | DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"), | ||
294 | }, | ||
295 | }, | ||
288 | { } | 296 | { } |
289 | }; | 297 | }; |
290 | 298 | ||
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 54ce246a383e..63fec1531e89 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -2777,6 +2777,8 @@ static int dr_interception(struct vcpu_svm *svm) | |||
2777 | kvm_register_write(&svm->vcpu, reg, val); | 2777 | kvm_register_write(&svm->vcpu, reg, val); |
2778 | } | 2778 | } |
2779 | 2779 | ||
2780 | skip_emulated_instruction(&svm->vcpu); | ||
2781 | |||
2780 | return 1; | 2782 | return 1; |
2781 | } | 2783 | } |
2782 | 2784 | ||
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 7d90ceb882a4..20e3f8702d1e 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c | |||
@@ -229,15 +229,14 @@ void vmalloc_sync_all(void) | |||
229 | for (address = VMALLOC_START & PMD_MASK; | 229 | for (address = VMALLOC_START & PMD_MASK; |
230 | address >= TASK_SIZE && address < FIXADDR_TOP; | 230 | address >= TASK_SIZE && address < FIXADDR_TOP; |
231 | address += PMD_SIZE) { | 231 | address += PMD_SIZE) { |
232 | |||
233 | unsigned long flags; | ||
234 | struct page *page; | 232 | struct page *page; |
235 | 233 | ||
236 | spin_lock_irqsave(&pgd_lock, flags); | 234 | spin_lock(&pgd_lock); |
237 | list_for_each_entry(page, &pgd_list, lru) { | 235 | list_for_each_entry(page, &pgd_list, lru) { |
238 | spinlock_t *pgt_lock; | 236 | spinlock_t *pgt_lock; |
239 | pmd_t *ret; | 237 | pmd_t *ret; |
240 | 238 | ||
239 | /* the pgt_lock only for Xen */ | ||
241 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | 240 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
242 | 241 | ||
243 | spin_lock(pgt_lock); | 242 | spin_lock(pgt_lock); |
@@ -247,7 +246,7 @@ void vmalloc_sync_all(void) | |||
247 | if (!ret) | 246 | if (!ret) |
248 | break; | 247 | break; |
249 | } | 248 | } |
250 | spin_unlock_irqrestore(&pgd_lock, flags); | 249 | spin_unlock(&pgd_lock); |
251 | } | 250 | } |
252 | } | 251 | } |
253 | 252 | ||
@@ -828,6 +827,13 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code, | |||
828 | unsigned long address, unsigned int fault) | 827 | unsigned long address, unsigned int fault) |
829 | { | 828 | { |
830 | if (fault & VM_FAULT_OOM) { | 829 | if (fault & VM_FAULT_OOM) { |
830 | /* Kernel mode? Handle exceptions or die: */ | ||
831 | if (!(error_code & PF_USER)) { | ||
832 | up_read(¤t->mm->mmap_sem); | ||
833 | no_context(regs, error_code, address); | ||
834 | return; | ||
835 | } | ||
836 | |||
831 | out_of_memory(regs, error_code, address); | 837 | out_of_memory(regs, error_code, address); |
832 | } else { | 838 | } else { |
833 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| | 839 | if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON| |
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c index 71a59296af80..c14a5422e152 100644 --- a/arch/x86/mm/init_64.c +++ b/arch/x86/mm/init_64.c | |||
@@ -105,18 +105,18 @@ void sync_global_pgds(unsigned long start, unsigned long end) | |||
105 | 105 | ||
106 | for (address = start; address <= end; address += PGDIR_SIZE) { | 106 | for (address = start; address <= end; address += PGDIR_SIZE) { |
107 | const pgd_t *pgd_ref = pgd_offset_k(address); | 107 | const pgd_t *pgd_ref = pgd_offset_k(address); |
108 | unsigned long flags; | ||
109 | struct page *page; | 108 | struct page *page; |
110 | 109 | ||
111 | if (pgd_none(*pgd_ref)) | 110 | if (pgd_none(*pgd_ref)) |
112 | continue; | 111 | continue; |
113 | 112 | ||
114 | spin_lock_irqsave(&pgd_lock, flags); | 113 | spin_lock(&pgd_lock); |
115 | list_for_each_entry(page, &pgd_list, lru) { | 114 | list_for_each_entry(page, &pgd_list, lru) { |
116 | pgd_t *pgd; | 115 | pgd_t *pgd; |
117 | spinlock_t *pgt_lock; | 116 | spinlock_t *pgt_lock; |
118 | 117 | ||
119 | pgd = (pgd_t *)page_address(page) + pgd_index(address); | 118 | pgd = (pgd_t *)page_address(page) + pgd_index(address); |
119 | /* the pgt_lock only for Xen */ | ||
120 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; | 120 | pgt_lock = &pgd_page_get_mm(page)->page_table_lock; |
121 | spin_lock(pgt_lock); | 121 | spin_lock(pgt_lock); |
122 | 122 | ||
@@ -128,7 +128,7 @@ void sync_global_pgds(unsigned long start, unsigned long end) | |||
128 | 128 | ||
129 | spin_unlock(pgt_lock); | 129 | spin_unlock(pgt_lock); |
130 | } | 130 | } |
131 | spin_unlock_irqrestore(&pgd_lock, flags); | 131 | spin_unlock(&pgd_lock); |
132 | } | 132 | } |
133 | } | 133 | } |
134 | 134 | ||
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c index 95ea1551eebc..1337c51b07d7 100644 --- a/arch/x86/mm/numa_64.c +++ b/arch/x86/mm/numa_64.c | |||
@@ -780,11 +780,7 @@ void __cpuinit numa_add_cpu(int cpu) | |||
780 | int physnid; | 780 | int physnid; |
781 | int nid = NUMA_NO_NODE; | 781 | int nid = NUMA_NO_NODE; |
782 | 782 | ||
783 | apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | 783 | nid = early_cpu_to_node(cpu); |
784 | if (apicid != BAD_APICID) | ||
785 | nid = apicid_to_node[apicid]; | ||
786 | if (nid == NUMA_NO_NODE) | ||
787 | nid = early_cpu_to_node(cpu); | ||
788 | BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); | 784 | BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); |
789 | 785 | ||
790 | /* | 786 | /* |
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c index d343b3c81f3c..90825f2eb0f4 100644 --- a/arch/x86/mm/pageattr.c +++ b/arch/x86/mm/pageattr.c | |||
@@ -57,12 +57,10 @@ static unsigned long direct_pages_count[PG_LEVEL_NUM]; | |||
57 | 57 | ||
58 | void update_page_count(int level, unsigned long pages) | 58 | void update_page_count(int level, unsigned long pages) |
59 | { | 59 | { |
60 | unsigned long flags; | ||
61 | |||
62 | /* Protect against CPA */ | 60 | /* Protect against CPA */ |
63 | spin_lock_irqsave(&pgd_lock, flags); | 61 | spin_lock(&pgd_lock); |
64 | direct_pages_count[level] += pages; | 62 | direct_pages_count[level] += pages; |
65 | spin_unlock_irqrestore(&pgd_lock, flags); | 63 | spin_unlock(&pgd_lock); |
66 | } | 64 | } |
67 | 65 | ||
68 | static void split_page_count(int level) | 66 | static void split_page_count(int level) |
@@ -394,7 +392,7 @@ static int | |||
394 | try_preserve_large_page(pte_t *kpte, unsigned long address, | 392 | try_preserve_large_page(pte_t *kpte, unsigned long address, |
395 | struct cpa_data *cpa) | 393 | struct cpa_data *cpa) |
396 | { | 394 | { |
397 | unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn; | 395 | unsigned long nextpage_addr, numpages, pmask, psize, addr, pfn; |
398 | pte_t new_pte, old_pte, *tmp; | 396 | pte_t new_pte, old_pte, *tmp; |
399 | pgprot_t old_prot, new_prot, req_prot; | 397 | pgprot_t old_prot, new_prot, req_prot; |
400 | int i, do_split = 1; | 398 | int i, do_split = 1; |
@@ -403,7 +401,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
403 | if (cpa->force_split) | 401 | if (cpa->force_split) |
404 | return 1; | 402 | return 1; |
405 | 403 | ||
406 | spin_lock_irqsave(&pgd_lock, flags); | 404 | spin_lock(&pgd_lock); |
407 | /* | 405 | /* |
408 | * Check for races, another CPU might have split this page | 406 | * Check for races, another CPU might have split this page |
409 | * up already: | 407 | * up already: |
@@ -498,14 +496,14 @@ try_preserve_large_page(pte_t *kpte, unsigned long address, | |||
498 | } | 496 | } |
499 | 497 | ||
500 | out_unlock: | 498 | out_unlock: |
501 | spin_unlock_irqrestore(&pgd_lock, flags); | 499 | spin_unlock(&pgd_lock); |
502 | 500 | ||
503 | return do_split; | 501 | return do_split; |
504 | } | 502 | } |
505 | 503 | ||
506 | static int split_large_page(pte_t *kpte, unsigned long address) | 504 | static int split_large_page(pte_t *kpte, unsigned long address) |
507 | { | 505 | { |
508 | unsigned long flags, pfn, pfninc = 1; | 506 | unsigned long pfn, pfninc = 1; |
509 | unsigned int i, level; | 507 | unsigned int i, level; |
510 | pte_t *pbase, *tmp; | 508 | pte_t *pbase, *tmp; |
511 | pgprot_t ref_prot; | 509 | pgprot_t ref_prot; |
@@ -519,7 +517,7 @@ static int split_large_page(pte_t *kpte, unsigned long address) | |||
519 | if (!base) | 517 | if (!base) |
520 | return -ENOMEM; | 518 | return -ENOMEM; |
521 | 519 | ||
522 | spin_lock_irqsave(&pgd_lock, flags); | 520 | spin_lock(&pgd_lock); |
523 | /* | 521 | /* |
524 | * Check for races, another CPU might have split this page | 522 | * Check for races, another CPU might have split this page |
525 | * up for us already: | 523 | * up for us already: |
@@ -591,7 +589,7 @@ out_unlock: | |||
591 | */ | 589 | */ |
592 | if (base) | 590 | if (base) |
593 | __free_page(base); | 591 | __free_page(base); |
594 | spin_unlock_irqrestore(&pgd_lock, flags); | 592 | spin_unlock(&pgd_lock); |
595 | 593 | ||
596 | return 0; | 594 | return 0; |
597 | } | 595 | } |
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c index 500242d3c96d..0113d19c8aa6 100644 --- a/arch/x86/mm/pgtable.c +++ b/arch/x86/mm/pgtable.c | |||
@@ -121,14 +121,12 @@ static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) | |||
121 | 121 | ||
122 | static void pgd_dtor(pgd_t *pgd) | 122 | static void pgd_dtor(pgd_t *pgd) |
123 | { | 123 | { |
124 | unsigned long flags; /* can be called from interrupt context */ | ||
125 | |||
126 | if (SHARED_KERNEL_PMD) | 124 | if (SHARED_KERNEL_PMD) |
127 | return; | 125 | return; |
128 | 126 | ||
129 | spin_lock_irqsave(&pgd_lock, flags); | 127 | spin_lock(&pgd_lock); |
130 | pgd_list_del(pgd); | 128 | pgd_list_del(pgd); |
131 | spin_unlock_irqrestore(&pgd_lock, flags); | 129 | spin_unlock(&pgd_lock); |
132 | } | 130 | } |
133 | 131 | ||
134 | /* | 132 | /* |
@@ -260,7 +258,6 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
260 | { | 258 | { |
261 | pgd_t *pgd; | 259 | pgd_t *pgd; |
262 | pmd_t *pmds[PREALLOCATED_PMDS]; | 260 | pmd_t *pmds[PREALLOCATED_PMDS]; |
263 | unsigned long flags; | ||
264 | 261 | ||
265 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); | 262 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
266 | 263 | ||
@@ -280,12 +277,12 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
280 | * respect to anything walking the pgd_list, so that they | 277 | * respect to anything walking the pgd_list, so that they |
281 | * never see a partially populated pgd. | 278 | * never see a partially populated pgd. |
282 | */ | 279 | */ |
283 | spin_lock_irqsave(&pgd_lock, flags); | 280 | spin_lock(&pgd_lock); |
284 | 281 | ||
285 | pgd_ctor(mm, pgd); | 282 | pgd_ctor(mm, pgd); |
286 | pgd_prepopulate_pmd(mm, pgd, pmds); | 283 | pgd_prepopulate_pmd(mm, pgd, pmds); |
287 | 284 | ||
288 | spin_unlock_irqrestore(&pgd_lock, flags); | 285 | spin_unlock(&pgd_lock); |
289 | 286 | ||
290 | return pgd; | 287 | return pgd; |
291 | 288 | ||
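These pgd_lock conversions all rely on the same rule: spin_lock_irqsave() is only required when the lock can also be taken from interrupt context, and once pgd_lock has no IRQ-path users a plain spin_lock() is sufficient and cheaper. A compact sketch of the two forms, protecting a made-up counter:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(sketch_lock);
    static unsigned long sketch_data;

    /* Process context only: plain lock/unlock is enough. */
    static void update_from_process(void)
    {
            spin_lock(&sketch_lock);
            sketch_data++;
            spin_unlock(&sketch_lock);
    }

    /* If an interrupt handler also took sketch_lock, process context
     * would have to disable local IRQs to avoid self-deadlock. */
    static void update_if_irq_users_exist(void)
    {
            unsigned long flags;

            spin_lock_irqsave(&sketch_lock, flags);
            sketch_data++;
            spin_unlock_irqrestore(&sketch_lock, flags);
    }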
diff --git a/arch/x86/pci/ce4100.c b/arch/x86/pci/ce4100.c index 85b68ef5e809..9260b3eb18d4 100644 --- a/arch/x86/pci/ce4100.c +++ b/arch/x86/pci/ce4100.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/pci.h> | 34 | #include <linux/pci.h> |
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | 36 | ||
37 | #include <asm/ce4100.h> | ||
37 | #include <asm/pci_x86.h> | 38 | #include <asm/pci_x86.h> |
38 | 39 | ||
39 | struct sim_reg { | 40 | struct sim_reg { |
@@ -306,10 +307,10 @@ struct pci_raw_ops ce4100_pci_conf = { | |||
306 | .write = ce4100_conf_write, | 307 | .write = ce4100_conf_write, |
307 | }; | 308 | }; |
308 | 309 | ||
309 | static int __init ce4100_pci_init(void) | 310 | int __init ce4100_pci_init(void) |
310 | { | 311 | { |
311 | init_sim_regs(); | 312 | init_sim_regs(); |
312 | raw_pci_ops = &ce4100_pci_conf; | 313 | raw_pci_ops = &ce4100_pci_conf; |
313 | return 0; | 314 | /* Indicate to the caller that it should invoke pci_legacy_init() */ |
315 | return 1; | ||
314 | } | 316 | } |
315 | subsys_initcall(ce4100_pci_init); | ||
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 25cd4a07d09f..8c4085a95ef1 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -20,7 +20,8 @@ | |||
20 | #include <asm/xen/pci.h> | 20 | #include <asm/xen/pci.h> |
21 | 21 | ||
22 | #ifdef CONFIG_ACPI | 22 | #ifdef CONFIG_ACPI |
23 | static int xen_hvm_register_pirq(u32 gsi, int triggering) | 23 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, |
24 | int trigger, int polarity) | ||
24 | { | 25 | { |
25 | int rc, irq; | 26 | int rc, irq; |
26 | struct physdev_map_pirq map_irq; | 27 | struct physdev_map_pirq map_irq; |
@@ -41,7 +42,7 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) | |||
41 | return -1; | 42 | return -1; |
42 | } | 43 | } |
43 | 44 | ||
44 | if (triggering == ACPI_EDGE_SENSITIVE) { | 45 | if (trigger == ACPI_EDGE_SENSITIVE) { |
45 | shareable = 0; | 46 | shareable = 0; |
46 | name = "ioapic-edge"; | 47 | name = "ioapic-edge"; |
47 | } else { | 48 | } else { |
@@ -55,12 +56,6 @@ static int xen_hvm_register_pirq(u32 gsi, int triggering) | |||
55 | 56 | ||
56 | return irq; | 57 | return irq; |
57 | } | 58 | } |
58 | |||
59 | static int acpi_register_gsi_xen_hvm(struct device *dev, u32 gsi, | ||
60 | int trigger, int polarity) | ||
61 | { | ||
62 | return xen_hvm_register_pirq(gsi, trigger); | ||
63 | } | ||
64 | #endif | 59 | #endif |
65 | 60 | ||
66 | #if defined(CONFIG_PCI_MSI) | 61 | #if defined(CONFIG_PCI_MSI) |
@@ -91,7 +86,7 @@ static void xen_msi_compose_msg(struct pci_dev *pdev, unsigned int pirq, | |||
91 | 86 | ||
92 | static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 87 | static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
93 | { | 88 | { |
94 | int irq, pirq, ret = 0; | 89 | int irq, pirq; |
95 | struct msi_desc *msidesc; | 90 | struct msi_desc *msidesc; |
96 | struct msi_msg msg; | 91 | struct msi_msg msg; |
97 | 92 | ||
@@ -99,39 +94,32 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
99 | __read_msi_msg(msidesc, &msg); | 94 | __read_msi_msg(msidesc, &msg); |
100 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | | 95 | pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | |
101 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); | 96 | ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); |
102 | if (xen_irq_from_pirq(pirq) >= 0 && msg.data == XEN_PIRQ_MSI_DATA) { | 97 | if (msg.data != XEN_PIRQ_MSI_DATA || |
103 | xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? | 98 | xen_irq_from_pirq(pirq) < 0) { |
104 | "msi-x" : "msi", &irq, &pirq, XEN_ALLOC_IRQ); | 99 | pirq = xen_allocate_pirq_msi(dev, msidesc); |
105 | if (irq < 0) | 100 | if (pirq < 0) |
106 | goto error; | 101 | goto error; |
107 | ret = set_irq_msi(irq, msidesc); | 102 | xen_msi_compose_msg(dev, pirq, &msg); |
108 | if (ret < 0) | 103 | __write_msi_msg(msidesc, &msg); |
109 | goto error_while; | 104 | dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); |
110 | printk(KERN_DEBUG "xen: msi already setup: msi --> irq=%d" | 105 | } else { |
111 | " pirq=%d\n", irq, pirq); | 106 | dev_dbg(&dev->dev, |
112 | return 0; | 107 | "xen: msi already bound to pirq=%d\n", pirq); |
113 | } | 108 | } |
114 | xen_allocate_pirq_msi((type == PCI_CAP_ID_MSIX) ? | 109 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, 0, |
115 | "msi-x" : "msi", &irq, &pirq, (XEN_ALLOC_IRQ | XEN_ALLOC_PIRQ)); | 110 | (type == PCI_CAP_ID_MSIX) ? |
116 | if (irq < 0 || pirq < 0) | 111 | "msi-x" : "msi"); |
112 | if (irq < 0) | ||
117 | goto error; | 113 | goto error; |
118 | printk(KERN_DEBUG "xen: msi --> irq=%d, pirq=%d\n", irq, pirq); | 114 | dev_dbg(&dev->dev, |
119 | xen_msi_compose_msg(dev, pirq, &msg); | 115 | "xen: msi --> pirq=%d --> irq=%d\n", pirq, irq); |
120 | ret = set_irq_msi(irq, msidesc); | ||
121 | if (ret < 0) | ||
122 | goto error_while; | ||
123 | write_msi_msg(irq, &msg); | ||
124 | } | 116 | } |
125 | return 0; | 117 | return 0; |
126 | 118 | ||
127 | error_while: | ||
128 | unbind_from_irqhandler(irq, NULL); | ||
129 | error: | 119 | error: |
130 | if (ret == -ENODEV) | 120 | dev_err(&dev->dev, |
131 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | 121 | "Xen PCI frontend has not registered MSI/MSI-X support!\n"); |
132 | " MSI/MSI-X support!\n"); | 122 | return -ENODEV; |
133 | |||
134 | return ret; | ||
135 | } | 123 | } |
136 | 124 | ||
137 | /* | 125 | /* |
@@ -150,35 +138,26 @@ static int xen_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
150 | return -ENOMEM; | 138 | return -ENOMEM; |
151 | 139 | ||
152 | if (type == PCI_CAP_ID_MSIX) | 140 | if (type == PCI_CAP_ID_MSIX) |
153 | ret = xen_pci_frontend_enable_msix(dev, &v, nvec); | 141 | ret = xen_pci_frontend_enable_msix(dev, v, nvec); |
154 | else | 142 | else |
155 | ret = xen_pci_frontend_enable_msi(dev, &v); | 143 | ret = xen_pci_frontend_enable_msi(dev, v); |
156 | if (ret) | 144 | if (ret) |
157 | goto error; | 145 | goto error; |
158 | i = 0; | 146 | i = 0; |
159 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 147 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
160 | irq = xen_allocate_pirq(v[i], 0, /* not sharable */ | 148 | irq = xen_bind_pirq_msi_to_irq(dev, msidesc, v[i], 0, |
161 | (type == PCI_CAP_ID_MSIX) ? | 149 | (type == PCI_CAP_ID_MSIX) ? |
162 | "pcifront-msi-x" : "pcifront-msi"); | 150 | "pcifront-msi-x" : |
163 | if (irq < 0) { | 151 | "pcifront-msi"); |
164 | ret = -1; | 152 | if (irq < 0) |
165 | goto free; | 153 | goto free; |
166 | } | ||
167 | |||
168 | ret = set_irq_msi(irq, msidesc); | ||
169 | if (ret) | ||
170 | goto error_while; | ||
171 | i++; | 154 | i++; |
172 | } | 155 | } |
173 | kfree(v); | 156 | kfree(v); |
174 | return 0; | 157 | return 0; |
175 | 158 | ||
176 | error_while: | ||
177 | unbind_from_irqhandler(irq, NULL); | ||
178 | error: | 159 | error: |
179 | if (ret == -ENODEV) | 160 | dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n"); |
180 | dev_err(&dev->dev, "Xen PCI frontend has not registered" \ | ||
181 | " MSI/MSI-X support!\n"); | ||
182 | free: | 161 | free: |
183 | kfree(v); | 162 | kfree(v); |
184 | return ret; | 163 | return ret; |
@@ -193,6 +172,9 @@ static void xen_teardown_msi_irqs(struct pci_dev *dev) | |||
193 | xen_pci_frontend_disable_msix(dev); | 172 | xen_pci_frontend_disable_msix(dev); |
194 | else | 173 | else |
195 | xen_pci_frontend_disable_msi(dev); | 174 | xen_pci_frontend_disable_msi(dev); |
175 | |||
176 | /* Free the IRQ's and the msidesc using the generic code. */ | ||
177 | default_teardown_msi_irqs(dev); | ||
196 | } | 178 | } |
197 | 179 | ||
198 | static void xen_teardown_msi_irq(unsigned int irq) | 180 | static void xen_teardown_msi_irq(unsigned int irq) |
@@ -200,47 +182,82 @@ static void xen_teardown_msi_irq(unsigned int irq) | |||
200 | xen_destroy_irq(irq); | 182 | xen_destroy_irq(irq); |
201 | } | 183 | } |
202 | 184 | ||
185 | #ifdef CONFIG_XEN_DOM0 | ||
203 | static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | 186 | static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) |
204 | { | 187 | { |
205 | int irq, ret; | 188 | int ret = 0; |
206 | struct msi_desc *msidesc; | 189 | struct msi_desc *msidesc; |
207 | 190 | ||
208 | list_for_each_entry(msidesc, &dev->msi_list, list) { | 191 | list_for_each_entry(msidesc, &dev->msi_list, list) { |
209 | irq = xen_create_msi_irq(dev, msidesc, type); | 192 | struct physdev_map_pirq map_irq; |
210 | if (irq < 0) | ||
211 | return -1; | ||
212 | 193 | ||
213 | ret = set_irq_msi(irq, msidesc); | 194 | memset(&map_irq, 0, sizeof(map_irq)); |
214 | if (ret) | 195 | map_irq.domid = DOMID_SELF; |
215 | goto error; | 196 | map_irq.type = MAP_PIRQ_TYPE_MSI; |
216 | } | 197 | map_irq.index = -1; |
217 | return 0; | 198 | map_irq.pirq = -1; |
199 | map_irq.bus = dev->bus->number; | ||
200 | map_irq.devfn = dev->devfn; | ||
218 | 201 | ||
219 | error: | 202 | if (type == PCI_CAP_ID_MSIX) { |
220 | xen_destroy_irq(irq); | 203 | int pos; |
204 | u32 table_offset, bir; | ||
205 | |||
206 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
207 | |||
208 | pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, | ||
209 | &table_offset); | ||
210 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
211 | |||
212 | map_irq.table_base = pci_resource_start(dev, bir); | ||
213 | map_irq.entry_nr = msidesc->msi_attrib.entry_nr; | ||
214 | } | ||
215 | |||
216 | ret = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
217 | if (ret) { | ||
218 | dev_warn(&dev->dev, "xen map irq failed %d\n", ret); | ||
219 | goto out; | ||
220 | } | ||
221 | |||
222 | ret = xen_bind_pirq_msi_to_irq(dev, msidesc, | ||
223 | map_irq.pirq, map_irq.index, | ||
224 | (type == PCI_CAP_ID_MSIX) ? | ||
225 | "msi-x" : "msi"); | ||
226 | if (ret < 0) | ||
227 | goto out; | ||
228 | } | ||
229 | ret = 0; | ||
230 | out: | ||
221 | return ret; | 231 | return ret; |
222 | } | 232 | } |
223 | #endif | 233 | #endif |
234 | #endif | ||
224 | 235 | ||
225 | static int xen_pcifront_enable_irq(struct pci_dev *dev) | 236 | static int xen_pcifront_enable_irq(struct pci_dev *dev) |
226 | { | 237 | { |
227 | int rc; | 238 | int rc; |
228 | int share = 1; | 239 | int share = 1; |
240 | u8 gsi; | ||
229 | 241 | ||
230 | dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); | 242 | rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); |
231 | 243 | if (rc < 0) { | |
232 | if (dev->irq < 0) | 244 | dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", |
233 | return -EINVAL; | 245 | rc); |
246 | return rc; | ||
247 | } | ||
234 | 248 | ||
235 | if (dev->irq < NR_IRQS_LEGACY) | 249 | if (gsi < NR_IRQS_LEGACY) |
236 | share = 0; | 250 | share = 0; |
237 | 251 | ||
238 | rc = xen_allocate_pirq(dev->irq, share, "pcifront"); | 252 | rc = xen_allocate_pirq(gsi, share, "pcifront"); |
239 | if (rc < 0) { | 253 | if (rc < 0) { |
240 | dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", | 254 | dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n", |
241 | dev->irq, rc); | 255 | gsi, rc); |
242 | return rc; | 256 | return rc; |
243 | } | 257 | } |
258 | |||
259 | dev->irq = rc; | ||
260 | dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); | ||
244 | return 0; | 261 | return 0; |
245 | } | 262 | } |
246 | 263 | ||
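One practical effect of the xen_pcifront_enable_irq() change above is that dev->irq now ends up holding the Linux IRQ that xen_allocate_pirq() returned for the GSI read from PCI_INTERRUPT_LINE, so a PV-guest driver stays completely generic. A hedged consumer sketch (the example_* names are hypothetical; the assumption is that pci_enable_device() reaches this hook through the pcibios_enable_irq pointer, as the Xen PCI setup wires it):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static irqreturn_t example_isr(int irq, void *data)
    {
            /* device-specific handling would go here */
            return IRQ_HANDLED;
    }

    static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
    {
            int rc = pci_enable_device(pdev);   /* reaches xen_pcifront_enable_irq() on pcifront */
            if (rc)
                    return rc;

            /* pdev->irq is now an ordinary Linux IRQ number, mapped from the GSI. */
            rc = request_irq(pdev->irq, example_isr, IRQF_SHARED, "example", pdev);
            if (rc)
                    pci_disable_device(pdev);
            return rc;
    }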
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index d2c0d51a7178..cd6f184c3b3f 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/serial_reg.h> | 15 | #include <linux/serial_reg.h> |
16 | #include <linux/serial_8250.h> | 16 | #include <linux/serial_8250.h> |
17 | 17 | ||
18 | #include <asm/ce4100.h> | ||
18 | #include <asm/setup.h> | 19 | #include <asm/setup.h> |
19 | #include <asm/io.h> | 20 | #include <asm/io.h> |
20 | 21 | ||
@@ -129,4 +130,5 @@ void __init x86_ce4100_early_setup(void) | |||
129 | x86_init.resources.probe_roms = x86_init_noop; | 130 | x86_init.resources.probe_roms = x86_init_noop; |
130 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; | 131 | x86_init.mpparse.get_smp_config = x86_init_uint_noop; |
131 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; | 132 | x86_init.mpparse.find_smp_config = sdv_find_smp_config; |
133 | x86_init.pci.init = ce4100_pci_init; | ||
132 | } | 134 | } |
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index dab874647530..044bda5b3174 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c | |||
@@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size) | |||
140 | * wasted bootmem) and hand off chunks of it to callers. | 140 | * wasted bootmem) and hand off chunks of it to callers. |
141 | */ | 141 | */ |
142 | res = alloc_bootmem(chunk_size); | 142 | res = alloc_bootmem(chunk_size); |
143 | if (!res) | 143 | BUG_ON(!res); |
144 | return NULL; | ||
145 | prom_early_allocated += chunk_size; | 144 | prom_early_allocated += chunk_size; |
146 | memset(res, 0, chunk_size); | 145 | memset(res, 0, chunk_size); |
147 | free_mem = chunk_size; | 146 | free_mem = chunk_size; |
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index df58e9cad96a..a7b38d35c29a 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c | |||
@@ -1364,11 +1364,11 @@ uv_activation_descriptor_init(int node, int pnode) | |||
1364 | memset(bd2, 0, sizeof(struct bau_desc)); | 1364 | memset(bd2, 0, sizeof(struct bau_desc)); |
1365 | bd2->header.sw_ack_flag = 1; | 1365 | bd2->header.sw_ack_flag = 1; |
1366 | /* | 1366 | /* |
1367 | * base_dest_nodeid is the nasid (pnode<<1) of the first uvhub | 1367 | * base_dest_nodeid is the nasid of the first uvhub |
1368 | * in the partition. The bit map will indicate uvhub numbers, | 1368 | * in the partition. The bit map will indicate uvhub numbers, |
1369 | * which are 0-N in a partition. Pnodes are unique system-wide. | 1369 | * which are 0-N in a partition. Pnodes are unique system-wide. |
1370 | */ | 1370 | */ |
1371 | bd2->header.base_dest_nodeid = uv_partition_base_pnode << 1; | 1371 | bd2->header.base_dest_nodeid = UV_PNODE_TO_NASID(uv_partition_base_pnode); |
1372 | bd2->header.dest_subnodeid = 0x10; /* the LB */ | 1372 | bd2->header.dest_subnodeid = 0x10; /* the LB */ |
1373 | bd2->header.command = UV_NET_ENDPOINT_INTD; | 1373 | bd2->header.command = UV_NET_ENDPOINT_INTD; |
1374 | bd2->header.int_both = 1; | 1374 | bd2->header.int_both = 1; |
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig index 5b54892e4bc3..e4343fe488ed 100644 --- a/arch/x86/xen/Kconfig +++ b/arch/x86/xen/Kconfig | |||
@@ -48,3 +48,11 @@ config XEN_DEBUG_FS | |||
48 | help | 48 | help |
49 | Enable statistics output and various tuning options in debugfs. | 49 | Enable statistics output and various tuning options in debugfs. |
50 | Enabling this option may incur a significant performance overhead. | 50 | Enabling this option may incur a significant performance overhead. |
51 | |||
52 | config XEN_DEBUG | ||
53 | bool "Enable Xen debug checks" | ||
54 | depends on XEN | ||
55 | default n | ||
56 | help | ||
57 | Enable various WARN_ON checks in the Xen MMU code. | ||
58 | Enabling this option WILL incur a significant performance overhead. | ||
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index fe02574789c5..49dbd78ec3cb 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1291,7 +1291,7 @@ static int init_hvm_pv_info(int *major, int *minor) | |||
1291 | return 0; | 1291 | return 0; |
1292 | } | 1292 | } |
1293 | 1293 | ||
1294 | void xen_hvm_init_shared_info(void) | 1294 | void __ref xen_hvm_init_shared_info(void) |
1295 | { | 1295 | { |
1296 | int cpu; | 1296 | int cpu; |
1297 | struct xen_add_to_physmap xatp; | 1297 | struct xen_add_to_physmap xatp; |
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 5e92b61ad574..832765c0fb8c 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include <linux/module.h> | 46 | #include <linux/module.h> |
47 | #include <linux/gfp.h> | 47 | #include <linux/gfp.h> |
48 | #include <linux/memblock.h> | 48 | #include <linux/memblock.h> |
49 | #include <linux/seq_file.h> | ||
49 | 50 | ||
50 | #include <asm/pgtable.h> | 51 | #include <asm/pgtable.h> |
51 | #include <asm/tlbflush.h> | 52 | #include <asm/tlbflush.h> |
@@ -416,8 +417,12 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
416 | if (val & _PAGE_PRESENT) { | 417 | if (val & _PAGE_PRESENT) { |
417 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; | 418 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
418 | pteval_t flags = val & PTE_FLAGS_MASK; | 419 | pteval_t flags = val & PTE_FLAGS_MASK; |
419 | unsigned long mfn = pfn_to_mfn(pfn); | 420 | unsigned long mfn; |
420 | 421 | ||
422 | if (!xen_feature(XENFEAT_auto_translated_physmap)) | ||
423 | mfn = get_phys_to_machine(pfn); | ||
424 | else | ||
425 | mfn = pfn; | ||
421 | /* | 426 | /* |
422 | * If there's no mfn for the pfn, then just create an | 427 | * If there's no mfn for the pfn, then just create an |
423 | * empty non-present pte. Unfortunately this loses | 428 | * empty non-present pte. Unfortunately this loses |
@@ -427,8 +432,18 @@ static pteval_t pte_pfn_to_mfn(pteval_t val) | |||
427 | if (unlikely(mfn == INVALID_P2M_ENTRY)) { | 432 | if (unlikely(mfn == INVALID_P2M_ENTRY)) { |
428 | mfn = 0; | 433 | mfn = 0; |
429 | flags = 0; | 434 | flags = 0; |
435 | } else { | ||
436 | /* | ||
437 | * It is paramount to do this test _after_ the | ||
438 | * INVALID_P2M_ENTRY check, as INVALID_P2M_ENTRY & | ||
439 | * IDENTITY_FRAME_BIT resolves to true. | ||
440 | */ | ||
441 | mfn &= ~FOREIGN_FRAME_BIT; | ||
442 | if (mfn & IDENTITY_FRAME_BIT) { | ||
443 | mfn &= ~IDENTITY_FRAME_BIT; | ||
444 | flags |= _PAGE_IOMAP; | ||
445 | } | ||
430 | } | 446 | } |
431 | |||
432 | val = ((pteval_t)mfn << PAGE_SHIFT) | flags; | 447 | val = ((pteval_t)mfn << PAGE_SHIFT) | flags; |
433 | } | 448 | } |
434 | 449 | ||
@@ -532,6 +547,41 @@ pte_t xen_make_pte(pteval_t pte) | |||
532 | } | 547 | } |
533 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); | 548 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); |
534 | 549 | ||
550 | #ifdef CONFIG_XEN_DEBUG | ||
551 | pte_t xen_make_pte_debug(pteval_t pte) | ||
552 | { | ||
553 | phys_addr_t addr = (pte & PTE_PFN_MASK); | ||
554 | phys_addr_t other_addr; | ||
555 | bool io_page = false; | ||
556 | pte_t _pte; | ||
557 | |||
558 | if (pte & _PAGE_IOMAP) | ||
559 | io_page = true; | ||
560 | |||
561 | _pte = xen_make_pte(pte); | ||
562 | |||
563 | if (!addr) | ||
564 | return _pte; | ||
565 | |||
566 | if (io_page && | ||
567 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | ||
568 | other_addr = pfn_to_mfn(addr >> PAGE_SHIFT) << PAGE_SHIFT; | ||
569 | WARN(addr != other_addr, | ||
570 | "0x%lx is using VM_IO, but it is 0x%lx!\n", | ||
571 | (unsigned long)addr, (unsigned long)other_addr); | ||
572 | } else { | ||
573 | pteval_t iomap_set = (_pte.pte & PTE_FLAGS_MASK) & _PAGE_IOMAP; | ||
574 | other_addr = (_pte.pte & PTE_PFN_MASK); | ||
575 | WARN((addr == other_addr) && (!io_page) && (!iomap_set), | ||
576 | "0x%lx is missing VM_IO (and wasn't fixed)!\n", | ||
577 | (unsigned long)addr); | ||
578 | } | ||
579 | |||
580 | return _pte; | ||
581 | } | ||
582 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); | ||
583 | #endif | ||
584 | |||
535 | pgd_t xen_make_pgd(pgdval_t pgd) | 585 | pgd_t xen_make_pgd(pgdval_t pgd) |
536 | { | 586 | { |
537 | pgd = pte_pfn_to_mfn(pgd); | 587 | pgd = pte_pfn_to_mfn(pgd); |
@@ -986,10 +1036,9 @@ static void xen_pgd_pin(struct mm_struct *mm) | |||
986 | */ | 1036 | */ |
987 | void xen_mm_pin_all(void) | 1037 | void xen_mm_pin_all(void) |
988 | { | 1038 | { |
989 | unsigned long flags; | ||
990 | struct page *page; | 1039 | struct page *page; |
991 | 1040 | ||
992 | spin_lock_irqsave(&pgd_lock, flags); | 1041 | spin_lock(&pgd_lock); |
993 | 1042 | ||
994 | list_for_each_entry(page, &pgd_list, lru) { | 1043 | list_for_each_entry(page, &pgd_list, lru) { |
995 | if (!PagePinned(page)) { | 1044 | if (!PagePinned(page)) { |
@@ -998,7 +1047,7 @@ void xen_mm_pin_all(void) | |||
998 | } | 1047 | } |
999 | } | 1048 | } |
1000 | 1049 | ||
1001 | spin_unlock_irqrestore(&pgd_lock, flags); | 1050 | spin_unlock(&pgd_lock); |
1002 | } | 1051 | } |
1003 | 1052 | ||
1004 | /* | 1053 | /* |
@@ -1099,10 +1148,9 @@ static void xen_pgd_unpin(struct mm_struct *mm) | |||
1099 | */ | 1148 | */ |
1100 | void xen_mm_unpin_all(void) | 1149 | void xen_mm_unpin_all(void) |
1101 | { | 1150 | { |
1102 | unsigned long flags; | ||
1103 | struct page *page; | 1151 | struct page *page; |
1104 | 1152 | ||
1105 | spin_lock_irqsave(&pgd_lock, flags); | 1153 | spin_lock(&pgd_lock); |
1106 | 1154 | ||
1107 | list_for_each_entry(page, &pgd_list, lru) { | 1155 | list_for_each_entry(page, &pgd_list, lru) { |
1108 | if (PageSavePinned(page)) { | 1156 | if (PageSavePinned(page)) { |
@@ -1112,7 +1160,7 @@ void xen_mm_unpin_all(void) | |||
1112 | } | 1160 | } |
1113 | } | 1161 | } |
1114 | 1162 | ||
1115 | spin_unlock_irqrestore(&pgd_lock, flags); | 1163 | spin_unlock(&pgd_lock); |
1116 | } | 1164 | } |
1117 | 1165 | ||
1118 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 1166 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
@@ -1942,6 +1990,9 @@ __init void xen_ident_map_ISA(void) | |||
1942 | 1990 | ||
1943 | static __init void xen_post_allocator_init(void) | 1991 | static __init void xen_post_allocator_init(void) |
1944 | { | 1992 | { |
1993 | #ifdef CONFIG_XEN_DEBUG | ||
1994 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug); | ||
1995 | #endif | ||
1945 | pv_mmu_ops.set_pte = xen_set_pte; | 1996 | pv_mmu_ops.set_pte = xen_set_pte; |
1946 | pv_mmu_ops.set_pmd = xen_set_pmd; | 1997 | pv_mmu_ops.set_pmd = xen_set_pmd; |
1947 | pv_mmu_ops.set_pud = xen_set_pud; | 1998 | pv_mmu_ops.set_pud = xen_set_pud; |
@@ -2074,7 +2125,7 @@ static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, | |||
2074 | in_frames[i] = virt_to_mfn(vaddr); | 2125 | in_frames[i] = virt_to_mfn(vaddr); |
2075 | 2126 | ||
2076 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); | 2127 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); |
2077 | set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); | 2128 | __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); |
2078 | 2129 | ||
2079 | if (out_frames) | 2130 | if (out_frames) |
2080 | out_frames[i] = virt_to_pfn(vaddr); | 2131 | out_frames[i] = virt_to_pfn(vaddr); |
@@ -2353,6 +2404,18 @@ EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); | |||
2353 | 2404 | ||
2354 | #ifdef CONFIG_XEN_DEBUG_FS | 2405 | #ifdef CONFIG_XEN_DEBUG_FS |
2355 | 2406 | ||
2407 | static int p2m_dump_open(struct inode *inode, struct file *filp) | ||
2408 | { | ||
2409 | return single_open(filp, p2m_dump_show, NULL); | ||
2410 | } | ||
2411 | |||
2412 | static const struct file_operations p2m_dump_fops = { | ||
2413 | .open = p2m_dump_open, | ||
2414 | .read = seq_read, | ||
2415 | .llseek = seq_lseek, | ||
2416 | .release = single_release, | ||
2417 | }; | ||
2418 | |||
2356 | static struct dentry *d_mmu_debug; | 2419 | static struct dentry *d_mmu_debug; |
2357 | 2420 | ||
2358 | static int __init xen_mmu_debugfs(void) | 2421 | static int __init xen_mmu_debugfs(void) |
@@ -2408,6 +2471,7 @@ static int __init xen_mmu_debugfs(void) | |||
2408 | debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, | 2471 | debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, |
2409 | &mmu_stats.prot_commit_batched); | 2472 | &mmu_stats.prot_commit_batched); |
2410 | 2473 | ||
2474 | debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); | ||
2411 | return 0; | 2475 | return 0; |
2412 | } | 2476 | } |
2413 | fs_initcall(xen_mmu_debugfs); | 2477 | fs_initcall(xen_mmu_debugfs); |
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index fd12d7ce7ff9..215a3ce61068 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c | |||
@@ -23,6 +23,129 @@ | |||
23 | * P2M_PER_PAGE depends on the architecture, as a mfn is always | 23 | * P2M_PER_PAGE depends on the architecture, as a mfn is always |
24 | * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to | 24 | * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to |
25 | * 512 and 1024 entries respectively. | 25 | * 512 and 1024 entries respectively. |
26 | * | ||
27 | * In short, these structures contain the Machine Frame Number (MFN) of the PFN. | ||
28 | * | ||
29 | * However, not all entries are filled with MFNs. Specifically, any leaf, | ||
30 | * top, or middle entry that is void is assumed to be "missing". | ||
31 | * So (for example) | ||
32 | * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. | ||
33 | * | ||
34 | * We also have the possibility of setting 1-1 mappings on certain regions, so | ||
35 | * that: | ||
36 | * pfn_to_mfn(0xc0000)=0xc0000 | ||
37 | * | ||
38 | * The benefit of this is that for non-RAM regions (think PCI BARs, or | ||
39 | * ACPI spaces) we can create mappings easily, because the PFN value | ||
40 | * matches the MFN. | ||
41 | * | ||
42 | * For this to work efficiently we have one new page p2m_identity and | ||
43 | * allocate (via reserved_brk) any other pages we need to cover the sides | ||
44 | * (1GB or 4MB boundary violations). All entries in p2m_identity are set to | ||
45 | * INVALID_P2M_ENTRY type (Xen toolstack only recognizes that and MFNs, | ||
46 | * no other fancy value). | ||
47 | * | ||
48 | * On lookup we spot that the entry points to p2m_identity and return the | ||
49 | * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. | ||
50 | * If the entry points to an allocated page, we just proceed as before and | ||
51 | * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in | ||
52 | * appropriate functions (pfn_to_mfn). | ||
53 | * | ||
54 | * The reason for having the IDENTITY_FRAME_BIT instead of just returning the | ||
55 | * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a | ||
56 | * non-identity pfn. To protect ourselves against that, we elect to set (and get) the | ||
57 | * IDENTITY_FRAME_BIT on all identity mapped PFNs. | ||
58 | * | ||
59 | * This simplistic diagram is used to explain the more subtle piece of code. | ||
60 | * There is also a diagram of the P2M at the end that can help. | ||
61 | * Imagine your E820 looking as so: | ||
62 | * | ||
63 | * 1GB 2GB | ||
64 | * /-------------------+---------\/----\ /----------\ /---+-----\ | ||
65 | * | System RAM | Sys RAM ||ACPI| | reserved | | Sys RAM | | ||
66 | * \-------------------+---------/\----/ \----------/ \---+-----/ | ||
67 | * ^- 1029MB ^- 2001MB | ||
68 | * | ||
69 | * [1029MB = 263424 (0x40500), 2001MB = 512256 (0x7D100), | ||
70 | * 2048MB = 524288 (0x80000)] | ||
71 | * | ||
72 | * And dom0_mem=max:3GB,1GB is passed in to the guest, meaning memory past 1GB | ||
73 | * is actually not present (would have to kick the balloon driver to put it in). | ||
74 | * | ||
75 | * When we are told to set the PFNs for identity mapping (see patch: "xen/setup: | ||
76 | * Set identity mapping for non-RAM E820 and E820 gaps.") we pass in the start | ||
77 | * of the PFN and the end PFN (263424 and 512256 respectively). The first step | ||
78 | * is to reserve_brk a top leaf page if the p2m[1] is missing. The top leaf page | ||
79 | * covers 512^2 of page estate (1GB) and in case the start or end PFN is not | ||
80 | * aligned on 512^2*PAGE_SIZE (1GB) we loop on aligned 1GB PFNs from start pfn | ||
81 | * to end pfn. We reserve_brk top leaf pages if they are missing (meaning they | ||
82 | * point to p2m_mid_missing). | ||
83 | * | ||
84 | * With the E820 example above, 263424 is not 1GB aligned so we allocate a | ||
85 | * reserve_brk page which will cover the PFNs estate from 0x40000 to 0x80000. | ||
86 | * Each entry in the allocated page is "missing" (points to p2m_missing). | ||
87 | * | ||
88 | * Next stage is to determine if we need to do a more granular boundary check | ||
89 | * on the 4MB (or 2MB depending on architecture) of the start and end pfns. | ||
90 | * We check if the start pfn and end pfn violate that boundary check, and if | ||
91 | * so reserve_brk a middle (p2m[x][y]) leaf page. This way we have a much finer | ||
92 | * granularity of setting which PFNs are missing and which ones are identity. | ||
93 | * In our example 263424 and 512256 both fail the check so we reserve_brk two | ||
94 | * pages. Populate them with INVALID_P2M_ENTRY (so they both have "missing" | ||
95 | * values) and assign them to p2m[1][2] and p2m[1][488] respectively. | ||
96 | * | ||
97 | * At this point we would at minimum reserve_brk one page, but could be up to | ||
98 | * three. Each call to set_phys_range_identity has at maximum a three page | ||
99 | * cost. If we were to query the P2M at this stage, all those entries from | ||
100 | * start PFN through end PFN (so 1029MB -> 2001MB) would return | ||
101 | * INVALID_P2M_ENTRY ("missing"). | ||
102 | * | ||
103 | * The next step is to walk from the start pfn to the end pfn setting | ||
104 | * the IDENTITY_FRAME_BIT on each PFN. This is done in set_phys_range_identity. | ||
105 | * If we find that the middle leaf is pointing to p2m_missing we can swap it | ||
106 | * over to p2m_identity - this way covering 4MB (or 2MB) PFN space. At this | ||
107 | * point we do not need to worry about boundary alignment (so no need to | ||
108 | * reserve_brk a middle page, figure out which PFNs are "missing" and which | ||
109 | * ones are identity), as that has been done earlier. If we find that the | ||
110 | * middle leaf is not occupied by p2m_identity or p2m_missing, we dereference | ||
111 | * that page (which covers 512 PFNs) and set the appropriate PFN with | ||
112 | * IDENTITY_FRAME_BIT. In our example 263424 and 512256 end up there, and we | ||
113 | * set from p2m[1][2][256->511] and p2m[1][488][0->256] with | ||
114 | * IDENTITY_FRAME_BIT set. | ||
115 | * | ||
116 | * All other regions that are void (or not filled) either point to p2m_missing | ||
117 | * (considered missing) or have the default value of INVALID_P2M_ENTRY (also | ||
118 | * considered missing). In our case, p2m[1][2][0->255] and p2m[1][488][257->511] | ||
119 | * contain the INVALID_P2M_ENTRY value and are considered "missing." | ||
120 | * | ||
121 | * This is what the p2m ends up looking like (for the E820 above) with this | ||
122 | * fabulous drawing: | ||
123 | * | ||
124 | * p2m /--------------\ | ||
125 | * /-----\ | &mfn_list[0],| /-----------------\ | ||
126 | * | 0 |------>| &mfn_list[1],| /---------------\ | ~0, ~0, .. | | ||
127 | * |-----| | ..., ~0, ~0 | | ~0, ~0, [x]---+----->| IDENTITY [@256] | | ||
128 | * | 1 |---\ \--------------/ | [p2m_identity]+\ | IDENTITY [@257] | | ||
129 | * |-----| \ | [p2m_identity]+\\ | .... | | ||
130 | * | 2 |--\ \-------------------->| ... | \\ \----------------/ | ||
131 | * |-----| \ \---------------/ \\ | ||
132 | * | 3 |\ \ \\ p2m_identity | ||
133 | * |-----| \ \-------------------->/---------------\ /-----------------\ | ||
134 | * | .. +->+ | [p2m_identity]+-->| ~0, ~0, ~0, ... | | ||
135 | * \-----/ / | [p2m_identity]+-->| ..., ~0 | | ||
136 | * / /---------------\ | .... | \-----------------/ | ||
137 | * / | IDENTITY[@0] | /-+-[x], ~0, ~0.. | | ||
138 | * / | IDENTITY[@256]|<----/ \---------------/ | ||
139 | * / | ~0, ~0, .... | | ||
140 | * | \---------------/ | ||
141 | * | | ||
142 | * p2m_missing p2m_missing | ||
143 | * /------------------\ /------------\ | ||
144 | * | [p2m_mid_missing]+---->| ~0, ~0, ~0 | | ||
145 | * | [p2m_mid_missing]+---->| ..., ~0 | | ||
146 | * \------------------/ \------------/ | ||
147 | * | ||
148 | * where ~0 is INVALID_P2M_ENTRY. IDENTITY is (PFN | IDENTITY_BIT) | ||
26 | */ | 149 | */ |
27 | 150 | ||
28 | #include <linux/init.h> | 151 | #include <linux/init.h> |
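To make the index arithmetic in the comment above concrete, here is a small standalone sketch of the topidx/mididx/idx split for the two example PFNs (assuming the 64-bit values P2M_PER_PAGE = 512 and P2M_MID_PER_PAGE = 512; the program is illustrative only and not part of this patch):

    #include <stdio.h>

    #define P2M_PER_PAGE        512UL   /* entries per leaf page (64-bit)  */
    #define P2M_MID_PER_PAGE    512UL   /* leaf pointers per middle page   */

    static void dump_indices(unsigned long pfn)
    {
            unsigned long topidx = pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
            unsigned long mididx = (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
            unsigned long idx    = pfn % P2M_PER_PAGE;

            printf("pfn %lu (0x%lx) -> p2m[%lu][%lu][%lu]\n",
                   pfn, pfn, topidx, mididx, idx);
    }

    int main(void)
    {
            dump_indices(263424);   /* 1029MB boundary -> p2m[1][2][256]   */
            dump_indices(512256);   /* 2001MB boundary -> p2m[1][488][256] */
            return 0;
    }

The output is consistent with the p2m[1][2][256->511] and p2m[1][488][0->256] ranges quoted in the comment, i.e. the two leaves that have to be reserve_brk'd for the unaligned edges.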
@@ -30,6 +153,7 @@ | |||
30 | #include <linux/list.h> | 153 | #include <linux/list.h> |
31 | #include <linux/hash.h> | 154 | #include <linux/hash.h> |
32 | #include <linux/sched.h> | 155 | #include <linux/sched.h> |
156 | #include <linux/seq_file.h> | ||
33 | 157 | ||
34 | #include <asm/cache.h> | 158 | #include <asm/cache.h> |
35 | #include <asm/setup.h> | 159 | #include <asm/setup.h> |
@@ -59,9 +183,15 @@ static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); | |||
59 | static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); | 183 | static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE); |
60 | static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); | 184 | static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE); |
61 | 185 | ||
186 | static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); | ||
187 | |||
62 | RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); | 188 | RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); |
63 | RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); | 189 | RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); |
64 | 190 | ||
191 | /* We might hit two boundary violations at the start and end; at most, each | ||
192 | * boundary violation will require three middle nodes. */ | ||
193 | RESERVE_BRK(p2m_mid_identity, PAGE_SIZE * 2 * 3); | ||
194 | |||
65 | static inline unsigned p2m_top_index(unsigned long pfn) | 195 | static inline unsigned p2m_top_index(unsigned long pfn) |
66 | { | 196 | { |
67 | BUG_ON(pfn >= MAX_P2M_PFN); | 197 | BUG_ON(pfn >= MAX_P2M_PFN); |
@@ -136,7 +266,7 @@ static void p2m_init(unsigned long *p2m) | |||
136 | * - After resume we're called from within stop_machine, but the mfn | 266 | * - After resume we're called from within stop_machine, but the mfn |
137 | * tree should already be completely allocated. | 267 | * tree should already be completely allocated. |
138 | */ | 268 | */ |
139 | void xen_build_mfn_list_list(void) | 269 | void __ref xen_build_mfn_list_list(void) |
140 | { | 270 | { |
141 | unsigned long pfn; | 271 | unsigned long pfn; |
142 | 272 | ||
@@ -221,6 +351,9 @@ void __init xen_build_dynamic_phys_to_machine(void) | |||
221 | p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); | 351 | p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE); |
222 | p2m_top_init(p2m_top); | 352 | p2m_top_init(p2m_top); |
223 | 353 | ||
354 | p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
355 | p2m_init(p2m_identity); | ||
356 | |||
224 | /* | 357 | /* |
225 | * The domain builder gives us a pre-constructed p2m array in | 358 | * The domain builder gives us a pre-constructed p2m array in |
226 | * mfn_list for all the pages initially given to us, so we just | 359 | * mfn_list for all the pages initially given to us, so we just |
@@ -266,6 +399,14 @@ unsigned long get_phys_to_machine(unsigned long pfn) | |||
266 | mididx = p2m_mid_index(pfn); | 399 | mididx = p2m_mid_index(pfn); |
267 | idx = p2m_index(pfn); | 400 | idx = p2m_index(pfn); |
268 | 401 | ||
402 | /* | ||
403 | * The INVALID_P2M_ENTRY is filled in both p2m_*identity | ||
404 | * and in p2m_*missing, so returning the INVALID_P2M_ENTRY | ||
405 | * would be wrong. | ||
406 | */ | ||
407 | if (p2m_top[topidx][mididx] == p2m_identity) | ||
408 | return IDENTITY_FRAME(pfn); | ||
409 | |||
269 | return p2m_top[topidx][mididx][idx]; | 410 | return p2m_top[topidx][mididx][idx]; |
270 | } | 411 | } |
271 | EXPORT_SYMBOL_GPL(get_phys_to_machine); | 412 | EXPORT_SYMBOL_GPL(get_phys_to_machine); |
@@ -335,9 +476,11 @@ static bool alloc_p2m(unsigned long pfn) | |||
335 | p2m_top_mfn_p[topidx] = mid_mfn; | 476 | p2m_top_mfn_p[topidx] = mid_mfn; |
336 | } | 477 | } |
337 | 478 | ||
338 | if (p2m_top[topidx][mididx] == p2m_missing) { | 479 | if (p2m_top[topidx][mididx] == p2m_identity || |
480 | p2m_top[topidx][mididx] == p2m_missing) { | ||
339 | /* p2m leaf page is missing */ | 481 | /* p2m leaf page is missing */ |
340 | unsigned long *p2m; | 482 | unsigned long *p2m; |
483 | unsigned long *p2m_orig = p2m_top[topidx][mididx]; | ||
341 | 484 | ||
342 | p2m = alloc_p2m_page(); | 485 | p2m = alloc_p2m_page(); |
343 | if (!p2m) | 486 | if (!p2m) |
@@ -345,7 +488,7 @@ static bool alloc_p2m(unsigned long pfn) | |||
345 | 488 | ||
346 | p2m_init(p2m); | 489 | p2m_init(p2m); |
347 | 490 | ||
348 | if (cmpxchg(&mid[mididx], p2m_missing, p2m) != p2m_missing) | 491 | if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig) |
349 | free_p2m_page(p2m); | 492 | free_p2m_page(p2m); |
350 | else | 493 | else |
351 | mid_mfn[mididx] = virt_to_mfn(p2m); | 494 | mid_mfn[mididx] = virt_to_mfn(p2m); |
@@ -354,11 +497,91 @@ static bool alloc_p2m(unsigned long pfn) | |||
354 | return true; | 497 | return true; |
355 | } | 498 | } |
356 | 499 | ||
500 | bool __early_alloc_p2m(unsigned long pfn) | ||
501 | { | ||
502 | unsigned topidx, mididx, idx; | ||
503 | |||
504 | topidx = p2m_top_index(pfn); | ||
505 | mididx = p2m_mid_index(pfn); | ||
506 | idx = p2m_index(pfn); | ||
507 | |||
508 | /* Pfff.. No boundary cross-over, let's get out. */ | ||
509 | if (!idx) | ||
510 | return false; | ||
511 | |||
512 | WARN(p2m_top[topidx][mididx] == p2m_identity, | ||
513 | "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n", | ||
514 | topidx, mididx); | ||
515 | |||
516 | /* | ||
517 | * Could be done by xen_build_dynamic_phys_to_machine.. | ||
518 | */ | ||
519 | if (p2m_top[topidx][mididx] != p2m_missing) | ||
520 | return false; | ||
521 | |||
522 | /* Boundary cross-over for the edges: */ | ||
523 | if (idx) { | ||
524 | unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
525 | |||
526 | p2m_init(p2m); | ||
527 | |||
528 | p2m_top[topidx][mididx] = p2m; | ||
529 | |||
530 | } | ||
531 | return idx != 0; | ||
532 | } | ||
533 | unsigned long set_phys_range_identity(unsigned long pfn_s, | ||
534 | unsigned long pfn_e) | ||
535 | { | ||
536 | unsigned long pfn; | ||
537 | |||
538 | if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN)) | ||
539 | return 0; | ||
540 | |||
541 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) | ||
542 | return pfn_e - pfn_s; | ||
543 | |||
544 | if (pfn_s > pfn_e) | ||
545 | return 0; | ||
546 | |||
547 | for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1)); | ||
548 | pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); | ||
549 | pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) | ||
550 | { | ||
551 | unsigned topidx = p2m_top_index(pfn); | ||
552 | if (p2m_top[topidx] == p2m_mid_missing) { | ||
553 | unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE); | ||
554 | |||
555 | p2m_mid_init(mid); | ||
556 | |||
557 | p2m_top[topidx] = mid; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | __early_alloc_p2m(pfn_s); | ||
562 | __early_alloc_p2m(pfn_e); | ||
563 | |||
564 | for (pfn = pfn_s; pfn < pfn_e; pfn++) | ||
565 | if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) | ||
566 | break; | ||
567 | |||
568 | if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s), | ||
569 | "Identity mapping failed. We are %ld short of 1-1 mappings!\n", | ||
570 | (pfn_e - pfn_s) - (pfn - pfn_s))) | ||
571 | printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn); | ||
572 | |||
573 | return pfn - pfn_s; | ||
574 | } | ||
575 | |||
357 | /* Try to install p2m mapping; fail if intermediate bits missing */ | 576 | /* Try to install p2m mapping; fail if intermediate bits missing */ |
358 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 577 | bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
359 | { | 578 | { |
360 | unsigned topidx, mididx, idx; | 579 | unsigned topidx, mididx, idx; |
361 | 580 | ||
581 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | ||
582 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
583 | return true; | ||
584 | } | ||
362 | if (unlikely(pfn >= MAX_P2M_PFN)) { | 585 | if (unlikely(pfn >= MAX_P2M_PFN)) { |
363 | BUG_ON(mfn != INVALID_P2M_ENTRY); | 586 | BUG_ON(mfn != INVALID_P2M_ENTRY); |
364 | return true; | 587 | return true; |
@@ -368,6 +591,21 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
368 | mididx = p2m_mid_index(pfn); | 591 | mididx = p2m_mid_index(pfn); |
369 | idx = p2m_index(pfn); | 592 | idx = p2m_index(pfn); |
370 | 593 | ||
594 | /* For sparse holes where the p2m leaf has a real PFN along with | ||
595 | * PCI holes, stick in the PFN as the MFN value. | ||
596 | */ | ||
597 | if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) { | ||
598 | if (p2m_top[topidx][mididx] == p2m_identity) | ||
599 | return true; | ||
600 | |||
601 | /* Swap over from MISSING to IDENTITY if needed. */ | ||
602 | if (p2m_top[topidx][mididx] == p2m_missing) { | ||
603 | WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing, | ||
604 | p2m_identity) != p2m_missing); | ||
605 | return true; | ||
606 | } | ||
607 | } | ||
608 | |||
371 | if (p2m_top[topidx][mididx] == p2m_missing) | 609 | if (p2m_top[topidx][mididx] == p2m_missing) |
372 | return mfn == INVALID_P2M_ENTRY; | 610 | return mfn == INVALID_P2M_ENTRY; |
373 | 611 | ||
@@ -378,11 +616,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) | |||
378 | 616 | ||
379 | bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) | 617 | bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) |
380 | { | 618 | { |
381 | if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) { | ||
382 | BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY); | ||
383 | return true; | ||
384 | } | ||
385 | |||
386 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { | 619 | if (unlikely(!__set_phys_to_machine(pfn, mfn))) { |
387 | if (!alloc_p2m(pfn)) | 620 | if (!alloc_p2m(pfn)) |
388 | return false; | 621 | return false; |
@@ -421,7 +654,7 @@ int m2p_add_override(unsigned long mfn, struct page *page) | |||
421 | { | 654 | { |
422 | unsigned long flags; | 655 | unsigned long flags; |
423 | unsigned long pfn; | 656 | unsigned long pfn; |
424 | unsigned long address; | 657 | unsigned long uninitialized_var(address); |
425 | unsigned level; | 658 | unsigned level; |
426 | pte_t *ptep = NULL; | 659 | pte_t *ptep = NULL; |
427 | 660 | ||
@@ -455,7 +688,7 @@ int m2p_remove_override(struct page *page) | |||
455 | unsigned long flags; | 688 | unsigned long flags; |
456 | unsigned long mfn; | 689 | unsigned long mfn; |
457 | unsigned long pfn; | 690 | unsigned long pfn; |
458 | unsigned long address; | 691 | unsigned long uninitialized_var(address); |
459 | unsigned level; | 692 | unsigned level; |
460 | pte_t *ptep = NULL; | 693 | pte_t *ptep = NULL; |
461 | 694 | ||
@@ -520,3 +753,80 @@ unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn) | |||
520 | return ret; | 753 | return ret; |
521 | } | 754 | } |
522 | EXPORT_SYMBOL_GPL(m2p_find_override_pfn); | 755 | EXPORT_SYMBOL_GPL(m2p_find_override_pfn); |
756 | |||
757 | #ifdef CONFIG_XEN_DEBUG_FS | ||
758 | |||
759 | int p2m_dump_show(struct seq_file *m, void *v) | ||
760 | { | ||
761 | static const char * const level_name[] = { "top", "middle", | ||
762 | "entry", "abnormal" }; | ||
763 | static const char * const type_name[] = { "identity", "missing", | ||
764 | "pfn", "abnormal"}; | ||
765 | #define TYPE_IDENTITY 0 | ||
766 | #define TYPE_MISSING 1 | ||
767 | #define TYPE_PFN 2 | ||
768 | #define TYPE_UNKNOWN 3 | ||
769 | unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0; | ||
770 | unsigned int uninitialized_var(prev_level); | ||
771 | unsigned int uninitialized_var(prev_type); | ||
772 | |||
773 | if (!p2m_top) | ||
774 | return 0; | ||
775 | |||
776 | for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) { | ||
777 | unsigned topidx = p2m_top_index(pfn); | ||
778 | unsigned mididx = p2m_mid_index(pfn); | ||
779 | unsigned idx = p2m_index(pfn); | ||
780 | unsigned lvl, type; | ||
781 | |||
782 | lvl = 4; | ||
783 | type = TYPE_UNKNOWN; | ||
784 | if (p2m_top[topidx] == p2m_mid_missing) { | ||
785 | lvl = 0; type = TYPE_MISSING; | ||
786 | } else if (p2m_top[topidx] == NULL) { | ||
787 | lvl = 0; type = TYPE_UNKNOWN; | ||
788 | } else if (p2m_top[topidx][mididx] == NULL) { | ||
789 | lvl = 1; type = TYPE_UNKNOWN; | ||
790 | } else if (p2m_top[topidx][mididx] == p2m_identity) { | ||
791 | lvl = 1; type = TYPE_IDENTITY; | ||
792 | } else if (p2m_top[topidx][mididx] == p2m_missing) { | ||
793 | lvl = 1; type = TYPE_MISSING; | ||
794 | } else if (p2m_top[topidx][mididx][idx] == 0) { | ||
795 | lvl = 2; type = TYPE_UNKNOWN; | ||
796 | } else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) { | ||
797 | lvl = 2; type = TYPE_IDENTITY; | ||
798 | } else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) { | ||
799 | lvl = 2; type = TYPE_MISSING; | ||
800 | } else if (p2m_top[topidx][mididx][idx] == pfn) { | ||
801 | lvl = 2; type = TYPE_PFN; | ||
802 | } else if (p2m_top[topidx][mididx][idx] != pfn) { | ||
803 | lvl = 2; type = TYPE_PFN; | ||
804 | } | ||
805 | if (pfn == 0) { | ||
806 | prev_level = lvl; | ||
807 | prev_type = type; | ||
808 | } | ||
809 | if (pfn == MAX_DOMAIN_PAGES-1) { | ||
810 | lvl = 3; | ||
811 | type = TYPE_UNKNOWN; | ||
812 | } | ||
813 | if (prev_type != type) { | ||
814 | seq_printf(m, " [0x%lx->0x%lx] %s\n", | ||
815 | prev_pfn_type, pfn, type_name[prev_type]); | ||
816 | prev_pfn_type = pfn; | ||
817 | prev_type = type; | ||
818 | } | ||
819 | if (prev_level != lvl) { | ||
820 | seq_printf(m, " [0x%lx->0x%lx] level %s\n", | ||
821 | prev_pfn_level, pfn, level_name[prev_level]); | ||
822 | prev_pfn_level = pfn; | ||
823 | prev_level = lvl; | ||
824 | } | ||
825 | } | ||
826 | return 0; | ||
827 | #undef TYPE_IDENTITY | ||
828 | #undef TYPE_MISSING | ||
829 | #undef TYPE_PFN | ||
830 | #undef TYPE_UNKNOWN | ||
831 | } | ||
832 | #endif | ||
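The lookup side of the new identity handling can be summarized from a caller's point of view: get_phys_to_machine() now returns IDENTITY_FRAME(pfn) for 1-1 regions, INVALID_P2M_ENTRY for missing ones, and a plain MFN otherwise. A hedged sketch of how a consumer would classify the result (the macros stand in for the real definitions in asm/xen/page.h; treat this as an illustration of the convention, not literal kernel code):

    #include <asm/xen/page.h>

    static void example_classify(unsigned long pfn)
    {
            unsigned long mfn = get_phys_to_machine(pfn);

            if (mfn == INVALID_P2M_ENTRY) {
                    /* "missing": no machine frame backs this pfn.
                     * Note this test must come first, since INVALID_P2M_ENTRY
                     * also has the identity bit set. */
            } else if (mfn & IDENTITY_FRAME_BIT) {
                    /* 1-1 region (PCI BAR, ACPI space, E820 gap) */
                    mfn &= ~IDENTITY_FRAME_BIT;     /* now mfn == pfn */
            } else {
                    /* ordinary RAM: mfn is a real machine frame number */
            }
            (void)mfn;      /* sketch only: result not used further here */
    }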
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index a8a66a50d446..fa0269a99377 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -52,6 +52,8 @@ phys_addr_t xen_extra_mem_start, xen_extra_mem_size; | |||
52 | 52 | ||
53 | static __init void xen_add_extra_mem(unsigned long pages) | 53 | static __init void xen_add_extra_mem(unsigned long pages) |
54 | { | 54 | { |
55 | unsigned long pfn; | ||
56 | |||
55 | u64 size = (u64)pages * PAGE_SIZE; | 57 | u64 size = (u64)pages * PAGE_SIZE; |
56 | u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; | 58 | u64 extra_start = xen_extra_mem_start + xen_extra_mem_size; |
57 | 59 | ||
@@ -66,6 +68,9 @@ static __init void xen_add_extra_mem(unsigned long pages) | |||
66 | xen_extra_mem_size += size; | 68 | xen_extra_mem_size += size; |
67 | 69 | ||
68 | xen_max_p2m_pfn = PFN_DOWN(extra_start + size); | 70 | xen_max_p2m_pfn = PFN_DOWN(extra_start + size); |
71 | |||
72 | for (pfn = PFN_DOWN(extra_start); pfn <= xen_max_p2m_pfn; pfn++) | ||
73 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | ||
69 | } | 74 | } |
70 | 75 | ||
71 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, | 76 | static unsigned long __init xen_release_chunk(phys_addr_t start_addr, |
@@ -104,7 +109,7 @@ static unsigned long __init xen_release_chunk(phys_addr_t start_addr, | |||
104 | WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", | 109 | WARN(ret != 1, "Failed to release memory %lx-%lx err=%d\n", |
105 | start, end, ret); | 110 | start, end, ret); |
106 | if (ret == 1) { | 111 | if (ret == 1) { |
107 | set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 112 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
108 | len++; | 113 | len++; |
109 | } | 114 | } |
110 | } | 115 | } |
@@ -138,12 +143,55 @@ static unsigned long __init xen_return_unused_memory(unsigned long max_pfn, | |||
138 | return released; | 143 | return released; |
139 | } | 144 | } |
140 | 145 | ||
146 | static unsigned long __init xen_set_identity(const struct e820entry *list, | ||
147 | ssize_t map_size) | ||
148 | { | ||
149 | phys_addr_t last = xen_initial_domain() ? 0 : ISA_END_ADDRESS; | ||
150 | phys_addr_t start_pci = last; | ||
151 | const struct e820entry *entry; | ||
152 | unsigned long identity = 0; | ||
153 | int i; | ||
154 | |||
155 | for (i = 0, entry = list; i < map_size; i++, entry++) { | ||
156 | phys_addr_t start = entry->addr; | ||
157 | phys_addr_t end = start + entry->size; | ||
158 | |||
159 | if (start < last) | ||
160 | start = last; | ||
161 | |||
162 | if (end <= start) | ||
163 | continue; | ||
164 | |||
165 | /* Skip over the 1MB region. */ | ||
166 | if (last > end) | ||
167 | continue; | ||
168 | |||
169 | if (entry->type == E820_RAM) { | ||
170 | if (start > start_pci) | ||
171 | identity += set_phys_range_identity( | ||
172 | PFN_UP(start_pci), PFN_DOWN(start)); | ||
173 | |||
174 | /* Without saving 'last' we would gobble RAM too | ||
175 | * at the end of the loop. */ | ||
176 | last = end; | ||
177 | start_pci = end; | ||
178 | continue; | ||
179 | } | ||
180 | start_pci = min(start, start_pci); | ||
181 | last = end; | ||
182 | } | ||
183 | if (last > start_pci) | ||
184 | identity += set_phys_range_identity( | ||
185 | PFN_UP(start_pci), PFN_DOWN(last)); | ||
186 | return identity; | ||
187 | } | ||
141 | /** | 188 | /** |
142 | * machine_specific_memory_setup - Hook for machine specific memory setup. | 189 | * machine_specific_memory_setup - Hook for machine specific memory setup. |
143 | **/ | 190 | **/ |
144 | char * __init xen_memory_setup(void) | 191 | char * __init xen_memory_setup(void) |
145 | { | 192 | { |
146 | static struct e820entry map[E820MAX] __initdata; | 193 | static struct e820entry map[E820MAX] __initdata; |
194 | static struct e820entry map_raw[E820MAX] __initdata; | ||
147 | 195 | ||
148 | unsigned long max_pfn = xen_start_info->nr_pages; | 196 | unsigned long max_pfn = xen_start_info->nr_pages; |
149 | unsigned long long mem_end; | 197 | unsigned long long mem_end; |
@@ -151,6 +199,7 @@ char * __init xen_memory_setup(void) | |||
151 | struct xen_memory_map memmap; | 199 | struct xen_memory_map memmap; |
152 | unsigned long extra_pages = 0; | 200 | unsigned long extra_pages = 0; |
153 | unsigned long extra_limit; | 201 | unsigned long extra_limit; |
202 | unsigned long identity_pages = 0; | ||
154 | int i; | 203 | int i; |
155 | int op; | 204 | int op; |
156 | 205 | ||
@@ -176,6 +225,7 @@ char * __init xen_memory_setup(void) | |||
176 | } | 225 | } |
177 | BUG_ON(rc); | 226 | BUG_ON(rc); |
178 | 227 | ||
228 | memcpy(map_raw, map, sizeof(map)); | ||
179 | e820.nr_map = 0; | 229 | e820.nr_map = 0; |
180 | xen_extra_mem_start = mem_end; | 230 | xen_extra_mem_start = mem_end; |
181 | for (i = 0; i < memmap.nr_entries; i++) { | 231 | for (i = 0; i < memmap.nr_entries; i++) { |
@@ -194,6 +244,15 @@ char * __init xen_memory_setup(void) | |||
194 | end -= delta; | 244 | end -= delta; |
195 | 245 | ||
196 | extra_pages += PFN_DOWN(delta); | 246 | extra_pages += PFN_DOWN(delta); |
247 | /* | ||
248 | * Set RAM below 4GB that is not for us to be unusable. | ||
249 | * This prevents "System RAM" address space from being | ||
250 | * used as a potential resource for I/O addresses (happens | ||
251 | * when 'allocate_resource' is called). | ||
252 | */ | ||
253 | if (delta && | ||
254 | (xen_initial_domain() && end < 0x100000000ULL)) | ||
255 | e820_add_region(end, delta, E820_UNUSABLE); | ||
197 | } | 256 | } |
198 | 257 | ||
199 | if (map[i].size > 0 && end > xen_extra_mem_start) | 258 | if (map[i].size > 0 && end > xen_extra_mem_start) |
@@ -251,6 +310,13 @@ char * __init xen_memory_setup(void) | |||
251 | 310 | ||
252 | xen_add_extra_mem(extra_pages); | 311 | xen_add_extra_mem(extra_pages); |
253 | 312 | ||
313 | /* | ||
314 | * Set P2M for all non-RAM pages and E820 gaps to be identity | ||
315 | * type PFNs. We supply it with the non-sanitized version | ||
316 | * of the E820. | ||
317 | */ | ||
318 | identity_pages = xen_set_identity(map_raw, memmap.nr_entries); | ||
319 | printk(KERN_INFO "Set %ld page(s) to 1-1 mapping.\n", identity_pages); | ||
254 | return "Xen"; | 320 | return "Xen"; |
255 | } | 321 | } |
256 | 322 | ||
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 067759e3d6a5..2e2d370a47b1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu) | |||
397 | name = "<timer kasprintf failed>"; | 397 | name = "<timer kasprintf failed>"; |
398 | 398 | ||
399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | 399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
400 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, | 400 | IRQF_DISABLED|IRQF_PERCPU| |
401 | IRQF_NOBALANCING|IRQF_TIMER| | ||
402 | IRQF_FORCE_RESUME, | ||
401 | name, NULL); | 403 | name, NULL); |
402 | 404 | ||
403 | evt = &per_cpu(xen_clock_events, cpu); | 405 | evt = &per_cpu(xen_clock_events, cpu); |
diff --git a/block/blk-core.c b/block/blk-core.c index 2f4002f79a24..518dd423a5fe 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q) | |||
352 | WARN_ON(!irqs_disabled()); | 352 | WARN_ON(!irqs_disabled()); |
353 | 353 | ||
354 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); | 354 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
355 | __blk_run_queue(q); | 355 | __blk_run_queue(q, false); |
356 | } | 356 | } |
357 | EXPORT_SYMBOL(blk_start_queue); | 357 | EXPORT_SYMBOL(blk_start_queue); |
358 | 358 | ||
@@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
403 | /** | 403 | /** |
404 | * __blk_run_queue - run a single device queue | 404 | * __blk_run_queue - run a single device queue |
405 | * @q: The queue to run | 405 | * @q: The queue to run |
406 | * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. | ||
406 | * | 407 | * |
407 | * Description: | 408 | * Description: |
408 | * See @blk_run_queue. This variant must be called with the queue lock | 409 | * See @blk_run_queue. This variant must be called with the queue lock |
409 | * held and interrupts disabled. | 410 | * held and interrupts disabled. |
410 | * | 411 | * |
411 | */ | 412 | */ |
412 | void __blk_run_queue(struct request_queue *q) | 413 | void __blk_run_queue(struct request_queue *q, bool force_kblockd) |
413 | { | 414 | { |
414 | blk_remove_plug(q); | 415 | blk_remove_plug(q); |
415 | 416 | ||
@@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q) | |||
423 | * Only recurse once to avoid overrunning the stack, let the unplug | 424 | * Only recurse once to avoid overrunning the stack, let the unplug |
424 | * handling reinvoke the handler shortly if we already got there. | 425 | * handling reinvoke the handler shortly if we already got there. |
425 | */ | 426 | */ |
426 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 427 | if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
427 | q->request_fn(q); | 428 | q->request_fn(q); |
428 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 429 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
429 | } else { | 430 | } else { |
@@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q) | |||
446 | unsigned long flags; | 447 | unsigned long flags; |
447 | 448 | ||
448 | spin_lock_irqsave(q->queue_lock, flags); | 449 | spin_lock_irqsave(q->queue_lock, flags); |
449 | __blk_run_queue(q); | 450 | __blk_run_queue(q, false); |
450 | spin_unlock_irqrestore(q->queue_lock, flags); | 451 | spin_unlock_irqrestore(q->queue_lock, flags); |
451 | } | 452 | } |
452 | EXPORT_SYMBOL(blk_run_queue); | 453 | EXPORT_SYMBOL(blk_run_queue); |
@@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, | |||
1053 | 1054 | ||
1054 | drive_stat_acct(rq, 1); | 1055 | drive_stat_acct(rq, 1); |
1055 | __elv_add_request(q, rq, where, 0); | 1056 | __elv_add_request(q, rq, where, 0); |
1056 | __blk_run_queue(q); | 1057 | __blk_run_queue(q, false); |
1057 | spin_unlock_irqrestore(q->queue_lock, flags); | 1058 | spin_unlock_irqrestore(q->queue_lock, flags); |
1058 | } | 1059 | } |
1059 | EXPORT_SYMBOL(blk_insert_request); | 1060 | EXPORT_SYMBOL(blk_insert_request); |
@@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2610 | } | 2611 | } |
2611 | EXPORT_SYMBOL(kblockd_schedule_work); | 2612 | EXPORT_SYMBOL(kblockd_schedule_work); |
2612 | 2613 | ||
2613 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
2614 | struct delayed_work *dwork, unsigned long delay) | ||
2615 | { | ||
2616 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | ||
2617 | } | ||
2618 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
2619 | |||
2620 | int __init blk_dev_init(void) | 2614 | int __init blk_dev_init(void) |
2621 | { | 2615 | { |
2622 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2616 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |
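Because __blk_run_queue() now takes a force_kblockd flag, every caller has to pick one of the two modes spelled out in the updated kerneldoc. A short hedged sketch (the helper name is hypothetical; only the boolean convention is taken from this patch — false may call ->request_fn directly, true always defers to kblockd):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static void example_kick_queue(struct request_queue *q, bool from_completion)
    {
            unsigned long flags;

            spin_lock_irqsave(q->queue_lock, flags);
            /* From a request-completion path, pass true so the queue is run
             * from kblockd instead of recursing into ->request_fn here. */
            __blk_run_queue(q, from_completion);
            spin_unlock_irqrestore(q->queue_lock, flags);
    }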
diff --git a/block/blk-flush.c b/block/blk-flush.c index 54b123d6563e..b27d0208611b 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
@@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q, | |||
66 | 66 | ||
67 | /* | 67 | /* |
68 | * Moving a request silently to empty queue_head may stall the | 68 | * Moving a request silently to empty queue_head may stall the |
69 | * queue. Kick the queue in those cases. | 69 | * queue. Kick the queue in those cases. This function is called |
70 | * from request completion path and calling directly into | ||
71 | * request_fn may confuse the driver. Always use kblockd. | ||
70 | */ | 72 | */ |
71 | if (was_empty && next_rq) | 73 | if (was_empty && next_rq) |
72 | __blk_run_queue(q); | 74 | __blk_run_queue(q, true); |
73 | } | 75 | } |
74 | 76 | ||
75 | static void pre_flush_end_io(struct request *rq, int error) | 77 | static void pre_flush_end_io(struct request *rq, int error) |
@@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q) | |||
130 | BUG(); | 132 | BUG(); |
131 | } | 133 | } |
132 | 134 | ||
133 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | 135 | elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); |
134 | return rq; | 136 | return rq; |
135 | } | 137 | } |
136 | 138 | ||
diff --git a/block/blk-lib.c b/block/blk-lib.c index 1a320d2406b0..bd3e8df4d5e2 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
@@ -109,7 +109,6 @@ struct bio_batch | |||
109 | atomic_t done; | 109 | atomic_t done; |
110 | unsigned long flags; | 110 | unsigned long flags; |
111 | struct completion *wait; | 111 | struct completion *wait; |
112 | bio_end_io_t *end_io; | ||
113 | }; | 112 | }; |
114 | 113 | ||
115 | static void bio_batch_end_io(struct bio *bio, int err) | 114 | static void bio_batch_end_io(struct bio *bio, int err) |
@@ -122,17 +121,14 @@ static void bio_batch_end_io(struct bio *bio, int err) | |||
122 | else | 121 | else |
123 | clear_bit(BIO_UPTODATE, &bb->flags); | 122 | clear_bit(BIO_UPTODATE, &bb->flags); |
124 | } | 123 | } |
125 | if (bb) { | 124 | if (bb) |
126 | if (bb->end_io) | 125 | if (atomic_dec_and_test(&bb->done)) |
127 | bb->end_io(bio, err); | 126 | complete(bb->wait); |
128 | atomic_inc(&bb->done); | ||
129 | complete(bb->wait); | ||
130 | } | ||
131 | bio_put(bio); | 127 | bio_put(bio); |
132 | } | 128 | } |
133 | 129 | ||
134 | /** | 130 | /** |
135 | * blkdev_issue_zeroout generate number of zero filled write bios | 131 | * blkdev_issue_zeroout - generate number of zero filled write bios |
136 | * @bdev: blockdev to issue | 132 | * @bdev: blockdev to issue |
137 | * @sector: start sector | 133 | * @sector: start sector |
138 | * @nr_sects: number of sectors to write | 134 | * @nr_sects: number of sectors to write |
@@ -150,13 +146,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | |||
150 | int ret; | 146 | int ret; |
151 | struct bio *bio; | 147 | struct bio *bio; |
152 | struct bio_batch bb; | 148 | struct bio_batch bb; |
153 | unsigned int sz, issued = 0; | 149 | unsigned int sz; |
154 | DECLARE_COMPLETION_ONSTACK(wait); | 150 | DECLARE_COMPLETION_ONSTACK(wait); |
155 | 151 | ||
156 | atomic_set(&bb.done, 0); | 152 | atomic_set(&bb.done, 1); |
157 | bb.flags = 1 << BIO_UPTODATE; | 153 | bb.flags = 1 << BIO_UPTODATE; |
158 | bb.wait = &wait; | 154 | bb.wait = &wait; |
159 | bb.end_io = NULL; | ||
160 | 155 | ||
161 | submit: | 156 | submit: |
162 | ret = 0; | 157 | ret = 0; |
@@ -185,12 +180,12 @@ submit: | |||
185 | break; | 180 | break; |
186 | } | 181 | } |
187 | ret = 0; | 182 | ret = 0; |
188 | issued++; | 183 | atomic_inc(&bb.done); |
189 | submit_bio(WRITE, bio); | 184 | submit_bio(WRITE, bio); |
190 | } | 185 | } |
191 | 186 | ||
192 | /* Wait for bios in-flight */ | 187 | /* Wait for bios in-flight */ |
193 | while (issued != atomic_read(&bb.done)) | 188 | if (!atomic_dec_and_test(&bb.done)) |
194 | wait_for_completion(&wait); | 189 | wait_for_completion(&wait); |
195 | 190 | ||
196 | if (!test_bit(BIO_UPTODATE, &bb.flags)) | 191 | if (!test_bit(BIO_UPTODATE, &bb.flags)) |
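The blk-lib.c change above replaces the separate issued/done bookkeeping with a single atomic count that starts at 1: the submitter holds one reference, every submitted bio adds one, and whoever drops the count to zero signals the waiter. A minimal userspace sketch of the same counting pattern (POSIX threads, a semaphore and C11 atomics are assumptions here; this is an analogue, not the kernel code):

#include <pthread.h>
#include <semaphore.h>
#include <stdatomic.h>
#include <stdio.h>

struct batch {
	atomic_int done;	/* starts at 1: the submitter's own reference */
	sem_t wait;
};

static struct batch bb;

static void *worker(void *arg)
{
	/* ... the actual I/O would happen here ... */
	if (atomic_fetch_sub(&bb.done, 1) == 1)	/* analogous to atomic_dec_and_test() */
		sem_post(&bb.wait);		/* analogous to complete(bb->wait) */
	return NULL;
}

int main(void)
{
	pthread_t tid[8];
	int i;

	atomic_init(&bb.done, 1);
	sem_init(&bb.wait, 0, 0);

	for (i = 0; i < 8; i++) {
		atomic_fetch_add(&bb.done, 1);	/* analogous to atomic_inc(&bb.done) */
		pthread_create(&tid[i], NULL, worker, NULL);
	}

	/* Drop the submitter's reference; wait only if workers are still in flight. */
	if (atomic_fetch_sub(&bb.done, 1) != 1)
		sem_wait(&bb.wait);

	for (i = 0; i < 8; i++)
		pthread_join(tid[i], NULL);

	printf("all workers finished\n");
	return 0;
}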
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a89043a3caa4..e36cc10a346c 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
@@ -20,6 +20,11 @@ static int throtl_quantum = 32; | |||
20 | /* Throttling is performed over 100ms slice and after that slice is renewed */ | 20 | /* Throttling is performed over 100ms slice and after that slice is renewed */ |
21 | static unsigned long throtl_slice = HZ/10; /* 100 ms */ | 21 | static unsigned long throtl_slice = HZ/10; /* 100 ms */ |
22 | 22 | ||
23 | /* A workqueue to queue throttle related work */ | ||
24 | static struct workqueue_struct *kthrotld_workqueue; | ||
25 | static void throtl_schedule_delayed_work(struct throtl_data *td, | ||
26 | unsigned long delay); | ||
27 | |||
23 | struct throtl_rb_root { | 28 | struct throtl_rb_root { |
24 | struct rb_root rb; | 29 | struct rb_root rb; |
25 | struct rb_node *left; | 30 | struct rb_node *left; |
@@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td) | |||
345 | update_min_dispatch_time(st); | 350 | update_min_dispatch_time(st); |
346 | 351 | ||
347 | if (time_before_eq(st->min_disptime, jiffies)) | 352 | if (time_before_eq(st->min_disptime, jiffies)) |
348 | throtl_schedule_delayed_work(td->queue, 0); | 353 | throtl_schedule_delayed_work(td, 0); |
349 | else | 354 | else |
350 | throtl_schedule_delayed_work(td->queue, | 355 | throtl_schedule_delayed_work(td, (st->min_disptime - jiffies)); |
351 | (st->min_disptime - jiffies)); | ||
352 | } | 356 | } |
353 | 357 | ||
354 | static inline void | 358 | static inline void |
@@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work) | |||
815 | } | 819 | } |
816 | 820 | ||
817 | /* Call with queue lock held */ | 821 | /* Call with queue lock held */ |
818 | void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) | 822 | static void |
823 | throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) | ||
819 | { | 824 | { |
820 | 825 | ||
821 | struct throtl_data *td = q->td; | ||
822 | struct delayed_work *dwork = &td->throtl_work; | 826 | struct delayed_work *dwork = &td->throtl_work; |
823 | 827 | ||
824 | if (total_nr_queued(td) > 0) { | 828 | if (total_nr_queued(td) > 0) { |
@@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) | |||
827 | * Cancel that and schedule a new one. | 831 | * Cancel that and schedule a new one. |
828 | */ | 832 | */ |
829 | __cancel_delayed_work(dwork); | 833 | __cancel_delayed_work(dwork); |
830 | kblockd_schedule_delayed_work(q, dwork, delay); | 834 | queue_delayed_work(kthrotld_workqueue, dwork, delay); |
831 | throtl_log(td, "schedule work. delay=%lu jiffies=%lu", | 835 | throtl_log(td, "schedule work. delay=%lu jiffies=%lu", |
832 | delay, jiffies); | 836 | delay, jiffies); |
833 | } | 837 | } |
834 | } | 838 | } |
835 | EXPORT_SYMBOL(throtl_schedule_delayed_work); | ||
836 | 839 | ||
837 | static void | 840 | static void |
838 | throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) | 841 | throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) |
@@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key, | |||
920 | smp_mb__after_atomic_inc(); | 923 | smp_mb__after_atomic_inc(); |
921 | 924 | ||
922 | /* Schedule a work now to process the limit change */ | 925 | /* Schedule a work now to process the limit change */ |
923 | throtl_schedule_delayed_work(td->queue, 0); | 926 | throtl_schedule_delayed_work(td, 0); |
924 | } | 927 | } |
925 | 928 | ||
926 | static void throtl_update_blkio_group_write_bps(void *key, | 929 | static void throtl_update_blkio_group_write_bps(void *key, |
@@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key, | |||
934 | smp_mb__before_atomic_inc(); | 937 | smp_mb__before_atomic_inc(); |
935 | atomic_inc(&td->limits_changed); | 938 | atomic_inc(&td->limits_changed); |
936 | smp_mb__after_atomic_inc(); | 939 | smp_mb__after_atomic_inc(); |
937 | throtl_schedule_delayed_work(td->queue, 0); | 940 | throtl_schedule_delayed_work(td, 0); |
938 | } | 941 | } |
939 | 942 | ||
940 | static void throtl_update_blkio_group_read_iops(void *key, | 943 | static void throtl_update_blkio_group_read_iops(void *key, |
@@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key, | |||
948 | smp_mb__before_atomic_inc(); | 951 | smp_mb__before_atomic_inc(); |
949 | atomic_inc(&td->limits_changed); | 952 | atomic_inc(&td->limits_changed); |
950 | smp_mb__after_atomic_inc(); | 953 | smp_mb__after_atomic_inc(); |
951 | throtl_schedule_delayed_work(td->queue, 0); | 954 | throtl_schedule_delayed_work(td, 0); |
952 | } | 955 | } |
953 | 956 | ||
954 | static void throtl_update_blkio_group_write_iops(void *key, | 957 | static void throtl_update_blkio_group_write_iops(void *key, |
@@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key, | |||
962 | smp_mb__before_atomic_inc(); | 965 | smp_mb__before_atomic_inc(); |
963 | atomic_inc(&td->limits_changed); | 966 | atomic_inc(&td->limits_changed); |
964 | smp_mb__after_atomic_inc(); | 967 | smp_mb__after_atomic_inc(); |
965 | throtl_schedule_delayed_work(td->queue, 0); | 968 | throtl_schedule_delayed_work(td, 0); |
966 | } | 969 | } |
967 | 970 | ||
968 | void throtl_shutdown_timer_wq(struct request_queue *q) | 971 | void throtl_shutdown_timer_wq(struct request_queue *q) |
@@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q) | |||
1135 | 1138 | ||
1136 | static int __init throtl_init(void) | 1139 | static int __init throtl_init(void) |
1137 | { | 1140 | { |
1141 | kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); | ||
1142 | if (!kthrotld_workqueue) | ||
1143 | panic("Failed to create kthrotld\n"); | ||
1144 | |||
1138 | blkio_policy_register(&blkio_policy_throtl); | 1145 | blkio_policy_register(&blkio_policy_throtl); |
1139 | return 0; | 1146 | return 0; |
1140 | } | 1147 | } |
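blk-throttle now queues its delayed work on a private kthrotld workqueue created with WQ_MEM_RECLAIM, so throttling can make forward progress under memory pressure instead of riding on kblockd. A minimal module-style sketch of that alloc_workqueue()/queue_delayed_work() pattern (the "demo" names are invented, and it would need to be built against kernel headers; it is an illustration, not the throttling code):

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct workqueue_struct *demo_wq;
static struct delayed_work demo_dwork;

static void demo_work_fn(struct work_struct *work)
{
	pr_info("demo: delayed work ran\n");
}

static int __init demo_init(void)
{
	/* Dedicated queue so this work is not starved on shared queues. */
	demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 0);
	if (!demo_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&demo_dwork, demo_work_fn);
	queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_dwork);
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");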
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 7be4c7959625..ea83a4f0c27d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3355 | cfqd->busy_queues > 1) { | 3355 | cfqd->busy_queues > 1) { |
3356 | cfq_del_timer(cfqd, cfqq); | 3356 | cfq_del_timer(cfqd, cfqq); |
3357 | cfq_clear_cfqq_wait_request(cfqq); | 3357 | cfq_clear_cfqq_wait_request(cfqq); |
3358 | __blk_run_queue(cfqd->queue); | 3358 | __blk_run_queue(cfqd->queue, false); |
3359 | } else { | 3359 | } else { |
3360 | cfq_blkiocg_update_idle_time_stats( | 3360 | cfq_blkiocg_update_idle_time_stats( |
3361 | &cfqq->cfqg->blkg); | 3361 | &cfqq->cfqg->blkg); |
@@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
3370 | * this new queue is RT and the current one is BE | 3370 | * this new queue is RT and the current one is BE |
3371 | */ | 3371 | */ |
3372 | cfq_preempt_queue(cfqd, cfqq); | 3372 | cfq_preempt_queue(cfqd, cfqq); |
3373 | __blk_run_queue(cfqd->queue); | 3373 | __blk_run_queue(cfqd->queue, false); |
3374 | } | 3374 | } |
3375 | } | 3375 | } |
3376 | 3376 | ||
@@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work) | |||
3731 | struct request_queue *q = cfqd->queue; | 3731 | struct request_queue *q = cfqd->queue; |
3732 | 3732 | ||
3733 | spin_lock_irq(q->queue_lock); | 3733 | spin_lock_irq(q->queue_lock); |
3734 | __blk_run_queue(cfqd->queue); | 3734 | __blk_run_queue(cfqd->queue, false); |
3735 | spin_unlock_irq(q->queue_lock); | 3735 | spin_unlock_irq(q->queue_lock); |
3736 | } | 3736 | } |
3737 | 3737 | ||
diff --git a/block/elevator.c b/block/elevator.c index 2569512830d3..236e93c1f46c 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q) | |||
602 | */ | 602 | */ |
603 | elv_drain_elevator(q); | 603 | elv_drain_elevator(q); |
604 | while (q->rq.elvpriv) { | 604 | while (q->rq.elvpriv) { |
605 | __blk_run_queue(q); | 605 | __blk_run_queue(q, false); |
606 | spin_unlock_irq(q->queue_lock); | 606 | spin_unlock_irq(q->queue_lock); |
607 | msleep(10); | 607 | msleep(10); |
608 | spin_lock_irq(q->queue_lock); | 608 | spin_lock_irq(q->queue_lock); |
@@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
651 | * with anything. There's no point in delaying queue | 651 | * with anything. There's no point in delaying queue |
652 | * processing. | 652 | * processing. |
653 | */ | 653 | */ |
654 | __blk_run_queue(q); | 654 | __blk_run_queue(q, false); |
655 | break; | 655 | break; |
656 | 656 | ||
657 | case ELEVATOR_INSERT_SORT: | 657 | case ELEVATOR_INSERT_SORT: |
diff --git a/block/genhd.c b/block/genhd.c index 6a5b772aa201..cbf1112a885c 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1355,7 +1355,7 @@ int invalidate_partition(struct gendisk *disk, int partno) | |||
1355 | struct block_device *bdev = bdget_disk(disk, partno); | 1355 | struct block_device *bdev = bdget_disk(disk, partno); |
1356 | if (bdev) { | 1356 | if (bdev) { |
1357 | fsync_bdev(bdev); | 1357 | fsync_bdev(bdev); |
1358 | res = __invalidate_device(bdev); | 1358 | res = __invalidate_device(bdev, true); |
1359 | bdput(bdev); | 1359 | bdput(bdev); |
1360 | } | 1360 | } |
1361 | return res; | 1361 | return res; |
diff --git a/block/ioctl.c b/block/ioctl.c index 9049d460fa89..1124cd297263 100644 --- a/block/ioctl.c +++ b/block/ioctl.c | |||
@@ -294,9 +294,11 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
294 | return -EINVAL; | 294 | return -EINVAL; |
295 | if (get_user(n, (int __user *) arg)) | 295 | if (get_user(n, (int __user *) arg)) |
296 | return -EFAULT; | 296 | return -EFAULT; |
297 | if (!(mode & FMODE_EXCL) && | 297 | if (!(mode & FMODE_EXCL)) { |
298 | blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) | 298 | bdgrab(bdev); |
299 | return -EBUSY; | 299 | if (blkdev_get(bdev, mode | FMODE_EXCL, &bdev) < 0) |
300 | return -EBUSY; | ||
301 | } | ||
300 | ret = set_blocksize(bdev, n); | 302 | ret = set_blocksize(bdev, n); |
301 | if (!(mode & FMODE_EXCL)) | 303 | if (!(mode & FMODE_EXCL)) |
302 | blkdev_put(bdev, mode | FMODE_EXCL); | 304 | blkdev_put(bdev, mode | FMODE_EXCL); |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 54784bb42cec..edc25867ad9d 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
@@ -416,10 +416,15 @@ struct acpi_gpe_handler_info { | |||
416 | u8 originally_enabled; /* True if GPE was originally enabled */ | 416 | u8 originally_enabled; /* True if GPE was originally enabled */ |
417 | }; | 417 | }; |
418 | 418 | ||
419 | struct acpi_gpe_notify_object { | ||
420 | struct acpi_namespace_node *node; | ||
421 | struct acpi_gpe_notify_object *next; | ||
422 | }; | ||
423 | |||
419 | union acpi_gpe_dispatch_info { | 424 | union acpi_gpe_dispatch_info { |
420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ | 425 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ |
421 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ | 426 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ |
422 | struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ | 427 | struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */ |
423 | }; | 428 | }; |
424 | 429 | ||
425 | /* | 430 | /* |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 14988a86066f..f4725212eb48 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
@@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
457 | acpi_status status; | 457 | acpi_status status; |
458 | struct acpi_gpe_event_info *local_gpe_event_info; | 458 | struct acpi_gpe_event_info *local_gpe_event_info; |
459 | struct acpi_evaluate_info *info; | 459 | struct acpi_evaluate_info *info; |
460 | struct acpi_gpe_notify_object *notify_object; | ||
460 | 461 | ||
461 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 462 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
462 | 463 | ||
@@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
508 | * from this thread -- because handlers may in turn run other | 509 | * from this thread -- because handlers may in turn run other |
509 | * control methods. | 510 | * control methods. |
510 | */ | 511 | */ |
511 | status = | 512 | status = acpi_ev_queue_notify_request( |
512 | acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. | 513 | local_gpe_event_info->dispatch.device.node, |
513 | device_node, | 514 | ACPI_NOTIFY_DEVICE_WAKE); |
514 | ACPI_NOTIFY_DEVICE_WAKE); | 515 | |
516 | notify_object = local_gpe_event_info->dispatch.device.next; | ||
517 | while (ACPI_SUCCESS(status) && notify_object) { | ||
518 | status = acpi_ev_queue_notify_request( | ||
519 | notify_object->node, | ||
520 | ACPI_NOTIFY_DEVICE_WAKE); | ||
521 | notify_object = notify_object->next; | ||
522 | } | ||
523 | |||
515 | break; | 524 | break; |
516 | 525 | ||
517 | case ACPI_GPE_DISPATCH_METHOD: | 526 | case ACPI_GPE_DISPATCH_METHOD: |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 3b20a3401b64..52aaff3df562 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
@@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
198 | acpi_status status = AE_BAD_PARAMETER; | 198 | acpi_status status = AE_BAD_PARAMETER; |
199 | struct acpi_gpe_event_info *gpe_event_info; | 199 | struct acpi_gpe_event_info *gpe_event_info; |
200 | struct acpi_namespace_node *device_node; | 200 | struct acpi_namespace_node *device_node; |
201 | struct acpi_gpe_notify_object *notify_object; | ||
201 | acpi_cpu_flags flags; | 202 | acpi_cpu_flags flags; |
203 | u8 gpe_dispatch_mask; | ||
202 | 204 | ||
203 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); | 205 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); |
204 | 206 | ||
@@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
221 | goto unlock_and_exit; | 223 | goto unlock_and_exit; |
222 | } | 224 | } |
223 | 225 | ||
226 | if (wake_device == ACPI_ROOT_OBJECT) { | ||
227 | goto out; | ||
228 | } | ||
229 | |||
224 | /* | 230 | /* |
225 | * If there is no method or handler for this GPE, then the | 231 | * If there is no method or handler for this GPE, then the |
226 | * wake_device will be notified whenever this GPE fires (aka | 232 | * wake_device will be notified whenever this GPE fires (aka |
227 | * "implicit notify") Note: The GPE is assumed to be | 233 | * "implicit notify") Note: The GPE is assumed to be |
228 | * level-triggered (for windows compatibility). | 234 | * level-triggered (for windows compatibility). |
229 | */ | 235 | */ |
230 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 236 | gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; |
231 | ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { | 237 | if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE |
238 | && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { | ||
239 | goto out; | ||
240 | } | ||
232 | 241 | ||
233 | /* Validate wake_device is of type Device */ | 242 | /* Validate wake_device is of type Device */ |
234 | 243 | ||
235 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, | 244 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); |
236 | wake_device); | 245 | if (device_node->type != ACPI_TYPE_DEVICE) { |
237 | if (device_node->type != ACPI_TYPE_DEVICE) { | 246 | goto unlock_and_exit; |
238 | goto unlock_and_exit; | 247 | } |
239 | } | 248 | |
249 | if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { | ||
240 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | | 250 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | |
241 | ACPI_GPE_LEVEL_TRIGGERED); | 251 | ACPI_GPE_LEVEL_TRIGGERED); |
242 | gpe_event_info->dispatch.device_node = device_node; | 252 | gpe_event_info->dispatch.device.node = device_node; |
253 | gpe_event_info->dispatch.device.next = NULL; | ||
254 | } else { | ||
255 | /* There are multiple devices to notify implicitly. */ | ||
256 | |||
257 | notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); | ||
258 | if (!notify_object) { | ||
259 | status = AE_NO_MEMORY; | ||
260 | goto unlock_and_exit; | ||
261 | } | ||
262 | |||
263 | notify_object->node = device_node; | ||
264 | notify_object->next = gpe_event_info->dispatch.device.next; | ||
265 | gpe_event_info->dispatch.device.next = notify_object; | ||
243 | } | 266 | } |
244 | 267 | ||
268 | out: | ||
245 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | 269 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; |
246 | status = AE_OK; | 270 | status = AE_OK; |
247 | 271 | ||
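The two ACPICA hunks above turn the single implicit-notify device pointer into a singly linked list: acpi_setup_gpe_for_wake() pushes additional wake devices onto dispatch.device.next, and acpi_ev_asynch_execute_gpe_method() walks the list, notifying each node. A small standalone C sketch of that push-and-walk handling (malloc-based, with names invented for illustration):

#include <stdio.h>
#include <stdlib.h>

struct notify_object {
	const char *node;		/* stands in for the namespace node */
	struct notify_object *next;
};

struct gpe_dispatch {
	const char *first_node;		/* the node stored in the dispatch itself */
	struct notify_object *next;	/* extra implicitly notified devices */
};

/* Push one more wake device, mirroring the evxfgpe.c insertion. */
static int add_notify_device(struct gpe_dispatch *d, const char *node)
{
	struct notify_object *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return -1;
	obj->node = node;
	obj->next = d->next;
	d->next = obj;
	return 0;
}

/* Notify every registered device, mirroring the evgpe.c traversal. */
static void notify_all(const struct gpe_dispatch *d)
{
	const struct notify_object *obj;

	printf("notify %s\n", d->first_node);
	for (obj = d->next; obj; obj = obj->next)
		printf("notify %s\n", obj->node);
}

int main(void)
{
	struct gpe_dispatch d = { .first_node = "DEV0", .next = NULL };

	add_notify_device(&d, "DEV1");
	add_notify_device(&d, "DEV2");
	notify_all(&d);		/* prints DEV0, DEV2, DEV1 (LIFO insertion) */
	return 0;
}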
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 5df67f1d6c61..384f7abcff77 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c | |||
@@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
26 | size_t count, loff_t *ppos) | 26 | size_t count, loff_t *ppos) |
27 | { | 27 | { |
28 | static char *buf; | 28 | static char *buf; |
29 | static int uncopied_bytes; | 29 | static u32 max_size; |
30 | static u32 uncopied_bytes; | ||
31 | |||
30 | struct acpi_table_header table; | 32 | struct acpi_table_header table; |
31 | acpi_status status; | 33 | acpi_status status; |
32 | 34 | ||
@@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
37 | if (copy_from_user(&table, user_buf, | 39 | if (copy_from_user(&table, user_buf, |
38 | sizeof(struct acpi_table_header))) | 40 | sizeof(struct acpi_table_header))) |
39 | return -EFAULT; | 41 | return -EFAULT; |
40 | uncopied_bytes = table.length; | 42 | uncopied_bytes = max_size = table.length; |
41 | buf = kzalloc(uncopied_bytes, GFP_KERNEL); | 43 | buf = kzalloc(max_size, GFP_KERNEL); |
42 | if (!buf) | 44 | if (!buf) |
43 | return -ENOMEM; | 45 | return -ENOMEM; |
44 | } | 46 | } |
45 | 47 | ||
46 | if (uncopied_bytes < count) { | 48 | if (buf == NULL) |
47 | kfree(buf); | 49 | return -EINVAL; |
50 | |||
51 | if ((*ppos > max_size) || | ||
52 | (*ppos + count > max_size) || | ||
53 | (*ppos + count < count) || | ||
54 | (count > uncopied_bytes)) | ||
48 | return -EINVAL; | 55 | return -EINVAL; |
49 | } | ||
50 | 56 | ||
51 | if (copy_from_user(buf + (*ppos), user_buf, count)) { | 57 | if (copy_from_user(buf + (*ppos), user_buf, count)) { |
52 | kfree(buf); | 58 | kfree(buf); |
59 | buf = NULL; | ||
53 | return -EFAULT; | 60 | return -EFAULT; |
54 | } | 61 | } |
55 | 62 | ||
@@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
59 | if (!uncopied_bytes) { | 66 | if (!uncopied_bytes) { |
60 | status = acpi_install_method(buf); | 67 | status = acpi_install_method(buf); |
61 | kfree(buf); | 68 | kfree(buf); |
69 | buf = NULL; | ||
62 | if (ACPI_FAILURE(status)) | 70 | if (ACPI_FAILURE(status)) |
63 | return -EINVAL; | 71 | return -EINVAL; |
64 | add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); | 72 | add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); |
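The custom_method debugfs fix above hardens a chunked write handler: it remembers the full table size, rejects offsets or counts that would run past the buffer (including the *ppos + count overflow case), and resets the static buffer pointer after freeing it. A compact userspace sketch of the same offset/length validation (hypothetical names, errno-style returns instead of kernel error codes):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Validate one chunk of a positional write against a fixed-size buffer. */
static int copy_chunk(uint8_t *buf, uint32_t max_size, uint32_t *remaining,
		      const uint8_t *src, size_t count, uint64_t pos)
{
	if (pos > max_size ||
	    pos + count > max_size ||
	    pos + count < count ||		/* offset + length wrapped around */
	    count > *remaining)
		return -EINVAL;

	memcpy(buf + pos, src, count);
	*remaining -= count;
	return 0;
}

int main(void)
{
	uint8_t table[64];
	uint32_t remaining = sizeof(table);
	const uint8_t chunk[16] = { 0 };

	printf("ok=%d\n", copy_chunk(table, sizeof(table), &remaining,
				     chunk, sizeof(chunk), 0));
	/* A wrapping or overrunning offset is rejected instead of corrupting memory. */
	printf("bad=%d\n", copy_chunk(table, sizeof(table), &remaining,
				      chunk, sizeof(chunk), UINT64_MAX - 4));
	return 0;
}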
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index b9ba04fc2b34..77fc76f8aea9 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
@@ -3281,7 +3281,7 @@ static int set_geometry(unsigned int cmd, struct floppy_struct *g, | |||
3281 | struct block_device *bdev = opened_bdev[cnt]; | 3281 | struct block_device *bdev = opened_bdev[cnt]; |
3282 | if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) | 3282 | if (!bdev || ITYPE(drive_state[cnt].fd_device) != type) |
3283 | continue; | 3283 | continue; |
3284 | __invalidate_device(bdev); | 3284 | __invalidate_device(bdev, true); |
3285 | } | 3285 | } |
3286 | mutex_unlock(&open_lock); | 3286 | mutex_unlock(&open_lock); |
3287 | } else { | 3287 | } else { |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 49e6a545eb63..dbf31ec9114d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -78,7 +78,6 @@ | |||
78 | 78 | ||
79 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
80 | 80 | ||
81 | static DEFINE_MUTEX(loop_mutex); | ||
82 | static LIST_HEAD(loop_devices); | 81 | static LIST_HEAD(loop_devices); |
83 | static DEFINE_MUTEX(loop_devices_mutex); | 82 | static DEFINE_MUTEX(loop_devices_mutex); |
84 | 83 | ||
@@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode) | |||
1501 | { | 1500 | { |
1502 | struct loop_device *lo = bdev->bd_disk->private_data; | 1501 | struct loop_device *lo = bdev->bd_disk->private_data; |
1503 | 1502 | ||
1504 | mutex_lock(&loop_mutex); | ||
1505 | mutex_lock(&lo->lo_ctl_mutex); | 1503 | mutex_lock(&lo->lo_ctl_mutex); |
1506 | lo->lo_refcnt++; | 1504 | lo->lo_refcnt++; |
1507 | mutex_unlock(&lo->lo_ctl_mutex); | 1505 | mutex_unlock(&lo->lo_ctl_mutex); |
1508 | mutex_unlock(&loop_mutex); | ||
1509 | 1506 | ||
1510 | return 0; | 1507 | return 0; |
1511 | } | 1508 | } |
@@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
1515 | struct loop_device *lo = disk->private_data; | 1512 | struct loop_device *lo = disk->private_data; |
1516 | int err; | 1513 | int err; |
1517 | 1514 | ||
1518 | mutex_lock(&loop_mutex); | ||
1519 | mutex_lock(&lo->lo_ctl_mutex); | 1515 | mutex_lock(&lo->lo_ctl_mutex); |
1520 | 1516 | ||
1521 | if (--lo->lo_refcnt) | 1517 | if (--lo->lo_refcnt) |
@@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
1540 | out: | 1536 | out: |
1541 | mutex_unlock(&lo->lo_ctl_mutex); | 1537 | mutex_unlock(&lo->lo_ctl_mutex); |
1542 | out_unlocked: | 1538 | out_unlocked: |
1543 | mutex_unlock(&loop_mutex); | ||
1544 | return 0; | 1539 | return 0; |
1545 | } | 1540 | } |
1546 | 1541 | ||
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 64d9c6dfc634..9cb8668ff5f4 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
@@ -285,7 +285,7 @@ static int blkif_queue_request(struct request *req) | |||
285 | info->shadow[id].request = req; | 285 | info->shadow[id].request = req; |
286 | 286 | ||
287 | ring_req->id = id; | 287 | ring_req->id = id; |
288 | ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); | 288 | ring_req->u.rw.sector_number = (blkif_sector_t)blk_rq_pos(req); |
289 | ring_req->handle = info->handle; | 289 | ring_req->handle = info->handle; |
290 | 290 | ||
291 | ring_req->operation = rq_data_dir(req) ? | 291 | ring_req->operation = rq_data_dir(req) ? |
@@ -321,7 +321,7 @@ static int blkif_queue_request(struct request *req) | |||
321 | rq_data_dir(req) ); | 321 | rq_data_dir(req) ); |
322 | 322 | ||
323 | info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); | 323 | info->shadow[id].frame[i] = mfn_to_pfn(buffer_mfn); |
324 | ring_req->seg[i] = | 324 | ring_req->u.rw.seg[i] = |
325 | (struct blkif_request_segment) { | 325 | (struct blkif_request_segment) { |
326 | .gref = ref, | 326 | .gref = ref, |
327 | .first_sect = fsect, | 327 | .first_sect = fsect, |
@@ -684,7 +684,7 @@ static void blkif_completion(struct blk_shadow *s) | |||
684 | { | 684 | { |
685 | int i; | 685 | int i; |
686 | for (i = 0; i < s->req.nr_segments; i++) | 686 | for (i = 0; i < s->req.nr_segments; i++) |
687 | gnttab_end_foreign_access(s->req.seg[i].gref, 0, 0UL); | 687 | gnttab_end_foreign_access(s->req.u.rw.seg[i].gref, 0, 0UL); |
688 | } | 688 | } |
689 | 689 | ||
690 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) | 690 | static irqreturn_t blkif_interrupt(int irq, void *dev_id) |
@@ -1001,7 +1001,7 @@ static int blkif_recover(struct blkfront_info *info) | |||
1001 | /* Rewrite any grant references invalidated by susp/resume. */ | 1001 | /* Rewrite any grant references invalidated by susp/resume. */ |
1002 | for (j = 0; j < req->nr_segments; j++) | 1002 | for (j = 0; j < req->nr_segments; j++) |
1003 | gnttab_grant_foreign_access_ref( | 1003 | gnttab_grant_foreign_access_ref( |
1004 | req->seg[j].gref, | 1004 | req->u.rw.seg[j].gref, |
1005 | info->xbdev->otherend_id, | 1005 | info->xbdev->otherend_id, |
1006 | pfn_to_mfn(info->shadow[req->id].frame[j]), | 1006 | pfn_to_mfn(info->shadow[req->id].frame[j]), |
1007 | rq_data_dir(info->shadow[req->id].request)); | 1007 | rq_data_dir(info->shadow[req->id].request)); |
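The xen-blkfront hunks track a blkif interface change: the sector number and segment array moved under a per-operation union (ring_req->u.rw.*), so requests of different types share one ring-slot layout. A tiny C sketch of that kind of tagged ring entry (field names and sizes invented for illustration):

#include <stdio.h>

enum req_op { REQ_READ, REQ_WRITE, REQ_BARRIER };

struct ring_request {
	enum req_op operation;			/* selects the valid union member */
	unsigned long id;
	union {
		struct {
			unsigned long long sector_number;
			struct { unsigned int gref; } seg[11];
		} rw;				/* read/write payload */
		struct {
			unsigned long long sector_number;
		} barrier;			/* other operations keep their own view */
	} u;
};

int main(void)
{
	struct ring_request req = { .operation = REQ_WRITE, .id = 1 };

	req.u.rw.sector_number = 2048;		/* access goes through the union member */
	req.u.rw.seg[0].gref = 42;
	printf("op=%d sector=%llu gref=%u\n",
	       req.operation, req.u.rw.sector_number, req.u.rw.seg[0].gref);
	return 0;
}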
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 333c21289d97..6dcd55a74c0a 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -41,6 +41,9 @@ static struct usb_device_id ath3k_table[] = { | |||
41 | 41 | ||
42 | /* Atheros AR9285 Malbec with sflash firmware */ | 42 | /* Atheros AR9285 Malbec with sflash firmware */ |
43 | { USB_DEVICE(0x03F0, 0x311D) }, | 43 | { USB_DEVICE(0x03F0, 0x311D) }, |
44 | |||
45 | /* Atheros AR5BBU12 with sflash firmware */ | ||
46 | { USB_DEVICE(0x0489, 0xE02C) }, | ||
44 | { } /* Terminating entry */ | 47 | { } /* Terminating entry */ |
45 | }; | 48 | }; |
46 | 49 | ||
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 4cefa91e6c34..700a3840fddc 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -105,6 +105,9 @@ static struct usb_device_id blacklist_table[] = { | |||
105 | /* Atheros AR9285 Malbec with sflash firmware */ | 105 | /* Atheros AR9285 Malbec with sflash firmware */ |
106 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | 106 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, |
107 | 107 | ||
108 | /* Atheros AR5BBU12 with sflash firmware */ | ||
109 | { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, | ||
110 | |||
108 | /* Broadcom BCM2035 */ | 111 | /* Broadcom BCM2035 */ |
109 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, | 112 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, |
110 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, | 113 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, |
@@ -829,7 +832,7 @@ static void btusb_work(struct work_struct *work) | |||
829 | 832 | ||
830 | if (hdev->conn_hash.sco_num > 0) { | 833 | if (hdev->conn_hash.sco_num > 0) { |
831 | if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { | 834 | if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { |
832 | err = usb_autopm_get_interface(data->isoc); | 835 | err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); |
833 | if (err < 0) { | 836 | if (err < 0) { |
834 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); | 837 | clear_bit(BTUSB_ISOC_RUNNING, &data->flags); |
835 | usb_kill_anchored_urbs(&data->isoc_anchor); | 838 | usb_kill_anchored_urbs(&data->isoc_anchor); |
@@ -858,7 +861,7 @@ static void btusb_work(struct work_struct *work) | |||
858 | 861 | ||
859 | __set_isoc_interface(hdev, 0); | 862 | __set_isoc_interface(hdev, 0); |
860 | if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) | 863 | if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) |
861 | usb_autopm_put_interface(data->isoc); | 864 | usb_autopm_put_interface(data->isoc ? data->isoc : data->intf); |
862 | } | 865 | } |
863 | } | 866 | } |
864 | 867 | ||
@@ -1041,8 +1044,6 @@ static int btusb_probe(struct usb_interface *intf, | |||
1041 | 1044 | ||
1042 | usb_set_intfdata(intf, data); | 1045 | usb_set_intfdata(intf, data); |
1043 | 1046 | ||
1044 | usb_enable_autosuspend(interface_to_usbdev(intf)); | ||
1045 | |||
1046 | return 0; | 1047 | return 0; |
1047 | } | 1048 | } |
1048 | 1049 | ||
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c index 9252e85706ef..780498d76581 100644 --- a/drivers/char/agp/amd64-agp.c +++ b/drivers/char/agp/amd64-agp.c | |||
@@ -773,18 +773,23 @@ int __init agp_amd64_init(void) | |||
773 | #else | 773 | #else |
774 | printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); | 774 | printk(KERN_INFO PFX "You can boot with agp=try_unsupported\n"); |
775 | #endif | 775 | #endif |
776 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
776 | return -ENODEV; | 777 | return -ENODEV; |
777 | } | 778 | } |
778 | 779 | ||
779 | /* First check that we have at least one AMD64 NB */ | 780 | /* First check that we have at least one AMD64 NB */ |
780 | if (!pci_dev_present(amd_nb_misc_ids)) | 781 | if (!pci_dev_present(amd_nb_misc_ids)) { |
782 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
781 | return -ENODEV; | 783 | return -ENODEV; |
784 | } | ||
782 | 785 | ||
783 | /* Look for any AGP bridge */ | 786 | /* Look for any AGP bridge */ |
784 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; | 787 | agp_amd64_pci_driver.id_table = agp_amd64_pci_promisc_table; |
785 | err = driver_attach(&agp_amd64_pci_driver.driver); | 788 | err = driver_attach(&agp_amd64_pci_driver.driver); |
786 | if (err == 0 && agp_bridges_found == 0) | 789 | if (err == 0 && agp_bridges_found == 0) { |
790 | pci_unregister_driver(&agp_amd64_pci_driver); | ||
787 | err = -ENODEV; | 791 | err = -ENODEV; |
792 | } | ||
788 | } | 793 | } |
789 | return err; | 794 | return err; |
790 | } | 795 | } |
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h index c195bfeade11..5feebe2800e9 100644 --- a/drivers/char/agp/intel-agp.h +++ b/drivers/char/agp/intel-agp.h | |||
@@ -130,6 +130,7 @@ | |||
130 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) | 130 | #define INTEL_GMCH_GMS_STOLEN_352M (0xd << 4) |
131 | 131 | ||
132 | #define I915_IFPADDR 0x60 | 132 | #define I915_IFPADDR 0x60 |
133 | #define I830_HIC 0x70 | ||
133 | 134 | ||
134 | /* Intel 965G registers */ | 135 | /* Intel 965G registers */ |
135 | #define I965_MSAC 0x62 | 136 | #define I965_MSAC 0x62 |
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index fab3d3265adb..0d09b537bb9a 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/pagemap.h> | 22 | #include <linux/pagemap.h> |
23 | #include <linux/agp_backend.h> | 23 | #include <linux/agp_backend.h> |
24 | #include <linux/delay.h> | ||
24 | #include <asm/smp.h> | 25 | #include <asm/smp.h> |
25 | #include "agp.h" | 26 | #include "agp.h" |
26 | #include "intel-agp.h" | 27 | #include "intel-agp.h" |
@@ -70,12 +71,8 @@ static struct _intel_private { | |||
70 | u32 __iomem *gtt; /* I915G */ | 71 | u32 __iomem *gtt; /* I915G */ |
71 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ | 72 | bool clear_fake_agp; /* on first access via agp, fill with scratch */ |
72 | int num_dcache_entries; | 73 | int num_dcache_entries; |
73 | union { | 74 | void __iomem *i9xx_flush_page; |
74 | void __iomem *i9xx_flush_page; | ||
75 | void *i8xx_flush_page; | ||
76 | }; | ||
77 | char *i81x_gtt_table; | 75 | char *i81x_gtt_table; |
78 | struct page *i8xx_page; | ||
79 | struct resource ifp_resource; | 76 | struct resource ifp_resource; |
80 | int resource_valid; | 77 | int resource_valid; |
81 | struct page *scratch_page; | 78 | struct page *scratch_page; |
@@ -722,28 +719,6 @@ static int intel_fake_agp_fetch_size(void) | |||
722 | 719 | ||
723 | static void i830_cleanup(void) | 720 | static void i830_cleanup(void) |
724 | { | 721 | { |
725 | if (intel_private.i8xx_flush_page) { | ||
726 | kunmap(intel_private.i8xx_flush_page); | ||
727 | intel_private.i8xx_flush_page = NULL; | ||
728 | } | ||
729 | |||
730 | __free_page(intel_private.i8xx_page); | ||
731 | intel_private.i8xx_page = NULL; | ||
732 | } | ||
733 | |||
734 | static void intel_i830_setup_flush(void) | ||
735 | { | ||
736 | /* return if we've already set the flush mechanism up */ | ||
737 | if (intel_private.i8xx_page) | ||
738 | return; | ||
739 | |||
740 | intel_private.i8xx_page = alloc_page(GFP_KERNEL); | ||
741 | if (!intel_private.i8xx_page) | ||
742 | return; | ||
743 | |||
744 | intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page); | ||
745 | if (!intel_private.i8xx_flush_page) | ||
746 | i830_cleanup(); | ||
747 | } | 722 | } |
748 | 723 | ||
749 | /* The chipset_flush interface needs to get data that has already been | 724 | /* The chipset_flush interface needs to get data that has already been |
@@ -758,14 +733,27 @@ static void intel_i830_setup_flush(void) | |||
758 | */ | 733 | */ |
759 | static void i830_chipset_flush(void) | 734 | static void i830_chipset_flush(void) |
760 | { | 735 | { |
761 | unsigned int *pg = intel_private.i8xx_flush_page; | 736 | unsigned long timeout = jiffies + msecs_to_jiffies(1000); |
737 | |||
738 | /* Forcibly evict everything from the CPU write buffers. | ||
739 | * clflush appears to be insufficient. | ||
740 | */ | ||
741 | wbinvd_on_all_cpus(); | ||
742 | |||
743 | /* Now we've only seen documents for this magic bit on 855GM, | ||
744 | * we hope it exists for the other gen2 chipsets... | ||
745 | * | ||
746 | * Also works as advertised on my 845G. | ||
747 | */ | ||
748 | writel(readl(intel_private.registers+I830_HIC) | (1<<31), | ||
749 | intel_private.registers+I830_HIC); | ||
762 | 750 | ||
763 | memset(pg, 0, 1024); | 751 | while (readl(intel_private.registers+I830_HIC) & (1<<31)) { |
752 | if (time_after(jiffies, timeout)) | ||
753 | break; | ||
764 | 754 | ||
765 | if (cpu_has_clflush) | 755 | udelay(50); |
766 | clflush_cache_range(pg, 1024); | 756 | } |
767 | else if (wbinvd_on_all_cpus() != 0) | ||
768 | printk(KERN_ERR "Timed out waiting for cache flush.\n"); | ||
769 | } | 757 | } |
770 | 758 | ||
771 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, | 759 | static void i830_write_entry(dma_addr_t addr, unsigned int entry, |
@@ -849,8 +837,6 @@ static int i830_setup(void) | |||
849 | 837 | ||
850 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; | 838 | intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE; |
851 | 839 | ||
852 | intel_i830_setup_flush(); | ||
853 | |||
854 | return 0; | 840 | return 0; |
855 | } | 841 | } |
856 | 842 | ||
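The intel-gtt change above drops the i8xx flush page and instead flushes CPU write buffers with wbinvd, sets a chipset bit (I830_HIC bit 31), and polls it with a one-second deadline so a chipset that never clears the bit cannot hang the machine. A generic poll-until-clear-with-timeout sketch in userspace C (read_status() is a stand-in for the hardware register access):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint32_t fake_status = 1u << 31;	/* stand-in for a hardware status register */

static uint32_t read_status(void)
{
	/* Pretend the hardware clears the busy bit after a few reads. */
	static int reads;

	if (++reads > 5)
		fake_status &= ~(1u << 31);
	return fake_status;
}

/* Poll until the busy bit clears or the deadline passes; never spin forever. */
static bool wait_not_busy(unsigned int timeout_ms)
{
	struct timespec start, now;
	long long elapsed_ms;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (read_status() & (1u << 31)) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		elapsed_ms = (long long)(now.tv_sec - start.tv_sec) * 1000 +
			     (now.tv_nsec - start.tv_nsec) / 1000000;
		if (elapsed_ms > (long long)timeout_ms)
			return false;	/* timed out, give up like the kernel loop */
		usleep(50);
	}
	return true;
}

int main(void)
{
	printf("flush %s\n", wait_not_busy(1000) ? "completed" : "timed out");
	return 0;
}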
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 7855f9f45b8e..62787e30d508 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -900,6 +900,14 @@ static void sender(void *send_info, | |||
900 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); | 900 | printk("**Enqueue: %d.%9.9d\n", t.tv_sec, t.tv_usec); |
901 | #endif | 901 | #endif |
902 | 902 | ||
903 | /* | ||
904 | * last_timeout_jiffies is updated here to avoid | ||
905 | * smi_timeout() handler passing very large time_diff | ||
906 | * value to smi_event_handler() that causes | ||
907 | * the send command to abort. | ||
908 | */ | ||
909 | smi_info->last_timeout_jiffies = jiffies; | ||
910 | |||
903 | mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); | 911 | mod_timer(&smi_info->si_timer, jiffies + SI_TIMEOUT_JIFFIES); |
904 | 912 | ||
905 | if (smi_info->thread) | 913 | if (smi_info->thread) |
diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index 777181a2e603..bcbbc71febb7 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c | |||
@@ -830,8 +830,7 @@ static void monitor_card(unsigned long p) | |||
830 | test_bit(IS_ANY_T1, &dev->flags))) { | 830 | test_bit(IS_ANY_T1, &dev->flags))) { |
831 | DEBUGP(4, dev, "Perform AUTOPPS\n"); | 831 | DEBUGP(4, dev, "Perform AUTOPPS\n"); |
832 | set_bit(IS_AUTOPPS_ACT, &dev->flags); | 832 | set_bit(IS_AUTOPPS_ACT, &dev->flags); |
833 | ptsreq.protocol = ptsreq.protocol = | 833 | ptsreq.protocol = (0x01 << dev->proto); |
834 | (0x01 << dev->proto); | ||
835 | ptsreq.flags = 0x01; | 834 | ptsreq.flags = 0x01; |
836 | ptsreq.pts1 = 0x00; | 835 | ptsreq.pts1 = 0x00; |
837 | ptsreq.pts2 = 0x00; | 836 | ptsreq.pts2 = 0x00; |
diff --git a/drivers/char/pcmcia/ipwireless/main.c b/drivers/char/pcmcia/ipwireless/main.c index 94b8eb4d691d..444155a305ae 100644 --- a/drivers/char/pcmcia/ipwireless/main.c +++ b/drivers/char/pcmcia/ipwireless/main.c | |||
@@ -78,7 +78,6 @@ static void signalled_reboot_callback(void *callback_data) | |||
78 | static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | 78 | static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) |
79 | { | 79 | { |
80 | struct ipw_dev *ipw = priv_data; | 80 | struct ipw_dev *ipw = priv_data; |
81 | struct resource *io_resource; | ||
82 | int ret; | 81 | int ret; |
83 | 82 | ||
84 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; | 83 | p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; |
@@ -92,9 +91,12 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
92 | if (ret) | 91 | if (ret) |
93 | return ret; | 92 | return ret; |
94 | 93 | ||
95 | io_resource = request_region(p_dev->resource[0]->start, | 94 | if (!request_region(p_dev->resource[0]->start, |
96 | resource_size(p_dev->resource[0]), | 95 | resource_size(p_dev->resource[0]), |
97 | IPWIRELESS_PCCARD_NAME); | 96 | IPWIRELESS_PCCARD_NAME)) { |
97 | ret = -EBUSY; | ||
98 | goto exit; | ||
99 | } | ||
98 | 100 | ||
99 | p_dev->resource[2]->flags |= | 101 | p_dev->resource[2]->flags |= |
100 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; | 102 | WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_CM | WIN_ENABLE; |
@@ -105,22 +107,25 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
105 | 107 | ||
106 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); | 108 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[2], p_dev->card_addr); |
107 | if (ret != 0) | 109 | if (ret != 0) |
108 | goto exit2; | 110 | goto exit1; |
109 | 111 | ||
110 | ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; | 112 | ipw->is_v2_card = resource_size(p_dev->resource[2]) == 0x100; |
111 | 113 | ||
112 | ipw->attr_memory = ioremap(p_dev->resource[2]->start, | 114 | ipw->common_memory = ioremap(p_dev->resource[2]->start, |
113 | resource_size(p_dev->resource[2])); | 115 | resource_size(p_dev->resource[2])); |
114 | request_mem_region(p_dev->resource[2]->start, | 116 | if (!request_mem_region(p_dev->resource[2]->start, |
115 | resource_size(p_dev->resource[2]), | 117 | resource_size(p_dev->resource[2]), |
116 | IPWIRELESS_PCCARD_NAME); | 118 | IPWIRELESS_PCCARD_NAME)) { |
119 | ret = -EBUSY; | ||
120 | goto exit2; | ||
121 | } | ||
117 | 122 | ||
118 | p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | | 123 | p_dev->resource[3]->flags |= WIN_DATA_WIDTH_16 | WIN_MEMORY_TYPE_AM | |
119 | WIN_ENABLE; | 124 | WIN_ENABLE; |
120 | p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ | 125 | p_dev->resource[3]->end = 0; /* this used to be 0x1000 */ |
121 | ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); | 126 | ret = pcmcia_request_window(p_dev, p_dev->resource[3], 0); |
122 | if (ret != 0) | 127 | if (ret != 0) |
123 | goto exit2; | 128 | goto exit3; |
124 | 129 | ||
125 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); | 130 | ret = pcmcia_map_mem_page(p_dev, p_dev->resource[3], 0); |
126 | if (ret != 0) | 131 | if (ret != 0) |
@@ -128,23 +133,28 @@ static int ipwireless_probe(struct pcmcia_device *p_dev, void *priv_data) | |||
128 | 133 | ||
129 | ipw->attr_memory = ioremap(p_dev->resource[3]->start, | 134 | ipw->attr_memory = ioremap(p_dev->resource[3]->start, |
130 | resource_size(p_dev->resource[3])); | 135 | resource_size(p_dev->resource[3])); |
131 | request_mem_region(p_dev->resource[3]->start, | 136 | if (!request_mem_region(p_dev->resource[3]->start, |
132 | resource_size(p_dev->resource[3]), | 137 | resource_size(p_dev->resource[3]), |
133 | IPWIRELESS_PCCARD_NAME); | 138 | IPWIRELESS_PCCARD_NAME)) { |
139 | ret = -EBUSY; | ||
140 | goto exit4; | ||
141 | } | ||
134 | 142 | ||
135 | return 0; | 143 | return 0; |
136 | 144 | ||
145 | exit4: | ||
146 | iounmap(ipw->attr_memory); | ||
137 | exit3: | 147 | exit3: |
148 | release_mem_region(p_dev->resource[2]->start, | ||
149 | resource_size(p_dev->resource[2])); | ||
138 | exit2: | 150 | exit2: |
139 | if (ipw->common_memory) { | 151 | iounmap(ipw->common_memory); |
140 | release_mem_region(p_dev->resource[2]->start, | ||
141 | resource_size(p_dev->resource[2])); | ||
142 | iounmap(ipw->common_memory); | ||
143 | } | ||
144 | exit1: | 152 | exit1: |
145 | release_resource(io_resource); | 153 | release_region(p_dev->resource[0]->start, |
154 | resource_size(p_dev->resource[0])); | ||
155 | exit: | ||
146 | pcmcia_disable_device(p_dev); | 156 | pcmcia_disable_device(p_dev); |
147 | return -1; | 157 | return ret; |
148 | } | 158 | } |
149 | 159 | ||
150 | static int config_ipwireless(struct ipw_dev *ipw) | 160 | static int config_ipwireless(struct ipw_dev *ipw) |
@@ -219,6 +229,8 @@ exit: | |||
219 | 229 | ||
220 | static void release_ipwireless(struct ipw_dev *ipw) | 230 | static void release_ipwireless(struct ipw_dev *ipw) |
221 | { | 231 | { |
232 | release_region(ipw->link->resource[0]->start, | ||
233 | resource_size(ipw->link->resource[0])); | ||
222 | if (ipw->common_memory) { | 234 | if (ipw->common_memory) { |
223 | release_mem_region(ipw->link->resource[2]->start, | 235 | release_mem_region(ipw->link->resource[2]->start, |
224 | resource_size(ipw->link->resource[2])); | 236 | resource_size(ipw->link->resource[2])); |
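The ipwireless probe rework above is a textbook goto-based unwind: each resource claim (I/O region, memory windows, ioremaps) is checked, and a failure jumps to a label that releases only what was already acquired, in reverse order, before returning the real error code instead of a bare -1. A minimal standalone C sketch of that error-unwinding shape (the acquire/release helpers are placeholders; a real driver would also release everything on the remove path):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what) { printf("acquire %s\n", what); return malloc(1); }
static void release(const char *what, void *h) { printf("release %s\n", what); free(h); }

static int probe(void)
{
	void *io, *mem, *map;
	int ret;

	io = acquire("io region");
	if (!io)
		return -EBUSY;

	mem = acquire("mem region");
	if (!mem) {
		ret = -EBUSY;
		goto err_io;
	}

	map = acquire("ioremap");
	if (!map) {
		ret = -ENOMEM;
		goto err_mem;	/* undo only what succeeded, newest first */
	}

	return 0;

err_mem:
	release("mem region", mem);
err_io:
	release("io region", io);
	return ret;		/* propagate the real error, not a bare -1 */
}

int main(void)
{
	printf("probe=%d\n", probe());
	return 0;
}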
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index 36e0fa161c2b..1f46f1cd9225 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -364,14 +364,12 @@ unsigned long tpm_calc_ordinal_duration(struct tpm_chip *chip, | |||
364 | tpm_protected_ordinal_duration[ordinal & | 364 | tpm_protected_ordinal_duration[ordinal & |
365 | TPM_PROTECTED_ORDINAL_MASK]; | 365 | TPM_PROTECTED_ORDINAL_MASK]; |
366 | 366 | ||
367 | if (duration_idx != TPM_UNDEFINED) { | 367 | if (duration_idx != TPM_UNDEFINED) |
368 | duration = chip->vendor.duration[duration_idx]; | 368 | duration = chip->vendor.duration[duration_idx]; |
369 | /* if duration is 0, it's because chip->vendor.duration wasn't */ | 369 | if (duration <= 0) |
370 | /* filled yet, so we set the lowest timeout just to give enough */ | ||
371 | /* time for tpm_get_timeouts() to succeed */ | ||
372 | return (duration <= 0 ? HZ : duration); | ||
373 | } else | ||
374 | return 2 * 60 * HZ; | 370 | return 2 * 60 * HZ; |
371 | else | ||
372 | return duration; | ||
375 | } | 373 | } |
376 | EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); | 374 | EXPORT_SYMBOL_GPL(tpm_calc_ordinal_duration); |
377 | 375 | ||
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 490393186338..84b164d1eb2b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
@@ -388,6 +388,10 @@ static void discard_port_data(struct port *port) | |||
388 | unsigned int len; | 388 | unsigned int len; |
389 | int ret; | 389 | int ret; |
390 | 390 | ||
391 | if (!port->portdev) { | ||
392 | /* Device has been unplugged. vqs are already gone. */ | ||
393 | return; | ||
394 | } | ||
391 | vq = port->in_vq; | 395 | vq = port->in_vq; |
392 | if (port->inbuf) | 396 | if (port->inbuf) |
393 | buf = port->inbuf; | 397 | buf = port->inbuf; |
@@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port) | |||
470 | void *buf; | 474 | void *buf; |
471 | unsigned int len; | 475 | unsigned int len; |
472 | 476 | ||
477 | if (!port->portdev) { | ||
478 | /* Device has been unplugged. vqs are already gone. */ | ||
479 | return; | ||
480 | } | ||
473 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { | 481 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { |
474 | kfree(buf); | 482 | kfree(buf); |
475 | port->outvq_full = false; | 483 | port->outvq_full = false; |
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1109f6848a43..5cb4d09919d6 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
@@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1919 | 1919 | ||
1920 | ret = sysdev_driver_register(&cpu_sysdev_class, | 1920 | ret = sysdev_driver_register(&cpu_sysdev_class, |
1921 | &cpufreq_sysdev_driver); | 1921 | &cpufreq_sysdev_driver); |
1922 | if (ret) | ||
1923 | goto err_null_driver; | ||
1922 | 1924 | ||
1923 | if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { | 1925 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { |
1924 | int i; | 1926 | int i; |
1925 | ret = -ENODEV; | 1927 | ret = -ENODEV; |
1926 | 1928 | ||
@@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
1935 | if (ret) { | 1937 | if (ret) { |
1936 | dprintk("no CPU initialized for driver %s\n", | 1938 | dprintk("no CPU initialized for driver %s\n", |
1937 | driver_data->name); | 1939 | driver_data->name); |
1938 | sysdev_driver_unregister(&cpu_sysdev_class, | 1940 | goto err_sysdev_unreg; |
1939 | &cpufreq_sysdev_driver); | ||
1940 | |||
1941 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1942 | cpufreq_driver = NULL; | ||
1943 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1944 | } | 1941 | } |
1945 | } | 1942 | } |
1946 | 1943 | ||
1947 | if (!ret) { | 1944 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
1948 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 1945 | dprintk("driver %s up and running\n", driver_data->name); |
1949 | dprintk("driver %s up and running\n", driver_data->name); | 1946 | cpufreq_debug_enable_ratelimit(); |
1950 | cpufreq_debug_enable_ratelimit(); | ||
1951 | } | ||
1952 | 1947 | ||
1948 | return 0; | ||
1949 | err_sysdev_unreg: | ||
1950 | sysdev_driver_unregister(&cpu_sysdev_class, | ||
1951 | &cpufreq_sysdev_driver); | ||
1952 | err_null_driver: | ||
1953 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
1954 | cpufreq_driver = NULL; | ||
1955 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
1953 | return ret; | 1956 | return ret; |
1954 | } | 1957 | } |
1955 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); | 1958 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); |
diff --git a/drivers/gpio/ml_ioh_gpio.c b/drivers/gpio/ml_ioh_gpio.c index cead8e6ff345..7f6f01a4b145 100644 --- a/drivers/gpio/ml_ioh_gpio.c +++ b/drivers/gpio/ml_ioh_gpio.c | |||
@@ -326,6 +326,7 @@ static DEFINE_PCI_DEVICE_TABLE(ioh_gpio_pcidev_id) = { | |||
326 | { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, | 326 | { PCI_DEVICE(PCI_VENDOR_ID_ROHM, 0x802E) }, |
327 | { 0, } | 327 | { 0, } |
328 | }; | 328 | }; |
329 | MODULE_DEVICE_TABLE(pci, ioh_gpio_pcidev_id); | ||
329 | 330 | ||
330 | static struct pci_driver ioh_gpio_driver = { | 331 | static struct pci_driver ioh_gpio_driver = { |
331 | .name = "ml_ioh_gpio", | 332 | .name = "ml_ioh_gpio", |
diff --git a/drivers/gpio/pch_gpio.c b/drivers/gpio/pch_gpio.c index 0eba0a75c804..2c6af8705103 100644 --- a/drivers/gpio/pch_gpio.c +++ b/drivers/gpio/pch_gpio.c | |||
@@ -286,6 +286,7 @@ static DEFINE_PCI_DEVICE_TABLE(pch_gpio_pcidev_id) = { | |||
286 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, | 286 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x8803) }, |
287 | { 0, } | 287 | { 0, } |
288 | }; | 288 | }; |
289 | MODULE_DEVICE_TABLE(pci, pch_gpio_pcidev_id); | ||
289 | 290 | ||
290 | static struct pci_driver pch_gpio_driver = { | 291 | static struct pci_driver pch_gpio_driver = { |
291 | .name = "pch_gpio", | 292 | .name = "pch_gpio", |
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6977a1ce9d98..f73ef4390db6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
@@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
672 | struct drm_crtc_helper_funcs *crtc_funcs; | 672 | struct drm_crtc_helper_funcs *crtc_funcs; |
673 | u16 *red, *green, *blue, *transp; | 673 | u16 *red, *green, *blue, *transp; |
674 | struct drm_crtc *crtc; | 674 | struct drm_crtc *crtc; |
675 | int i, rc = 0; | 675 | int i, j, rc = 0; |
676 | int start; | 676 | int start; |
677 | 677 | ||
678 | for (i = 0; i < fb_helper->crtc_count; i++) { | 678 | for (i = 0; i < fb_helper->crtc_count; i++) { |
@@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
685 | transp = cmap->transp; | 685 | transp = cmap->transp; |
686 | start = cmap->start; | 686 | start = cmap->start; |
687 | 687 | ||
688 | for (i = 0; i < cmap->len; i++) { | 688 | for (j = 0; j < cmap->len; j++) { |
689 | u16 hred, hgreen, hblue, htransp = 0xffff; | 689 | u16 hred, hgreen, hblue, htransp = 0xffff; |
690 | 690 | ||
691 | hred = *red++; | 691 | hred = *red++; |
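The drm_fb_helper fix above is a loop-counter reuse bug: the inner cmap loop reused i, the same variable indexing the outer per-CRTC loop, so the outer iteration state was clobbered; the fix introduces a separate j. A tiny C demonstration of why that matters (illustrative only):

#include <stdio.h>

int main(void)
{
	int i, j;
	int outer_iterations = 0;

	/* Buggy shape: the inner loop reuses i and leaves it at 3,
	 * so the outer loop terminates after a single pass. */
	for (i = 0; i < 2; i++) {
		for (i = 0; i < 3; i++)
			;
		outer_iterations++;
	}
	printf("with reused counter: outer ran %d time(s)\n", outer_iterations);

	/* Fixed shape: a distinct inner counter leaves i untouched. */
	outer_iterations = 0;
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 3; j++)
			;
		outer_iterations++;
	}
	printf("with separate counter: outer ran %d time(s)\n", outer_iterations);
	return 0;
}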
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 3dadfa2a8528..28d1d3c24d65 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c | |||
@@ -164,8 +164,10 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc) | |||
164 | * available. In that case we can't account for this and just | 164 | * available. In that case we can't account for this and just |
165 | * hope for the best. | 165 | * hope for the best. |
166 | */ | 166 | */ |
167 | if ((vblrc > 0) && (abs(diff_ns) > 1000000)) | 167 | if ((vblrc > 0) && (abs64(diff_ns) > 1000000)) { |
168 | atomic_inc(&dev->_vblank_count[crtc]); | 168 | atomic_inc(&dev->_vblank_count[crtc]); |
169 | smp_mb__after_atomic_inc(); | ||
170 | } | ||
169 | 171 | ||
170 | /* Invalidate all timestamps while vblank irq's are off. */ | 172 | /* Invalidate all timestamps while vblank irq's are off. */ |
171 | clear_vblank_timestamps(dev, crtc); | 173 | clear_vblank_timestamps(dev, crtc); |
@@ -491,6 +493,12 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc) | |||
491 | /* Dot clock in Hz: */ | 493 | /* Dot clock in Hz: */ |
492 | dotclock = (u64) crtc->hwmode.clock * 1000; | 494 | dotclock = (u64) crtc->hwmode.clock * 1000; |
493 | 495 | ||
496 | /* Fields of interlaced scanout modes are only halve a frame duration. | ||
497 | * Double the dotclock to get halve the frame-/line-/pixelduration. | ||
498 | */ | ||
499 | if (crtc->hwmode.flags & DRM_MODE_FLAG_INTERLACE) | ||
500 | dotclock *= 2; | ||
501 | |||
494 | /* Valid dotclock? */ | 502 | /* Valid dotclock? */ |
495 | if (dotclock > 0) { | 503 | if (dotclock > 0) { |
496 | /* Convert scanline length in pixels and video dot clock to | 504 | /* Convert scanline length in pixels and video dot clock to |
@@ -603,14 +611,6 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc, | |||
603 | return -EAGAIN; | 611 | return -EAGAIN; |
604 | } | 612 | } |
605 | 613 | ||
606 | /* Don't know yet how to handle interlaced or | ||
607 | * double scan modes. Just no-op for now. | ||
608 | */ | ||
609 | if (mode->flags & (DRM_MODE_FLAG_INTERLACE | DRM_MODE_FLAG_DBLSCAN)) { | ||
610 | DRM_DEBUG("crtc %d: Noop due to unsupported mode.\n", crtc); | ||
611 | return -ENOTSUPP; | ||
612 | } | ||
613 | |||
614 | /* Get current scanout position with system timestamp. | 614 | /* Get current scanout position with system timestamp. |
615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times | 615 | * Repeat query up to DRM_TIMESTAMP_MAXRETRIES times |
616 | * if single query takes longer than max_error nanoseconds. | 616 | * if single query takes longer than max_error nanoseconds. |
@@ -858,10 +858,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc) | |||
858 | if (rc) { | 858 | if (rc) { |
859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; | 859 | tslot = atomic_read(&dev->_vblank_count[crtc]) + diff; |
860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; | 860 | vblanktimestamp(dev, crtc, tslot) = t_vblank; |
861 | smp_wmb(); | ||
862 | } | 861 | } |
863 | 862 | ||
863 | smp_mb__before_atomic_inc(); | ||
864 | atomic_add(diff, &dev->_vblank_count[crtc]); | 864 | atomic_add(diff, &dev->_vblank_count[crtc]); |
865 | smp_mb__after_atomic_inc(); | ||
865 | } | 866 | } |
866 | 867 | ||
867 | /** | 868 | /** |
@@ -1011,7 +1012,8 @@ int drm_modeset_ctl(struct drm_device *dev, void *data, | |||
1011 | struct drm_file *file_priv) | 1012 | struct drm_file *file_priv) |
1012 | { | 1013 | { |
1013 | struct drm_modeset_ctl *modeset = data; | 1014 | struct drm_modeset_ctl *modeset = data; |
1014 | int crtc, ret = 0; | 1015 | int ret = 0; |
1016 | unsigned int crtc; | ||
1015 | 1017 | ||
1016 | /* If drm_vblank_init() hasn't been called yet, just no-op */ | 1018 | /* If drm_vblank_init() hasn't been called yet, just no-op */ |
1017 | if (!dev->num_crtcs) | 1019 | if (!dev->num_crtcs) |
@@ -1293,15 +1295,16 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc) | |||
1293 | * e.g., due to spurious vblank interrupts. We need to | 1295 | * e.g., due to spurious vblank interrupts. We need to |
1294 | * ignore those for accounting. | 1296 | * ignore those for accounting. |
1295 | */ | 1297 | */ |
1296 | if (abs(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { | 1298 | if (abs64(diff_ns) > DRM_REDUNDANT_VBLIRQ_THRESH_NS) { |
1297 | /* Store new timestamp in ringbuffer. */ | 1299 | /* Store new timestamp in ringbuffer. */ |
1298 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; | 1300 | vblanktimestamp(dev, crtc, vblcount + 1) = tvblank; |
1299 | smp_wmb(); | ||
1300 | 1301 | ||
1301 | /* Increment cooked vblank count. This also atomically commits | 1302 | /* Increment cooked vblank count. This also atomically commits |
1302 | * the timestamp computed above. | 1303 | * the timestamp computed above. |
1303 | */ | 1304 | */ |
1305 | smp_mb__before_atomic_inc(); | ||
1304 | atomic_inc(&dev->_vblank_count[crtc]); | 1306 | atomic_inc(&dev->_vblank_count[crtc]); |
1307 | smp_mb__after_atomic_inc(); | ||
1305 | } else { | 1308 | } else { |
1306 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", | 1309 | DRM_DEBUG("crtc %d: Redundant vblirq ignored. diff_ns = %d\n", |
1307 | crtc, (int) diff_ns); | 1310 | crtc, (int) diff_ns); |
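The drm_irq changes above replace a bare smp_wmb() with barriers paired around the atomic increment: the new timestamp is written into its ringbuffer slot first, then the vblank count is bumped with full ordering, so a reader that observes the new count also observes the matching timestamp. A userspace analogue of that publish pattern using C11 release/acquire atomics (names invented; this is the general idiom, not the DRM code):

#include <stdatomic.h>
#include <stdio.h>

#define SLOTS 4

static long long timestamps[SLOTS];
static atomic_uint vbl_count;

/* Writer: store the timestamp for the *next* count, then publish the count. */
static void publish(long long ts)
{
	unsigned int cur = atomic_load_explicit(&vbl_count, memory_order_relaxed);

	timestamps[(cur + 1) % SLOTS] = ts;
	/* Release ordering makes the slot write visible before the new counter value. */
	atomic_fetch_add_explicit(&vbl_count, 1, memory_order_release);
}

/* Reader: acquire the count; the matching slot is then guaranteed to be valid. */
static long long latest(void)
{
	unsigned int cur = atomic_load_explicit(&vbl_count, memory_order_acquire);

	return timestamps[cur % SLOTS];
}

int main(void)
{
	publish(1000);
	publish(2000);
	printf("latest timestamp = %lld\n", latest());
	return 0;
}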
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3601466c5502..4ff9b6cc973f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
865 | int max_freq; | 865 | int max_freq; |
866 | 866 | ||
867 | /* RPSTAT1 is in the GT power well */ | 867 | /* RPSTAT1 is in the GT power well */ |
868 | __gen6_force_wake_get(dev_priv); | 868 | __gen6_gt_force_wake_get(dev_priv); |
869 | 869 | ||
870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | 871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); |
@@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
889 | max_freq * 100); | 889 | max_freq * 100); |
890 | 890 | ||
891 | __gen6_force_wake_put(dev_priv); | 891 | __gen6_gt_force_wake_put(dev_priv); |
892 | } else { | 892 | } else { |
893 | seq_printf(m, "no P-state info available\n"); | 893 | seq_printf(m, "no P-state info available\n"); |
894 | } | 894 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 17bd766f2081..e33d9be7df3b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
1895 | if (IS_GEN2(dev)) | 1895 | if (IS_GEN2(dev)) |
1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
1897 | 1897 | ||
1898 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | ||
1899 | * using 32bit addressing, overwriting memory if HWS is located | ||
1900 | * above 4GB. | ||
1901 | * | ||
1902 | * The documentation also mentions an issue with undefined | ||
1903 | * behaviour if any general state is accessed within a page above 4GB, | ||
1904 | * which also needs to be handled carefully. | ||
1905 | */ | ||
1906 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
1907 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
1908 | |||
1898 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1909 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
1899 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | 1910 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
1900 | if (!dev_priv->regs) { | 1911 | if (!dev_priv->regs) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0ad533f06af9..22ec066adae6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | |||
46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
47 | module_param_named(powersave, i915_powersave, int, 0600); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
48 | 48 | ||
49 | unsigned int i915_semaphores = 0; | ||
50 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
51 | |||
49 | unsigned int i915_enable_rc6 = 0; | 52 | unsigned int i915_enable_rc6 = 0; |
50 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | 53 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); |
51 | 54 | ||
@@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev) | |||
254 | } | 257 | } |
255 | } | 258 | } |
256 | 259 | ||
257 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | 260 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
258 | { | 261 | { |
259 | int count; | 262 | int count; |
260 | 263 | ||
@@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | |||
270 | udelay(10); | 273 | udelay(10); |
271 | } | 274 | } |
272 | 275 | ||
273 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | 276 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
274 | { | 277 | { |
275 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 278 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
276 | POSTING_READ(FORCEWAKE); | 279 | POSTING_READ(FORCEWAKE); |
277 | } | 280 | } |
278 | 281 | ||
282 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
283 | { | ||
284 | int loop = 500; | ||
285 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
286 | while (fifo < 20 && loop--) { | ||
287 | udelay(10); | ||
288 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
289 | } | ||
290 | } | ||
291 | |||
279 | static int i915_drm_freeze(struct drm_device *dev) | 292 | static int i915_drm_freeze(struct drm_device *dev) |
280 | { | 293 | { |
281 | struct drm_i915_private *dev_priv = dev->dev_private; | 294 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 65dfe81d0035..456f40484838 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[]; | |||
956 | extern int i915_max_ioctl; | 956 | extern int i915_max_ioctl; |
957 | extern unsigned int i915_fbpercrtc; | 957 | extern unsigned int i915_fbpercrtc; |
958 | extern unsigned int i915_powersave; | 958 | extern unsigned int i915_powersave; |
959 | extern unsigned int i915_semaphores; | ||
959 | extern unsigned int i915_lvds_downclock; | 960 | extern unsigned int i915_lvds_downclock; |
960 | extern unsigned int i915_panel_use_ssc; | 961 | extern unsigned int i915_panel_use_ssc; |
961 | extern unsigned int i915_enable_rc6; | 962 | extern unsigned int i915_enable_rc6; |
@@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
1177 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1178 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
1178 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1179 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
1179 | 1180 | ||
1181 | uint32_t | ||
1182 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); | ||
1183 | |||
1180 | /* i915_gem_gtt.c */ | 1184 | /* i915_gem_gtt.c */ |
1181 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1185 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
1182 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1186 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
@@ -1353,22 +1357,32 @@ __i915_write(64, q) | |||
1353 | * must be set to prevent GT core from power down and stale values being | 1357 | * must be set to prevent GT core from power down and stale values being |
1354 | * returned. | 1358 | * returned. |
1355 | */ | 1359 | */ |
1356 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | 1360 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
1357 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | 1361 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
1358 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | 1362 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
1363 | |||
1364 | static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) | ||
1359 | { | 1365 | { |
1360 | u32 val; | 1366 | u32 val; |
1361 | 1367 | ||
1362 | if (dev_priv->info->gen >= 6) { | 1368 | if (dev_priv->info->gen >= 6) { |
1363 | __gen6_force_wake_get(dev_priv); | 1369 | __gen6_gt_force_wake_get(dev_priv); |
1364 | val = I915_READ(reg); | 1370 | val = I915_READ(reg); |
1365 | __gen6_force_wake_put(dev_priv); | 1371 | __gen6_gt_force_wake_put(dev_priv); |
1366 | } else | 1372 | } else |
1367 | val = I915_READ(reg); | 1373 | val = I915_READ(reg); |
1368 | 1374 | ||
1369 | return val; | 1375 | return val; |
1370 | } | 1376 | } |
1371 | 1377 | ||
1378 | static inline void i915_gt_write(struct drm_i915_private *dev_priv, | ||
1379 | u32 reg, u32 val) | ||
1380 | { | ||
1381 | if (dev_priv->info->gen >= 6) | ||
1382 | __gen6_gt_wait_for_fifo(dev_priv); | ||
1383 | I915_WRITE(reg, val); | ||
1384 | } | ||
1385 | |||
1372 | static inline void | 1386 | static inline void |
1373 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) | 1387 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) |
1374 | { | 1388 | { |
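With the rename to __gen6_gt_force_wake_get/put and the new FIFO wait, GT register access on gen6+ goes through i915_gt_read() and i915_gt_write(): reads hold a forcewake reference so the GT core cannot power down mid-access, and writes first wait for free GT FIFO entries. A hypothetical caller, purely to illustrate the intended usage (the function below does not exist in the driver; the real users are the ring macros in intel_ringbuffer.h later in this patch):

    static void sketch_dump_ring_regs(struct drm_i915_private *dev_priv,
                                      u32 mmio_base)
    {
            /* Forcewake is taken and dropped inside i915_gt_read() on gen6+. */
            u32 head = i915_gt_read(dev_priv, RING_HEAD(mmio_base));
            u32 tail = i915_gt_read(dev_priv, RING_TAIL(mmio_base));

            DRM_DEBUG_DRIVER("head=0x%08x tail=0x%08x\n", head, tail);

            /* A write through the wrapper stalls in __gen6_gt_wait_for_fifo()
             * until at least 20 GT FIFO entries are free. */
            i915_gt_write(dev_priv, RING_TAIL(mmio_base), tail);
    }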
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cf4f74c7c6fb..36e66cc5225e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) | |||
1398 | * Return the required GTT alignment for an object, only taking into account | 1398 | * Return the required GTT alignment for an object, only taking into account |
1399 | * unfenced tiled surface requirements. | 1399 | * unfenced tiled surface requirements. |
1400 | */ | 1400 | */ |
1401 | static uint32_t | 1401 | uint32_t |
1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | 1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
1403 | { | 1403 | { |
1404 | struct drm_device *dev = obj->base.dev; | 1404 | struct drm_device *dev = obj->base.dev; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d2f445e825f2..50ab1614571c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
772 | if (from == NULL || to == from) | 772 | if (from == NULL || to == from) |
773 | return 0; | 773 | return 0; |
774 | 774 | ||
775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | 775 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ |
776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | 776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) |
777 | return i915_gem_object_wait_rendering(obj, true); | 777 | return i915_gem_object_wait_rendering(obj, true); |
778 | 778 | ||
779 | idx = intel_ring_sync_index(from, to); | 779 | idx = intel_ring_sync_index(from, to); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 22a32b9932c5..d64843e18df2 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -349,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
351 | 351 | ||
352 | obj->tiling_changed = true; | 352 | /* Rebind if we need a change of alignment */ |
353 | obj->tiling_mode = args->tiling_mode; | 353 | if (!obj->map_and_fenceable) { |
354 | obj->stride = args->stride; | 354 | u32 unfenced_alignment = |
355 | i915_gem_get_unfenced_gtt_alignment(obj); | ||
356 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
357 | ret = i915_gem_object_unbind(obj); | ||
358 | } | ||
359 | |||
360 | if (ret == 0) { | ||
361 | obj->tiling_changed = true; | ||
362 | obj->tiling_mode = args->tiling_mode; | ||
363 | obj->stride = args->stride; | ||
364 | } | ||
355 | } | 365 | } |
366 | /* we have to maintain this existing ABI... */ | ||
367 | args->stride = obj->stride; | ||
368 | args->tiling_mode = obj->tiling_mode; | ||
356 | drm_gem_object_unreference(&obj->base); | 369 | drm_gem_object_unreference(&obj->base); |
357 | mutex_unlock(&dev->struct_mutex); | 370 | mutex_unlock(&dev->struct_mutex); |
358 | 371 | ||
359 | return 0; | 372 | return ret; |
360 | } | 373 | } |
361 | 374 | ||
362 | /** | 375 | /** |
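The rebind decision above uses the standard power-of-two alignment test: an offset satisfies alignment a exactly when offset & (a - 1) is zero. A tiny standalone illustration of the same arithmetic (plain userspace C, example values only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t gtt_offset = 0x21000;              /* current binding */
            uint32_t unfenced_alignment = 0x10000;      /* must be a power of two */

            if (gtt_offset & (unfenced_alignment - 1))
                    printf("misaligned for the new tiling -> unbind and rebind\n");
            else
                    printf("already aligned -> keep the current binding\n");
            return 0;
    }

Only when the unbind (if any) succeeds does the ioctl commit the new tiling_mode and stride, and it now reports the values actually in effect back to userspace.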
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 97f946dcc1aa..8a9e08bf1cf7 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -316,6 +316,8 @@ static void i915_hotplug_work_func(struct work_struct *work) | |||
316 | struct drm_mode_config *mode_config = &dev->mode_config; | 316 | struct drm_mode_config *mode_config = &dev->mode_config; |
317 | struct intel_encoder *encoder; | 317 | struct intel_encoder *encoder; |
318 | 318 | ||
319 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); | ||
320 | |||
319 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | 321 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) |
320 | if (encoder->hot_plug) | 322 | if (encoder->hot_plug) |
321 | encoder->hot_plug(encoder); | 323 | encoder->hot_plug(encoder); |
@@ -1649,9 +1651,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1649 | } else { | 1651 | } else { |
1650 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1652 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | |
1651 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | 1653 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; |
1652 | hotplug_mask |= SDE_AUX_MASK | SDE_FDI_MASK | SDE_TRANS_MASK; | 1654 | hotplug_mask |= SDE_AUX_MASK; |
1653 | I915_WRITE(FDI_RXA_IMR, 0); | ||
1654 | I915_WRITE(FDI_RXB_IMR, 0); | ||
1655 | } | 1655 | } |
1656 | 1656 | ||
1657 | dev_priv->pch_irq_mask = ~hotplug_mask; | 1657 | dev_priv->pch_irq_mask = ~hotplug_mask; |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 729d4233b763..2abe240dae58 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1553,7 +1553,17 @@ | |||
1553 | 1553 | ||
1554 | /* Backlight control */ | 1554 | /* Backlight control */ |
1555 | #define BLC_PWM_CTL 0x61254 | 1555 | #define BLC_PWM_CTL 0x61254 |
1556 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | ||
1556 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1557 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
1558 | #define BLM_COMBINATION_MODE (1 << 30) | ||
1559 | /* | ||
1560 | * This is the most significant 15 bits of the number of backlight cycles in a | ||
1561 | * complete cycle of the modulated backlight control. | ||
1562 | * | ||
1563 | * The actual value is this field multiplied by two. | ||
1564 | */ | ||
1565 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | ||
1566 | #define BLM_LEGACY_MODE (1 << 16) | ||
1557 | /* | 1567 | /* |
1558 | * This is the number of cycles out of the backlight modulation cycle for which | 1568 | * This is the number of cycles out of the backlight modulation cycle for which |
1559 | * the backlight is on. | 1569 | * the backlight is on. |
@@ -3261,6 +3271,8 @@ | |||
3261 | #define FORCEWAKE 0xA18C | 3271 | #define FORCEWAKE 0xA18C |
3262 | #define FORCEWAKE_ACK 0x130090 | 3272 | #define FORCEWAKE_ACK 0x130090 |
3263 | 3273 | ||
3274 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
3275 | |||
3264 | #define GEN6_RPNSWREQ 0xA008 | 3276 | #define GEN6_RPNSWREQ 0xA008 |
3265 | #define GEN6_TURBO_DISABLE (1<<31) | 3277 | #define GEN6_TURBO_DISABLE (1<<31) |
3266 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3278 | #define GEN6_FREQUENCY(x) ((x)<<25) |
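Per the new comment, bits 31:17 of BLC_PWM_CTL carry the top 15 bits of the backlight modulation cycle length, and the true cycle length is that field times two. Decoding is therefore mask, shift by 17, double; a small standalone example under that reading (the register value is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define BACKLIGHT_MODULATION_FREQ_MASK  (0x7fffu << 17)

    int main(void)
    {
            uint32_t blc_pwm_ctl = 0x25a00000;  /* example value only */
            uint32_t field  = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 17;
            uint32_t cycles = field * 2;        /* "actual value ... multiplied by two" */

            printf("modulation cycle = %u PWM clock ticks\n", cycles);
            return 0;
    }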
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 3b006536b3d2..49fb54fd9a18 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1219 | u32 blt_ecoskpd; | 1219 | u32 blt_ecoskpd; |
1220 | 1220 | ||
1221 | /* Make sure blitter notifies FBC of writes */ | 1221 | /* Make sure blitter notifies FBC of writes */ |
1222 | __gen6_force_wake_get(dev_priv); | 1222 | __gen6_gt_force_wake_get(dev_priv); |
1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | 1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | 1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << |
1225 | GEN6_BLITTER_LOCK_SHIFT; | 1225 | GEN6_BLITTER_LOCK_SHIFT; |
@@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
1230 | GEN6_BLITTER_LOCK_SHIFT); | 1230 | GEN6_BLITTER_LOCK_SHIFT); |
1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | 1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); |
1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | 1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); |
1233 | __gen6_force_wake_put(dev_priv); | 1233 | __gen6_gt_force_wake_put(dev_priv); |
1234 | } | 1234 | } |
1235 | 1235 | ||
1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
@@ -1630,19 +1630,19 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; | 1630 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
1631 | 1631 | ||
1632 | wait_event(dev_priv->pending_flip_queue, | 1632 | wait_event(dev_priv->pending_flip_queue, |
1633 | atomic_read(&dev_priv->mm.wedged) || | ||
1633 | atomic_read(&obj->pending_flip) == 0); | 1634 | atomic_read(&obj->pending_flip) == 0); |
1634 | 1635 | ||
1635 | /* Big Hammer, we also need to ensure that any pending | 1636 | /* Big Hammer, we also need to ensure that any pending |
1636 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | 1637 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the |
1637 | * current scanout is retired before unpinning the old | 1638 | * current scanout is retired before unpinning the old |
1638 | * framebuffer. | 1639 | * framebuffer. |
1640 | * | ||
1641 | * This should only fail upon a hung GPU, in which case we | ||
1642 | * can safely continue. | ||
1639 | */ | 1643 | */ |
1640 | ret = i915_gem_object_flush_gpu(obj, false); | 1644 | ret = i915_gem_object_flush_gpu(obj, false); |
1641 | if (ret) { | 1645 | (void) ret; |
1642 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1643 | mutex_unlock(&dev->struct_mutex); | ||
1644 | return ret; | ||
1645 | } | ||
1646 | } | 1646 | } |
1647 | 1647 | ||
1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 1648 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
@@ -2045,6 +2045,31 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) | |||
2045 | atomic_read(&obj->pending_flip) == 0); | 2045 | atomic_read(&obj->pending_flip) == 0); |
2046 | } | 2046 | } |
2047 | 2047 | ||
2048 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) | ||
2049 | { | ||
2050 | struct drm_device *dev = crtc->dev; | ||
2051 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
2052 | struct intel_encoder *encoder; | ||
2053 | |||
2054 | /* | ||
2055 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | ||
2056 | * must be driven by its own crtc; no sharing is possible. | ||
2057 | */ | ||
2058 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
2059 | if (encoder->base.crtc != crtc) | ||
2060 | continue; | ||
2061 | |||
2062 | switch (encoder->type) { | ||
2063 | case INTEL_OUTPUT_EDP: | ||
2064 | if (!intel_encoder_is_pch_edp(&encoder->base)) | ||
2065 | return false; | ||
2066 | continue; | ||
2067 | } | ||
2068 | } | ||
2069 | |||
2070 | return true; | ||
2071 | } | ||
2072 | |||
2048 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2073 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2049 | { | 2074 | { |
2050 | struct drm_device *dev = crtc->dev; | 2075 | struct drm_device *dev = crtc->dev; |
@@ -2053,6 +2078,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2053 | int pipe = intel_crtc->pipe; | 2078 | int pipe = intel_crtc->pipe; |
2054 | int plane = intel_crtc->plane; | 2079 | int plane = intel_crtc->plane; |
2055 | u32 reg, temp; | 2080 | u32 reg, temp; |
2081 | bool is_pch_port = false; | ||
2056 | 2082 | ||
2057 | if (intel_crtc->active) | 2083 | if (intel_crtc->active) |
2058 | return; | 2084 | return; |
@@ -2066,7 +2092,56 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2066 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | 2092 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); |
2067 | } | 2093 | } |
2068 | 2094 | ||
2069 | ironlake_fdi_enable(crtc); | 2095 | is_pch_port = intel_crtc_driving_pch(crtc); |
2096 | |||
2097 | if (is_pch_port) | ||
2098 | ironlake_fdi_enable(crtc); | ||
2099 | else { | ||
2100 | /* disable CPU FDI tx and PCH FDI rx */ | ||
2101 | reg = FDI_TX_CTL(pipe); | ||
2102 | temp = I915_READ(reg); | ||
2103 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | ||
2104 | POSTING_READ(reg); | ||
2105 | |||
2106 | reg = FDI_RX_CTL(pipe); | ||
2107 | temp = I915_READ(reg); | ||
2108 | temp &= ~(0x7 << 16); | ||
2109 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2110 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | ||
2111 | |||
2112 | POSTING_READ(reg); | ||
2113 | udelay(100); | ||
2114 | |||
2115 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2116 | if (HAS_PCH_IBX(dev)) | ||
2117 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2118 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2119 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2120 | |||
2121 | /* still set train pattern 1 */ | ||
2122 | reg = FDI_TX_CTL(pipe); | ||
2123 | temp = I915_READ(reg); | ||
2124 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2125 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2126 | I915_WRITE(reg, temp); | ||
2127 | |||
2128 | reg = FDI_RX_CTL(pipe); | ||
2129 | temp = I915_READ(reg); | ||
2130 | if (HAS_PCH_CPT(dev)) { | ||
2131 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2132 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | ||
2133 | } else { | ||
2134 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2135 | temp |= FDI_LINK_TRAIN_PATTERN_1; | ||
2136 | } | ||
2137 | /* BPC in FDI rx is consistent with that in PIPECONF */ | ||
2138 | temp &= ~(0x07 << 16); | ||
2139 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | ||
2140 | I915_WRITE(reg, temp); | ||
2141 | |||
2142 | POSTING_READ(reg); | ||
2143 | udelay(100); | ||
2144 | } | ||
2070 | 2145 | ||
2071 | /* Enable panel fitting for LVDS */ | 2146 | /* Enable panel fitting for LVDS */ |
2072 | if (dev_priv->pch_pf_size && | 2147 | if (dev_priv->pch_pf_size && |
@@ -2100,6 +2175,10 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2100 | intel_flush_display_plane(dev, plane); | 2175 | intel_flush_display_plane(dev, plane); |
2101 | } | 2176 | } |
2102 | 2177 | ||
2178 | /* Skip the PCH stuff if possible */ | ||
2179 | if (!is_pch_port) | ||
2180 | goto done; | ||
2181 | |||
2103 | /* For PCH output, training FDI link */ | 2182 | /* For PCH output, training FDI link */ |
2104 | if (IS_GEN6(dev)) | 2183 | if (IS_GEN6(dev)) |
2105 | gen6_fdi_link_train(crtc); | 2184 | gen6_fdi_link_train(crtc); |
@@ -2184,7 +2263,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2184 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2263 | I915_WRITE(reg, temp | TRANS_ENABLE); |
2185 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2264 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
2186 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | 2265 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
2187 | 2266 | done: | |
2188 | intel_crtc_load_lut(crtc); | 2267 | intel_crtc_load_lut(crtc); |
2189 | intel_update_fbc(dev); | 2268 | intel_update_fbc(dev); |
2190 | intel_crtc_update_cursor(crtc, true); | 2269 | intel_crtc_update_cursor(crtc, true); |
@@ -6203,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6203 | * userspace... | 6282 | * userspace... |
6204 | */ | 6283 | */ |
6205 | I915_WRITE(GEN6_RC_STATE, 0); | 6284 | I915_WRITE(GEN6_RC_STATE, 0); |
6206 | __gen6_force_wake_get(dev_priv); | 6285 | __gen6_gt_force_wake_get(dev_priv); |
6207 | 6286 | ||
6208 | /* disable the counters and set deterministic thresholds */ | 6287 | /* disable the counters and set deterministic thresholds */ |
6209 | I915_WRITE(GEN6_RC_CONTROL, 0); | 6288 | I915_WRITE(GEN6_RC_CONTROL, 0); |
@@ -6301,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
6301 | /* enable all PM interrupts */ | 6380 | /* enable all PM interrupts */ |
6302 | I915_WRITE(GEN6_PMINTRMSK, 0); | 6381 | I915_WRITE(GEN6_PMINTRMSK, 0); |
6303 | 6382 | ||
6304 | __gen6_force_wake_put(dev_priv); | 6383 | __gen6_gt_force_wake_put(dev_priv); |
6305 | } | 6384 | } |
6306 | 6385 | ||
6307 | void intel_enable_clock_gating(struct drm_device *dev) | 6386 | void intel_enable_clock_gating(struct drm_device *dev) |
@@ -6496,7 +6575,7 @@ static void ironlake_disable_rc6(struct drm_device *dev) | |||
6496 | POSTING_READ(RSTDBYCTL); | 6575 | POSTING_READ(RSTDBYCTL); |
6497 | } | 6576 | } |
6498 | 6577 | ||
6499 | ironlake_disable_rc6(dev); | 6578 | ironlake_teardown_rc6(dev); |
6500 | } | 6579 | } |
6501 | 6580 | ||
6502 | static int ironlake_setup_rc6(struct drm_device *dev) | 6581 | static int ironlake_setup_rc6(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index d860abeda70f..f8f86e57df22 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -30,6 +30,8 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
33 | void | 35 | void |
34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
35 | struct drm_display_mode *adjusted_mode) | 37 | struct drm_display_mode *adjusted_mode) |
@@ -110,6 +112,19 @@ done: | |||
110 | dev_priv->pch_pf_size = (width << 16) | height; | 112 | dev_priv->pch_pf_size = (width << 16) | height; |
111 | } | 113 | } |
112 | 114 | ||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
113 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | 128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
114 | { | 129 | { |
115 | u32 val; | 130 | u32 val; |
@@ -166,6 +181,9 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
166 | if (INTEL_INFO(dev)->gen < 4) | 181 | if (INTEL_INFO(dev)->gen < 4) |
167 | max &= ~1; | 182 | max &= ~1; |
168 | } | 183 | } |
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
169 | } | 187 | } |
170 | 188 | ||
171 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
@@ -183,6 +201,14 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
183 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
184 | if (IS_PINEVIEW(dev)) | 202 | if (IS_PINEVIEW(dev)) |
185 | val >>= 1; | 203 | val >>= 1; |
204 | |||
205 | if (is_backlight_combination_mode(dev)){ | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | } | ||
186 | } | 212 | } |
187 | 213 | ||
188 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 214 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
@@ -205,6 +231,16 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
205 | 231 | ||
206 | if (HAS_PCH_SPLIT(dev)) | 232 | if (HAS_PCH_SPLIT(dev)) |
207 | return intel_pch_panel_set_backlight(dev, level); | 233 | return intel_pch_panel_set_backlight(dev, level); |
234 | |||
235 | if (is_backlight_combination_mode(dev)){ | ||
236 | u32 max = intel_panel_get_max_backlight(dev); | ||
237 | u8 lbpc; | ||
238 | |||
239 | lbpc = level * 0xfe / max + 1; | ||
240 | level /= lbpc; | ||
241 | pci_write_config_byte(dev->pdev, PCI_LBPC, lbpc); | ||
242 | } | ||
243 | |||
208 | tmp = I915_READ(BLC_PWM_CTL); | 244 | tmp = I915_READ(BLC_PWM_CTL); |
209 | if (IS_PINEVIEW(dev)) { | 245 | if (IS_PINEVIEW(dev)) { |
210 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 246 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
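As far as these hunks show, combination mode makes the effective brightness the product of the PWM duty cycle and the LBPC byte at PCI config offset 0xf4, which is why the maximum is scaled by 0xff and why set_backlight splits the requested level into an LBPC factor and a PWM remainder. A standalone round trip of that arithmetic (example numbers only):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pwm_max = 255;                 /* example PWM period */
            uint32_t max     = pwm_max * 0xff;      /* combined range, as in get_max_backlight() */
            uint32_t level   = 40000;               /* requested combined level */

            /* Split as in intel_panel_set_backlight(): */
            uint8_t  lbpc = level * 0xfe / max + 1; /* 1..255, written to PCI reg 0xf4 */
            uint32_t duty = level / lbpc;           /* remainder goes into BLC_PWM_CTL */

            /* intel_panel_get_backlight() multiplies the two back together. */
            printf("lbpc=%u duty=%u readback=%u (requested %u)\n",
                   lbpc, duty, duty * lbpc, level);
            return 0;
    }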
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6d6fde85a636..34306865a5df 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -14,22 +14,23 @@ struct intel_hw_status_page { | |||
14 | struct drm_i915_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
15 | }; | 15 | }; |
16 | 16 | ||
17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) |
18 | #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) | ||
18 | 19 | ||
19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) | 20 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 21 | #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) |
21 | 22 | ||
22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) | 23 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 24 | #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) |
24 | 25 | ||
25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | 26 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 27 | #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) |
27 | 28 | ||
28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | 29 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 30 | #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) |
30 | 31 | ||
31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
33 | #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) | ||
33 | 34 | ||
34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | 35 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) |
35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | 36 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index d38a4d9f9b0b..a52184007f5f 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -49,7 +49,10 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) | |||
49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); | 49 | DRM_ERROR("bo %p still attached to GEM object\n", bo); |
50 | 50 | ||
51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); | 51 | nv10_mem_put_tile_region(dev, nvbo->tile, NULL); |
52 | nouveau_vm_put(&nvbo->vma); | 52 | if (nvbo->vma.node) { |
53 | nouveau_vm_unmap(&nvbo->vma); | ||
54 | nouveau_vm_put(&nvbo->vma); | ||
55 | } | ||
53 | kfree(nvbo); | 56 | kfree(nvbo); |
54 | } | 57 | } |
55 | 58 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 65699bfaaaea..b368ed74aad7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
@@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
83 | return ret; | 83 | return ret; |
84 | 84 | ||
85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); | 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, |
87 | &chan->m2mf_ntfy); | ||
87 | if (ret) | 88 | if (ret) |
88 | return ret; | 89 | return ret; |
89 | 90 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9821fcacc3d2..982d70b12722 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
@@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager; | |||
852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | 852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); |
853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | 853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); |
854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | 854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, |
855 | int cout, uint32_t *offset); | 855 | int cout, uint32_t start, uint32_t end, |
856 | uint32_t *offset); | ||
856 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | 857 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); |
857 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | 858 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, |
858 | struct drm_file *); | 859 | struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..b0fb9bdcddb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
@@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | 725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
726 | mem->page_alignment << PAGE_SHIFT, size_nc, | 726 | mem->page_alignment << PAGE_SHIFT, size_nc, |
727 | (nvbo->tile_flags >> 8) & 0xff, &node); | 727 | (nvbo->tile_flags >> 8) & 0xff, &node); |
728 | if (ret) | 728 | if (ret) { |
729 | return ret; | 729 | mem->mm_node = NULL; |
730 | return (ret == -ENOSPC) ? 0 : ret; | ||
731 | } | ||
730 | 732 | ||
731 | node->page_shift = 12; | 733 | node->page_shift = 12; |
732 | if (nvbo->vma.node) | 734 | if (nvbo->vma.node) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 8844b50c3e54..7609756b6faf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
@@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
123 | return 0; | 123 | return 0; |
124 | } | 124 | } |
125 | 125 | ||
126 | return -ENOMEM; | 126 | return -ENOSPC; |
127 | } | 127 | } |
128 | 128 | ||
129 | int | 129 | int |
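nouveau_mm_get() now reports plain lack of space as -ENOSPC, and nouveau_vram_manager_new() above turns that case into "return 0 with mem->mm_node left NULL". Presumably this follows the TTM manager convention in which a zero return with a NULL node means "no room here, let TTM evict and retry" rather than a hard failure. A sketch of that get_node shape, with both the convention and the callback signature stated as assumptions (sketch_alloc() is hypothetical):

    static int sketch_manager_get_node(struct ttm_mem_type_manager *man,
                                       struct ttm_buffer_object *bo,
                                       struct ttm_placement *placement,
                                       struct ttm_mem_reg *mem)
    {
            void *node = NULL;
            int ret = sketch_alloc(man, mem, &node);    /* hypothetical backend */

            if (ret) {
                    mem->mm_node = NULL;
                    return ret == -ENOSPC ? 0 : ret;    /* ENOSPC: let TTM evict */
            }

            mem->mm_node = node;
            return 0;
    }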
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index fe29d604b820..5ea167623a82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
@@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, | |||
96 | 96 | ||
97 | int | 97 | int |
98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | 98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, |
99 | int size, uint32_t *b_offset) | 99 | int size, uint32_t start, uint32_t end, |
100 | uint32_t *b_offset) | ||
100 | { | 101 | { |
101 | struct drm_device *dev = chan->dev; | 102 | struct drm_device *dev = chan->dev; |
102 | struct nouveau_gpuobj *nobj = NULL; | 103 | struct nouveau_gpuobj *nobj = NULL; |
@@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
104 | uint32_t offset; | 105 | uint32_t offset; |
105 | int target, ret; | 106 | int target, ret; |
106 | 107 | ||
107 | mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); | 108 | mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, |
109 | start, end, 0); | ||
108 | if (mem) | 110 | if (mem) |
109 | mem = drm_mm_get_block(mem, size, 0); | 111 | mem = drm_mm_get_block_range(mem, size, 0, start, end); |
110 | if (!mem) { | 112 | if (!mem) { |
111 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); | 113 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); |
112 | return -ENOMEM; | 114 | return -ENOMEM; |
@@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
177 | if (IS_ERR(chan)) | 179 | if (IS_ERR(chan)) |
178 | return PTR_ERR(chan); | 180 | return PTR_ERR(chan); |
179 | 181 | ||
180 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); | 182 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, |
183 | &na->offset); | ||
181 | nouveau_channel_put(&chan); | 184 | nouveau_channel_put(&chan); |
182 | return ret; | 185 | return ret; |
183 | } | 186 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..e57caa2a00e3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
@@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) | |||
403 | void | 403 | void |
404 | nv50_instmem_flush(struct drm_device *dev) | 404 | nv50_instmem_flush(struct drm_device *dev) |
405 | { | 405 | { |
406 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
407 | |||
408 | spin_lock(&dev_priv->ramin_lock); | ||
406 | nv_wr32(dev, 0x00330c, 0x00000001); | 409 | nv_wr32(dev, 0x00330c, 0x00000001); |
407 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) | 410 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) |
408 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 411 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
412 | spin_unlock(&dev_priv->ramin_lock); | ||
409 | } | 413 | } |
410 | 414 | ||
411 | void | 415 | void |
412 | nv84_instmem_flush(struct drm_device *dev) | 416 | nv84_instmem_flush(struct drm_device *dev) |
413 | { | 417 | { |
418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
419 | |||
420 | spin_lock(&dev_priv->ramin_lock); | ||
414 | nv_wr32(dev, 0x070000, 0x00000001); | 421 | nv_wr32(dev, 0x070000, 0x00000001); |
415 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) | 422 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) |
416 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 423 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
424 | spin_unlock(&dev_priv->ramin_lock); | ||
417 | } | 425 | } |
418 | 426 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 459ff08241e5..6144156f255a 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
@@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
169 | void | 169 | void |
170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | 170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) |
171 | { | 171 | { |
172 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
173 | |||
174 | spin_lock(&dev_priv->ramin_lock); | ||
172 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | 175 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); |
173 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | 176 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) |
174 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | 177 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); |
178 | spin_unlock(&dev_priv->ramin_lock); | ||
175 | } | 179 | } |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index d270b3ff896b..6140ea1de45a 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -2194,7 +2194,6 @@ int evergreen_mc_init(struct radeon_device *rdev) | |||
2194 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; | 2194 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024; |
2195 | } | 2195 | } |
2196 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2196 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2197 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2198 | r700_vram_gtt_location(rdev, &rdev->mc); | 2197 | r700_vram_gtt_location(rdev, &rdev->mc); |
2199 | radeon_update_bandwidth_info(rdev); | 2198 | radeon_update_bandwidth_info(rdev); |
2200 | 2199 | ||
@@ -2934,7 +2933,7 @@ static int evergreen_startup(struct radeon_device *rdev) | |||
2934 | /* XXX: ontario has problems blitting to gart at the moment */ | 2933 | /* XXX: ontario has problems blitting to gart at the moment */ |
2935 | if (rdev->family == CHIP_PALM) { | 2934 | if (rdev->family == CHIP_PALM) { |
2936 | rdev->asic->copy = NULL; | 2935 | rdev->asic->copy = NULL; |
2937 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 2936 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
2938 | } | 2937 | } |
2939 | 2938 | ||
2940 | /* allocate wb buffer */ | 2939 | /* allocate wb buffer */ |
diff --git a/drivers/gpu/drm/radeon/evergreen_blit_kms.c b/drivers/gpu/drm/radeon/evergreen_blit_kms.c index 2adfb03f479b..2be698e78ff2 100644 --- a/drivers/gpu/drm/radeon/evergreen_blit_kms.c +++ b/drivers/gpu/drm/radeon/evergreen_blit_kms.c | |||
@@ -623,7 +623,7 @@ done: | |||
623 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | 623 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
624 | return r; | 624 | return r; |
625 | } | 625 | } |
626 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 626 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
627 | return 0; | 627 | return 0; |
628 | } | 628 | } |
629 | 629 | ||
@@ -631,7 +631,7 @@ void evergreen_blit_fini(struct radeon_device *rdev) | |||
631 | { | 631 | { |
632 | int r; | 632 | int r; |
633 | 633 | ||
634 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 634 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
635 | if (rdev->r600_blit.shader_obj == NULL) | 635 | if (rdev->r600_blit.shader_obj == NULL) |
636 | return; | 636 | return; |
637 | /* If we can't reserve the bo, unref should be enough to destroy | 637 | /* If we can't reserve the bo, unref should be enough to destroy |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 56deae5bf02e..e372f9e1e5ce 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -70,23 +70,6 @@ MODULE_FIRMWARE(FIRMWARE_R520); | |||
70 | 70 | ||
71 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) | 71 | void r100_pre_page_flip(struct radeon_device *rdev, int crtc) |
72 | { | 72 | { |
73 | struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc]; | ||
74 | u32 tmp; | ||
75 | |||
76 | /* make sure flip is at vb rather than hb */ | ||
77 | tmp = RREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset); | ||
78 | tmp &= ~RADEON_CRTC_OFFSET_FLIP_CNTL; | ||
79 | /* make sure pending bit is asserted */ | ||
80 | tmp |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; | ||
81 | WREG32(RADEON_CRTC_OFFSET_CNTL + radeon_crtc->crtc_offset, tmp); | ||
82 | |||
83 | /* set pageflip to happen as late as possible in the vblank interval. | ||
84 | * same field for crtc1/2 | ||
85 | */ | ||
86 | tmp = RREG32(RADEON_CRTC_GEN_CNTL); | ||
87 | tmp &= ~RADEON_CRTC_VSTAT_MODE_MASK; | ||
88 | WREG32(RADEON_CRTC_GEN_CNTL, tmp); | ||
89 | |||
90 | /* enable the pflip int */ | 73 | /* enable the pflip int */ |
91 | radeon_irq_kms_pflip_irq_get(rdev, crtc); | 74 | radeon_irq_kms_pflip_irq_get(rdev, crtc); |
92 | } | 75 | } |
@@ -1041,7 +1024,7 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size) | |||
1041 | return r; | 1024 | return r; |
1042 | } | 1025 | } |
1043 | rdev->cp.ready = true; | 1026 | rdev->cp.ready = true; |
1044 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 1027 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
1045 | return 0; | 1028 | return 0; |
1046 | } | 1029 | } |
1047 | 1030 | ||
@@ -1059,7 +1042,7 @@ void r100_cp_fini(struct radeon_device *rdev) | |||
1059 | void r100_cp_disable(struct radeon_device *rdev) | 1042 | void r100_cp_disable(struct radeon_device *rdev) |
1060 | { | 1043 | { |
1061 | /* Disable ring */ | 1044 | /* Disable ring */ |
1062 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1045 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1063 | rdev->cp.ready = false; | 1046 | rdev->cp.ready = false; |
1064 | WREG32(RADEON_CP_CSQ_MODE, 0); | 1047 | WREG32(RADEON_CP_CSQ_MODE, 0); |
1065 | WREG32(RADEON_CP_CSQ_CNTL, 0); | 1048 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
@@ -2329,7 +2312,6 @@ void r100_vram_init_sizes(struct radeon_device *rdev) | |||
2329 | /* FIXME we don't use the second aperture yet when we could use it */ | 2312 | /* FIXME we don't use the second aperture yet when we could use it */ |
2330 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) | 2313 | if (rdev->mc.visible_vram_size > rdev->mc.aper_size) |
2331 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 2314 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
2332 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
2333 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); | 2315 | config_aper_size = RREG32(RADEON_CONFIG_APER_SIZE); |
2334 | if (rdev->flags & RADEON_IS_IGP) { | 2316 | if (rdev->flags & RADEON_IS_IGP) { |
2335 | uint32_t tom; | 2317 | uint32_t tom; |
@@ -3490,7 +3472,7 @@ void r100_cs_track_clear(struct radeon_device *rdev, struct r100_cs_track *track | |||
3490 | track->num_texture = 16; | 3472 | track->num_texture = 16; |
3491 | track->maxy = 4096; | 3473 | track->maxy = 4096; |
3492 | track->separate_cube = 0; | 3474 | track->separate_cube = 0; |
3493 | track->aaresolve = true; | 3475 | track->aaresolve = false; |
3494 | track->aa.robj = NULL; | 3476 | track->aa.robj = NULL; |
3495 | } | 3477 | } |
3496 | 3478 | ||
@@ -3801,8 +3783,6 @@ static int r100_startup(struct radeon_device *rdev) | |||
3801 | r100_mc_program(rdev); | 3783 | r100_mc_program(rdev); |
3802 | /* Resume clock */ | 3784 | /* Resume clock */ |
3803 | r100_clock_startup(rdev); | 3785 | r100_clock_startup(rdev); |
3804 | /* Initialize GPU configuration (# pipes, ...) */ | ||
3805 | // r100_gpu_init(rdev); | ||
3806 | /* Initialize GART (initialize after TTM so we can allocate | 3786 | /* Initialize GART (initialize after TTM so we can allocate |
3807 | * memory through TTM but finalize after TTM) */ | 3787 | * memory through TTM but finalize after TTM) */ |
3808 | r100_enable_bm(rdev); | 3788 | r100_enable_bm(rdev); |
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index de88624d5f87..9b3fad23b76c 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c | |||
@@ -1255,7 +1255,6 @@ int r600_mc_init(struct radeon_device *rdev) | |||
1255 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1255 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1256 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1256 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1257 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1257 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1258 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
1259 | r600_vram_gtt_location(rdev, &rdev->mc); | 1258 | r600_vram_gtt_location(rdev, &rdev->mc); |
1260 | 1259 | ||
1261 | if (rdev->flags & RADEON_IS_IGP) { | 1260 | if (rdev->flags & RADEON_IS_IGP) { |
@@ -1937,7 +1936,7 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v) | |||
1937 | */ | 1936 | */ |
1938 | void r600_cp_stop(struct radeon_device *rdev) | 1937 | void r600_cp_stop(struct radeon_device *rdev) |
1939 | { | 1938 | { |
1940 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 1939 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
1941 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); | 1940 | WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1)); |
1942 | WREG32(SCRATCH_UMSK, 0); | 1941 | WREG32(SCRATCH_UMSK, 0); |
1943 | } | 1942 | } |
diff --git a/drivers/gpu/drm/radeon/r600_blit_kms.c b/drivers/gpu/drm/radeon/r600_blit_kms.c index 41f7aafc97c4..df68d91e8190 100644 --- a/drivers/gpu/drm/radeon/r600_blit_kms.c +++ b/drivers/gpu/drm/radeon/r600_blit_kms.c | |||
@@ -558,7 +558,7 @@ done: | |||
558 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); | 558 | dev_err(rdev->dev, "(%d) pin blit object failed\n", r); |
559 | return r; | 559 | return r; |
560 | } | 560 | } |
561 | rdev->mc.active_vram_size = rdev->mc.real_vram_size; | 561 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size); |
562 | return 0; | 562 | return 0; |
563 | } | 563 | } |
564 | 564 | ||
@@ -566,7 +566,7 @@ void r600_blit_fini(struct radeon_device *rdev) | |||
566 | { | 566 | { |
567 | int r; | 567 | int r; |
568 | 568 | ||
569 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 569 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
570 | if (rdev->r600_blit.shader_obj == NULL) | 570 | if (rdev->r600_blit.shader_obj == NULL) |
571 | return; | 571 | return; |
572 | /* If we can't reserve the bo, unref should be enough to destroy | 572 | /* If we can't reserve the bo, unref should be enough to destroy |
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index 56c48b67ef3d..6b3429495118 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h | |||
@@ -345,7 +345,6 @@ struct radeon_mc { | |||
345 | * about vram size near mc fb location */ | 345 | * about vram size near mc fb location */ |
346 | u64 mc_vram_size; | 346 | u64 mc_vram_size; |
347 | u64 visible_vram_size; | 347 | u64 visible_vram_size; |
348 | u64 active_vram_size; | ||
349 | u64 gtt_size; | 348 | u64 gtt_size; |
350 | u64 gtt_start; | 349 | u64 gtt_start; |
351 | u64 gtt_end; | 350 | u64 gtt_end; |
@@ -1448,6 +1447,7 @@ extern void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *m | |||
1448 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); | 1447 | extern void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc); |
1449 | extern int radeon_resume_kms(struct drm_device *dev); | 1448 | extern int radeon_resume_kms(struct drm_device *dev); |
1450 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); | 1449 | extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state); |
1450 | extern void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size); | ||
1451 | 1451 | ||
1452 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ | 1452 | /* r600, rv610, rv630, rv620, rv635, rv670, rs780, rs880 */ |
1453 | extern bool r600_card_posted(struct radeon_device *rdev); | 1453 | extern bool r600_card_posted(struct radeon_device *rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index e75d63b8e21d..793c5e6026ad 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c | |||
@@ -834,6 +834,9 @@ static struct radeon_asic sumo_asic = { | |||
834 | .pm_finish = &evergreen_pm_finish, | 834 | .pm_finish = &evergreen_pm_finish, |
835 | .pm_init_profile = &rs780_pm_init_profile, | 835 | .pm_init_profile = &rs780_pm_init_profile, |
836 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, | 836 | .pm_get_dynpm_state = &r600_pm_get_dynpm_state, |
837 | .pre_page_flip = &evergreen_pre_page_flip, | ||
838 | .page_flip = &evergreen_page_flip, | ||
839 | .post_page_flip = &evergreen_post_page_flip, | ||
837 | }; | 840 | }; |
838 | 841 | ||
839 | static struct radeon_asic btc_asic = { | 842 | static struct radeon_asic btc_asic = { |
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 0e657095de7c..3e7e7f9eb781 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c | |||
@@ -971,7 +971,7 @@ void radeon_compute_pll_legacy(struct radeon_pll *pll, | |||
971 | max_fractional_feed_div = pll->max_frac_feedback_div; | 971 | max_fractional_feed_div = pll->max_frac_feedback_div; |
972 | } | 972 | } |
973 | 973 | ||
974 | for (post_div = min_post_div; post_div <= max_post_div; ++post_div) { | 974 | for (post_div = max_post_div; post_div >= min_post_div; --post_div) { |
975 | uint32_t ref_div; | 975 | uint32_t ref_div; |
976 | 976 | ||
977 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) | 977 | if ((pll->flags & RADEON_PLL_NO_ODD_POST_DIV) && (post_div & 1)) |
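Reversing the loop makes the legacy PLL search try the largest post divider first. With the usual relation output = VCO / post_div, a larger post divider for the same pixel clock means a higher VCO, so the first divider whose VCO fits the allowed range now wins; reading that as the intent is an inference from the code, not from commit text. A small standalone illustration of the relation:

    #include <stdio.h>

    int main(void)
    {
            unsigned pclk_khz = 108000;                   /* example pixel clock */
            unsigned post_divs[] = { 12, 8, 6, 4, 3, 2, 1 };
            unsigned i;

            /* Scanning from the largest divider prefers the highest VCO that
             * still meets the other constraints. */
            for (i = 0; i < sizeof(post_divs) / sizeof(post_divs[0]); i++)
                    printf("post_div=%2u -> required VCO = %u kHz\n",
                           post_divs[i], pclk_khz * post_divs[i]);
            return 0;
    }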
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c index 66324b5bb5ba..cc44bdfec80f 100644 --- a/drivers/gpu/drm/radeon/radeon_fb.c +++ b/drivers/gpu/drm/radeon/radeon_fb.c | |||
@@ -113,11 +113,14 @@ static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, | |||
113 | u32 tiling_flags = 0; | 113 | u32 tiling_flags = 0; |
114 | int ret; | 114 | int ret; |
115 | int aligned_size, size; | 115 | int aligned_size, size; |
116 | int height = mode_cmd->height; | ||
116 | 117 | ||
117 | /* need to align pitch with crtc limits */ | 118 | /* need to align pitch with crtc limits */ |
118 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); | 119 | mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); |
119 | 120 | ||
120 | size = mode_cmd->pitch * mode_cmd->height; | 121 | if (rdev->family >= CHIP_R600) |
122 | height = ALIGN(mode_cmd->height, 8); | ||
123 | size = mode_cmd->pitch * height; | ||
121 | aligned_size = ALIGN(size, PAGE_SIZE); | 124 | aligned_size = ALIGN(size, PAGE_SIZE); |
122 | ret = radeon_gem_object_create(rdev, aligned_size, 0, | 125 | ret = radeon_gem_object_create(rdev, aligned_size, 0, |
123 | RADEON_GEM_DOMAIN_VRAM, | 126 | RADEON_GEM_DOMAIN_VRAM, |
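On R600 and newer the fbdev buffer height is now rounded up to a multiple of 8 before the size is computed, presumably so the allocation covers the GPU's aligned view of the surface. The cost is at most seven extra scanlines of pitch; a quick standalone check (the pitch is a placeholder, the real one comes from radeon_align_pitch()):

    #include <stdio.h>

    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned height = 1050;                 /* e.g. a 1680x1050 console */
            unsigned pitch  = 1680 * 4;             /* placeholder pitch in bytes */

            printf("unaligned size: %u bytes\n", pitch * height);
            printf("aligned size:   %u bytes (%u lines)\n",
                   pitch * ALIGN_UP(height, 8), ALIGN_UP(height, 8));
            return 0;
    }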
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index df95eb83dac6..1fe95dfe48c9 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c | |||
@@ -156,9 +156,12 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data, | |||
156 | { | 156 | { |
157 | struct radeon_device *rdev = dev->dev_private; | 157 | struct radeon_device *rdev = dev->dev_private; |
158 | struct drm_radeon_gem_info *args = data; | 158 | struct drm_radeon_gem_info *args = data; |
159 | struct ttm_mem_type_manager *man; | ||
160 | |||
161 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
159 | 162 | ||
160 | args->vram_size = rdev->mc.real_vram_size; | 163 | args->vram_size = rdev->mc.real_vram_size; |
161 | args->vram_visible = rdev->mc.real_vram_size; | 164 | args->vram_visible = (u64)man->size << PAGE_SHIFT; |
162 | if (rdev->stollen_vga_memory) | 165 | if (rdev->stollen_vga_memory) |
163 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); | 166 | args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory); |
164 | args->vram_visible -= radeon_fbdev_total_size(rdev); | 167 | args->vram_visible -= radeon_fbdev_total_size(rdev); |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c index cf0638c3b7c7..78968b738e88 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c | |||
@@ -443,7 +443,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, | |||
443 | (target_fb->bits_per_pixel * 8)); | 443 | (target_fb->bits_per_pixel * 8)); |
444 | crtc_pitch |= crtc_pitch << 16; | 444 | crtc_pitch |= crtc_pitch << 16; |
445 | 445 | ||
446 | 446 | crtc_offset_cntl |= RADEON_CRTC_GUI_TRIG_OFFSET_LEFT_EN; | |
447 | if (tiling_flags & RADEON_TILING_MACRO) { | 447 | if (tiling_flags & RADEON_TILING_MACRO) { |
448 | if (ASIC_IS_R300(rdev)) | 448 | if (ASIC_IS_R300(rdev)) |
449 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | | 449 | crtc_offset_cntl |= (R300_CRTC_X_Y_MODE_EN | |
@@ -502,6 +502,7 @@ int radeon_crtc_do_set_base(struct drm_crtc *crtc, | |||
502 | gen_cntl_val = RREG32(gen_cntl_reg); | 502 | gen_cntl_val = RREG32(gen_cntl_reg); |
503 | gen_cntl_val &= ~(0xf << 8); | 503 | gen_cntl_val &= ~(0xf << 8); |
504 | gen_cntl_val |= (format << 8); | 504 | gen_cntl_val |= (format << 8); |
505 | gen_cntl_val &= ~RADEON_CRTC_VSTAT_MODE_MASK; | ||
505 | WREG32(gen_cntl_reg, gen_cntl_val); | 506 | WREG32(gen_cntl_reg, gen_cntl_val); |
506 | 507 | ||
507 | crtc_offset = (u32)base; | 508 | crtc_offset = (u32)base; |
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e5b2cf10cbf4..8389b4c63d12 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c | |||
@@ -589,6 +589,20 @@ void radeon_ttm_fini(struct radeon_device *rdev) | |||
589 | DRM_INFO("radeon: ttm finalized\n"); | 589 | DRM_INFO("radeon: ttm finalized\n"); |
590 | } | 590 | } |
591 | 591 | ||
592 | /* this should only be called at bootup or when userspace | ||
593 | * isn't running */ | ||
594 | void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) | ||
595 | { | ||
596 | struct ttm_mem_type_manager *man; | ||
597 | |||
598 | if (!rdev->mman.initialized) | ||
599 | return; | ||
600 | |||
601 | man = &rdev->mman.bdev.man[TTM_PL_VRAM]; | ||
602 | /* this just adjusts TTM size idea, which sets lpfn to the correct value */ | ||
603 | man->size = size >> PAGE_SHIFT; | ||
604 | } | ||
605 | |||
592 | static struct vm_operations_struct radeon_ttm_vm_ops; | 606 | static struct vm_operations_struct radeon_ttm_vm_ops; |
593 | static const struct vm_operations_struct *ttm_vm_ops = NULL; | 607 | static const struct vm_operations_struct *ttm_vm_ops = NULL; |
594 | 608 | ||
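radeon_ttm_set_active_vram_size() replaces the old rdev->mc.active_vram_size bookkeeping by resizing TTM's VRAM manager directly; man->size is in pages, hence the PAGE_SHIFT. The call sites in this patch bracket the CP/blitter lifetime, which suggests the intent: while no GPU copy engine is available, only CPU-visible VRAM is handed out, and once blitting works the full VRAM is opened up again. A sketch of that bracketing (the two helper names are invented):

    /* While the CP is down, GPU copies into non-visible VRAM are impossible,
     * so clamp TTM's idea of VRAM to the CPU-visible aperture. */
    static void sketch_gpu_copies_unavailable(struct radeon_device *rdev)
    {
            radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
    }

    /* Once the CP (and blit code) is running again, allow placement anywhere. */
    static void sketch_gpu_copies_available(struct radeon_device *rdev)
    {
            radeon_ttm_set_active_vram_size(rdev, rdev->mc.real_vram_size);
    }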
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index 5afe294ed51f..8af4679db23e 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c | |||
@@ -751,7 +751,6 @@ void rs600_mc_init(struct radeon_device *rdev) | |||
751 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); | 751 | rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE); |
752 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; | 752 | rdev->mc.mc_vram_size = rdev->mc.real_vram_size; |
753 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 753 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
754 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
755 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 754 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
756 | base = RREG32_MC(R_000004_MC_FB_LOCATION); | 755 | base = RREG32_MC(R_000004_MC_FB_LOCATION); |
757 | base = G_000004_MC_FB_START(base) << 16; | 756 | base = G_000004_MC_FB_START(base) << 16; |
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 6638c8e4c81b..66c949b7c18c 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c | |||
@@ -157,7 +157,6 @@ void rs690_mc_init(struct radeon_device *rdev) | |||
157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); | 157 | rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0); |
158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); | 158 | rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0); |
159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 159 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
160 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
161 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); | 160 | base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); |
162 | base = G_000100_MC_FB_START(base) << 16; | 161 | base = G_000100_MC_FB_START(base) << 16; |
163 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); | 162 | rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index d8ba67690656..714ad45757d0 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -307,7 +307,7 @@ static void rv770_mc_program(struct radeon_device *rdev) | |||
307 | */ | 307 | */ |
308 | void r700_cp_stop(struct radeon_device *rdev) | 308 | void r700_cp_stop(struct radeon_device *rdev) |
309 | { | 309 | { |
310 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | 310 | radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); |
311 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); | 311 | WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT)); |
312 | WREG32(SCRATCH_UMSK, 0); | 312 | WREG32(SCRATCH_UMSK, 0); |
313 | } | 313 | } |
@@ -1123,7 +1123,6 @@ int rv770_mc_init(struct radeon_device *rdev) | |||
1123 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); | 1123 | rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); |
1124 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); | 1124 | rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); |
1125 | rdev->mc.visible_vram_size = rdev->mc.aper_size; | 1125 | rdev->mc.visible_vram_size = rdev->mc.aper_size; |
1126 | rdev->mc.active_vram_size = rdev->mc.visible_vram_size; | ||
1127 | r700_vram_gtt_location(rdev, &rdev->mc); | 1126 | r700_vram_gtt_location(rdev, &rdev->mc); |
1128 | radeon_update_bandwidth_info(rdev); | 1127 | radeon_update_bandwidth_info(rdev); |
1129 | 1128 | ||
diff --git a/drivers/hwmon/ad7414.c b/drivers/hwmon/ad7414.c index 86d822aa9bbf..d46c0c758ddf 100644 --- a/drivers/hwmon/ad7414.c +++ b/drivers/hwmon/ad7414.c | |||
@@ -242,6 +242,7 @@ static const struct i2c_device_id ad7414_id[] = { | |||
242 | { "ad7414", 0 }, | 242 | { "ad7414", 0 }, |
243 | {} | 243 | {} |
244 | }; | 244 | }; |
245 | MODULE_DEVICE_TABLE(i2c, ad7414_id); | ||
245 | 246 | ||
246 | static struct i2c_driver ad7414_driver = { | 247 | static struct i2c_driver ad7414_driver = { |
247 | .driver = { | 248 | .driver = { |
diff --git a/drivers/hwmon/adt7411.c b/drivers/hwmon/adt7411.c index f13c843a2964..5cc3e3784b42 100644 --- a/drivers/hwmon/adt7411.c +++ b/drivers/hwmon/adt7411.c | |||
@@ -334,6 +334,7 @@ static const struct i2c_device_id adt7411_id[] = { | |||
334 | { "adt7411", 0 }, | 334 | { "adt7411", 0 }, |
335 | { } | 335 | { } |
336 | }; | 336 | }; |
337 | MODULE_DEVICE_TABLE(i2c, adt7411_id); | ||
337 | 338 | ||
338 | static struct i2c_driver adt7411_driver = { | 339 | static struct i2c_driver adt7411_driver = { |
339 | .driver = { | 340 | .driver = { |
diff --git a/drivers/hwmon/f71882fg.c b/drivers/hwmon/f71882fg.c index 3f49dd376f02..6e06019015a5 100644 --- a/drivers/hwmon/f71882fg.c +++ b/drivers/hwmon/f71882fg.c | |||
@@ -37,7 +37,7 @@ | |||
37 | #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ | 37 | #define SIO_F71858FG_LD_HWM 0x02 /* Hardware monitor logical device */ |
38 | #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ | 38 | #define SIO_F71882FG_LD_HWM 0x04 /* Hardware monitor logical device */ |
39 | #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ | 39 | #define SIO_UNLOCK_KEY 0x87 /* Key to enable Super-I/O */ |
40 | #define SIO_LOCK_KEY 0xAA /* Key to diasble Super-I/O */ | 40 | #define SIO_LOCK_KEY 0xAA /* Key to disable Super-I/O */ |
41 | 41 | ||
42 | #define SIO_REG_LDSEL 0x07 /* Logical device select */ | 42 | #define SIO_REG_LDSEL 0x07 /* Logical device select */ |
43 | #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ | 43 | #define SIO_REG_DEVID 0x20 /* Device ID (2 bytes) */ |
@@ -2111,7 +2111,6 @@ static int f71882fg_remove(struct platform_device *pdev) | |||
2111 | int nr_fans = (data->type == f71882fg) ? 4 : 3; | 2111 | int nr_fans = (data->type == f71882fg) ? 4 : 3; |
2112 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); | 2112 | u8 start_reg = f71882fg_read8(data, F71882FG_REG_START); |
2113 | 2113 | ||
2114 | platform_set_drvdata(pdev, NULL); | ||
2115 | if (data->hwmon_dev) | 2114 | if (data->hwmon_dev) |
2116 | hwmon_device_unregister(data->hwmon_dev); | 2115 | hwmon_device_unregister(data->hwmon_dev); |
2117 | 2116 | ||
@@ -2178,6 +2177,7 @@ static int f71882fg_remove(struct platform_device *pdev) | |||
2178 | } | 2177 | } |
2179 | } | 2178 | } |
2180 | 2179 | ||
2180 | platform_set_drvdata(pdev, NULL); | ||
2181 | kfree(data); | 2181 | kfree(data); |
2182 | 2182 | ||
2183 | return 0; | 2183 | return 0; |
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c index 2e067dd2ee51..50ea1f43bdc1 100644 --- a/drivers/i2c/busses/i2c-eg20t.c +++ b/drivers/i2c/busses/i2c-eg20t.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
30 | #include <linux/mutex.h> | 30 | #include <linux/mutex.h> |
31 | #include <linux/ktime.h> | 31 | #include <linux/ktime.h> |
32 | #include <linux/slab.h> | ||
32 | 33 | ||
33 | #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ | 34 | #define PCH_EVENT_SET 0 /* I2C Interrupt Event Set Status */ |
34 | #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ | 35 | #define PCH_EVENT_NONE 1 /* I2C Interrupt Event Clear Status */ |
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index ef3bcb1ce864..61653f079671 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -249,7 +249,7 @@ static struct i2c_adapter ocores_adapter = { | |||
249 | static int ocores_i2c_of_probe(struct platform_device* pdev, | 249 | static int ocores_i2c_of_probe(struct platform_device* pdev, |
250 | struct ocores_i2c* i2c) | 250 | struct ocores_i2c* i2c) |
251 | { | 251 | { |
252 | __be32* val; | 252 | const __be32* val; |
253 | 253 | ||
254 | val = of_get_property(pdev->dev.of_node, "regstep", NULL); | 254 | val = of_get_property(pdev->dev.of_node, "regstep", NULL); |
255 | if (!val) { | 255 | if (!val) { |
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index b605ff3a1fa0..58a58c7eaa17 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c | |||
@@ -378,9 +378,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev) | |||
378 | * REVISIT: Some wkup sources might not be needed. | 378 | * REVISIT: Some wkup sources might not be needed. |
379 | */ | 379 | */ |
380 | dev->westate = OMAP_I2C_WE_ALL; | 380 | dev->westate = OMAP_I2C_WE_ALL; |
381 | if (dev->rev < OMAP_I2C_REV_ON_4430) | 381 | omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, dev->westate); |
382 | omap_i2c_write_reg(dev, OMAP_I2C_WE_REG, | ||
383 | dev->westate); | ||
384 | } | 382 | } |
385 | } | 383 | } |
386 | omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); | 384 | omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0); |
@@ -847,11 +845,15 @@ complete: | |||
847 | dev_err(dev->dev, "Arbitration lost\n"); | 845 | dev_err(dev->dev, "Arbitration lost\n"); |
848 | err |= OMAP_I2C_STAT_AL; | 846 | err |= OMAP_I2C_STAT_AL; |
849 | } | 847 | } |
848 | /* | ||
849 | * ProDB0017052: Clear ARDY bit twice | ||
850 | */ | ||
850 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | | 851 | if (stat & (OMAP_I2C_STAT_ARDY | OMAP_I2C_STAT_NACK | |
851 | OMAP_I2C_STAT_AL)) { | 852 | OMAP_I2C_STAT_AL)) { |
852 | omap_i2c_ack_stat(dev, stat & | 853 | omap_i2c_ack_stat(dev, stat & |
853 | (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | | 854 | (OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR | |
854 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR)); | 855 | OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR | |
856 | OMAP_I2C_STAT_ARDY)); | ||
855 | omap_i2c_complete_cmd(dev, err); | 857 | omap_i2c_complete_cmd(dev, err); |
856 | return IRQ_HANDLED; | 858 | return IRQ_HANDLED; |
857 | } | 859 | } |
@@ -1137,12 +1139,41 @@ omap_i2c_remove(struct platform_device *pdev) | |||
1137 | return 0; | 1139 | return 0; |
1138 | } | 1140 | } |
1139 | 1141 | ||
1142 | #ifdef CONFIG_SUSPEND | ||
1143 | static int omap_i2c_suspend(struct device *dev) | ||
1144 | { | ||
1145 | if (!pm_runtime_suspended(dev)) | ||
1146 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_suspend) | ||
1147 | dev->bus->pm->runtime_suspend(dev); | ||
1148 | |||
1149 | return 0; | ||
1150 | } | ||
1151 | |||
1152 | static int omap_i2c_resume(struct device *dev) | ||
1153 | { | ||
1154 | if (!pm_runtime_suspended(dev)) | ||
1155 | if (dev->bus && dev->bus->pm && dev->bus->pm->runtime_resume) | ||
1156 | dev->bus->pm->runtime_resume(dev); | ||
1157 | |||
1158 | return 0; | ||
1159 | } | ||
1160 | |||
1161 | static struct dev_pm_ops omap_i2c_pm_ops = { | ||
1162 | .suspend = omap_i2c_suspend, | ||
1163 | .resume = omap_i2c_resume, | ||
1164 | }; | ||
1165 | #define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops) | ||
1166 | #else | ||
1167 | #define OMAP_I2C_PM_OPS NULL | ||
1168 | #endif | ||
1169 | |||
1140 | static struct platform_driver omap_i2c_driver = { | 1170 | static struct platform_driver omap_i2c_driver = { |
1141 | .probe = omap_i2c_probe, | 1171 | .probe = omap_i2c_probe, |
1142 | .remove = omap_i2c_remove, | 1172 | .remove = omap_i2c_remove, |
1143 | .driver = { | 1173 | .driver = { |
1144 | .name = "omap_i2c", | 1174 | .name = "omap_i2c", |
1145 | .owner = THIS_MODULE, | 1175 | .owner = THIS_MODULE, |
1176 | .pm = OMAP_I2C_PM_OPS, | ||
1146 | }, | 1177 | }, |
1147 | }; | 1178 | }; |
1148 | 1179 | ||
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c index 495be451d326..266135ddf7fa 100644 --- a/drivers/i2c/busses/i2c-stu300.c +++ b/drivers/i2c/busses/i2c-stu300.c | |||
@@ -942,7 +942,7 @@ stu300_probe(struct platform_device *pdev) | |||
942 | adap->owner = THIS_MODULE; | 942 | adap->owner = THIS_MODULE; |
943 | /* DDC class but actually often used for more generic I2C */ | 943 | /* DDC class but actually often used for more generic I2C */ |
944 | adap->class = I2C_CLASS_DDC; | 944 | adap->class = I2C_CLASS_DDC; |
945 | strncpy(adap->name, "ST Microelectronics DDC I2C adapter", | 945 | strlcpy(adap->name, "ST Microelectronics DDC I2C adapter", |
946 | sizeof(adap->name)); | 946 | sizeof(adap->name)); |
947 | adap->nr = bus_nr; | 947 | adap->nr = bus_nr; |
948 | adap->algo = &stu300_algo; | 948 | adap->algo = &stu300_algo; |
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 1fa091e05690..4a5c4a44ffb1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <linux/notifier.h> | 62 | #include <linux/notifier.h> |
63 | #include <linux/cpu.h> | 63 | #include <linux/cpu.h> |
64 | #include <asm/mwait.h> | 64 | #include <asm/mwait.h> |
65 | #include <asm/msr.h> | ||
65 | 66 | ||
66 | #define INTEL_IDLE_VERSION "0.4" | 67 | #define INTEL_IDLE_VERSION "0.4" |
67 | #define PREFIX "intel_idle: " | 68 | #define PREFIX "intel_idle: " |
@@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | |||
85 | static struct cpuidle_state *cpuidle_state_table; | 86 | static struct cpuidle_state *cpuidle_state_table; |
86 | 87 | ||
87 | /* | 88 | /* |
89 | * Hardware C-state auto-demotion may not always be optimal. | ||
90 | * Indicate which enable bits to clear here. | ||
91 | */ | ||
92 | static unsigned long long auto_demotion_disable_flags; | ||
93 | |||
94 | /* | ||
88 | * Set this flag for states where the HW flushes the TLB for us | 95 | * Set this flag for states where the HW flushes the TLB for us |
89 | * and so we don't need cross-calls to keep it consistent. | 96 | * and so we don't need cross-calls to keep it consistent. |
90 | * If this flag is set, SW flushes the TLB, so even if the | 97 | * If this flag is set, SW flushes the TLB, so even if the |
@@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = { | |||
281 | .notifier_call = setup_broadcast_cpuhp_notify, | 288 | .notifier_call = setup_broadcast_cpuhp_notify, |
282 | }; | 289 | }; |
283 | 290 | ||
291 | static void auto_demotion_disable(void *dummy) | ||
292 | { | ||
293 | unsigned long long msr_bits; | ||
294 | |||
295 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | ||
296 | msr_bits &= ~auto_demotion_disable_flags; | ||
297 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | ||
298 | } | ||
299 | |||
284 | /* | 300 | /* |
285 | * intel_idle_probe() | 301 | * intel_idle_probe() |
286 | */ | 302 | */ |
@@ -324,11 +340,17 @@ static int intel_idle_probe(void) | |||
324 | case 0x25: /* Westmere */ | 340 | case 0x25: /* Westmere */ |
325 | case 0x2C: /* Westmere */ | 341 | case 0x2C: /* Westmere */ |
326 | cpuidle_state_table = nehalem_cstates; | 342 | cpuidle_state_table = nehalem_cstates; |
343 | auto_demotion_disable_flags = | ||
344 | (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); | ||
327 | break; | 345 | break; |
328 | 346 | ||
329 | case 0x1C: /* 28 - Atom Processor */ | 347 | case 0x1C: /* 28 - Atom Processor */ |
348 | cpuidle_state_table = atom_cstates; | ||
349 | break; | ||
350 | |||
330 | case 0x26: /* 38 - Lincroft Atom Processor */ | 351 | case 0x26: /* 38 - Lincroft Atom Processor */ |
331 | cpuidle_state_table = atom_cstates; | 352 | cpuidle_state_table = atom_cstates; |
353 | auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; | ||
332 | break; | 354 | break; |
333 | 355 | ||
334 | case 0x2A: /* SNB */ | 356 | case 0x2A: /* SNB */ |
@@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void) | |||
436 | return -EIO; | 458 | return -EIO; |
437 | } | 459 | } |
438 | } | 460 | } |
461 | if (auto_demotion_disable_flags) | ||
462 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
439 | 463 | ||
440 | return 0; | 464 | return 0; |
441 | } | 465 | } |
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c index 23cf8fc933ec..5b8f59d6c3e8 100644 --- a/drivers/input/gameport/gameport.c +++ b/drivers/input/gameport/gameport.c | |||
@@ -360,7 +360,7 @@ static int gameport_queue_event(void *object, struct module *owner, | |||
360 | event->owner = owner; | 360 | event->owner = owner; |
361 | 361 | ||
362 | list_add_tail(&event->node, &gameport_event_list); | 362 | list_add_tail(&event->node, &gameport_event_list); |
363 | schedule_work(&gameport_event_work); | 363 | queue_work(system_long_wq, &gameport_event_work); |
364 | 364 | ||
365 | out: | 365 | out: |
366 | spin_unlock_irqrestore(&gameport_event_lock, flags); | 366 | spin_unlock_irqrestore(&gameport_event_lock, flags); |
diff --git a/drivers/input/keyboard/tegra-kbc.c b/drivers/input/keyboard/tegra-kbc.c index ac471b77c18e..99ce9032d08c 100644 --- a/drivers/input/keyboard/tegra-kbc.c +++ b/drivers/input/keyboard/tegra-kbc.c | |||
@@ -71,8 +71,9 @@ struct tegra_kbc { | |||
71 | spinlock_t lock; | 71 | spinlock_t lock; |
72 | unsigned int repoll_dly; | 72 | unsigned int repoll_dly; |
73 | unsigned long cp_dly_jiffies; | 73 | unsigned long cp_dly_jiffies; |
74 | bool use_fn_map; | ||
74 | const struct tegra_kbc_platform_data *pdata; | 75 | const struct tegra_kbc_platform_data *pdata; |
75 | unsigned short keycode[KBC_MAX_KEY]; | 76 | unsigned short keycode[KBC_MAX_KEY * 2]; |
76 | unsigned short current_keys[KBC_MAX_KPENT]; | 77 | unsigned short current_keys[KBC_MAX_KPENT]; |
77 | unsigned int num_pressed_keys; | 78 | unsigned int num_pressed_keys; |
78 | struct timer_list timer; | 79 | struct timer_list timer; |
@@ -178,6 +179,40 @@ static const u32 tegra_kbc_default_keymap[] = { | |||
178 | KEY(15, 5, KEY_F2), | 179 | KEY(15, 5, KEY_F2), |
179 | KEY(15, 6, KEY_CAPSLOCK), | 180 | KEY(15, 6, KEY_CAPSLOCK), |
180 | KEY(15, 7, KEY_F6), | 181 | KEY(15, 7, KEY_F6), |
182 | |||
183 | /* Software Handled Function Keys */ | ||
184 | KEY(20, 0, KEY_KP7), | ||
185 | |||
186 | KEY(21, 0, KEY_KP9), | ||
187 | KEY(21, 1, KEY_KP8), | ||
188 | KEY(21, 2, KEY_KP4), | ||
189 | KEY(21, 4, KEY_KP1), | ||
190 | |||
191 | KEY(22, 1, KEY_KPSLASH), | ||
192 | KEY(22, 2, KEY_KP6), | ||
193 | KEY(22, 3, KEY_KP5), | ||
194 | KEY(22, 4, KEY_KP3), | ||
195 | KEY(22, 5, KEY_KP2), | ||
196 | KEY(22, 7, KEY_KP0), | ||
197 | |||
198 | KEY(27, 1, KEY_KPASTERISK), | ||
199 | KEY(27, 3, KEY_KPMINUS), | ||
200 | KEY(27, 4, KEY_KPPLUS), | ||
201 | KEY(27, 5, KEY_KPDOT), | ||
202 | |||
203 | KEY(28, 5, KEY_VOLUMEUP), | ||
204 | |||
205 | KEY(29, 3, KEY_HOME), | ||
206 | KEY(29, 4, KEY_END), | ||
207 | KEY(29, 5, KEY_BRIGHTNESSDOWN), | ||
208 | KEY(29, 6, KEY_VOLUMEDOWN), | ||
209 | KEY(29, 7, KEY_BRIGHTNESSUP), | ||
210 | |||
211 | KEY(30, 0, KEY_NUMLOCK), | ||
212 | KEY(30, 1, KEY_SCROLLLOCK), | ||
213 | KEY(30, 2, KEY_MUTE), | ||
214 | |||
215 | KEY(31, 4, KEY_HELP), | ||
181 | }; | 216 | }; |
182 | 217 | ||
183 | static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { | 218 | static const struct matrix_keymap_data tegra_kbc_default_keymap_data = { |
@@ -224,6 +259,7 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) | |||
224 | unsigned int i; | 259 | unsigned int i; |
225 | unsigned int num_down = 0; | 260 | unsigned int num_down = 0; |
226 | unsigned long flags; | 261 | unsigned long flags; |
262 | bool fn_keypress = false; | ||
227 | 263 | ||
228 | spin_lock_irqsave(&kbc->lock, flags); | 264 | spin_lock_irqsave(&kbc->lock, flags); |
229 | for (i = 0; i < KBC_MAX_KPENT; i++) { | 265 | for (i = 0; i < KBC_MAX_KPENT; i++) { |
@@ -237,11 +273,28 @@ static void tegra_kbc_report_keys(struct tegra_kbc *kbc) | |||
237 | MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); | 273 | MATRIX_SCAN_CODE(row, col, KBC_ROW_SHIFT); |
238 | 274 | ||
239 | scancodes[num_down] = scancode; | 275 | scancodes[num_down] = scancode; |
240 | keycodes[num_down++] = kbc->keycode[scancode]; | 276 | keycodes[num_down] = kbc->keycode[scancode]; |
277 | /* If driver uses Fn map, do not report the Fn key. */ | ||
278 | if ((keycodes[num_down] == KEY_FN) && kbc->use_fn_map) | ||
279 | fn_keypress = true; | ||
280 | else | ||
281 | num_down++; | ||
241 | } | 282 | } |
242 | 283 | ||
243 | val >>= 8; | 284 | val >>= 8; |
244 | } | 285 | } |
286 | |||
287 | /* | ||
288 | * If the platform uses Fn keymaps, translate keys on a Fn keypress. | ||
289 | * Function keycodes are KBC_MAX_KEY apart from the plain keycodes. | ||
290 | */ | ||
291 | if (fn_keypress) { | ||
292 | for (i = 0; i < num_down; i++) { | ||
293 | scancodes[i] += KBC_MAX_KEY; | ||
294 | keycodes[i] = kbc->keycode[scancodes[i]]; | ||
295 | } | ||
296 | } | ||
297 | |||
245 | spin_unlock_irqrestore(&kbc->lock, flags); | 298 | spin_unlock_irqrestore(&kbc->lock, flags); |
246 | 299 | ||
247 | tegra_kbc_report_released_keys(kbc->idev, | 300 | tegra_kbc_report_released_keys(kbc->idev, |
@@ -594,8 +647,11 @@ static int __devinit tegra_kbc_probe(struct platform_device *pdev) | |||
594 | 647 | ||
595 | input_dev->keycode = kbc->keycode; | 648 | input_dev->keycode = kbc->keycode; |
596 | input_dev->keycodesize = sizeof(kbc->keycode[0]); | 649 | input_dev->keycodesize = sizeof(kbc->keycode[0]); |
597 | input_dev->keycodemax = ARRAY_SIZE(kbc->keycode); | 650 | input_dev->keycodemax = KBC_MAX_KEY; |
651 | if (pdata->use_fn_map) | ||
652 | input_dev->keycodemax *= 2; | ||
598 | 653 | ||
654 | kbc->use_fn_map = pdata->use_fn_map; | ||
599 | keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; | 655 | keymap_data = pdata->keymap_data ?: &tegra_kbc_default_keymap_data; |
600 | matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, | 656 | matrix_keypad_build_keymap(keymap_data, KBC_ROW_SHIFT, |
601 | input_dev->keycode, input_dev->keybit); | 657 | input_dev->keycode, input_dev->keybit); |
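The tegra-kbc hunks above implement an Fn-overlay keymap: the keycode table is doubled to KBC_MAX_KEY * 2 entries, the Fn key itself is swallowed rather than reported, and while Fn is held each pressed scancode is shifted by KBC_MAX_KEY so it indexes the second half of the table. Below is a minimal stand-alone C sketch of that remapping logic, not driver code; the table size and key values are illustrative assumptions.

/* Sketch of the Fn-overlay remap; sizes and codes are illustrative. */
#include <stdbool.h>

#define KBC_MAX_KEY 8        /* assumption: the driver derives this from rows * cols */
#define KEY_FN      0x1d0    /* KEY_FN value from linux/input.h */

/* [0 .. KBC_MAX_KEY-1] holds the plain map, [KBC_MAX_KEY .. 2*KBC_MAX_KEY-1] the Fn overlay */
static unsigned short keycode[KBC_MAX_KEY * 2];

static int remap_pressed(const int *in, int n_in, int *scancodes, unsigned short *keycodes)
{
	int num_down = 0;
	bool fn_keypress = false;

	for (int i = 0; i < n_in; i++) {
		scancodes[num_down] = in[i];
		keycodes[num_down] = keycode[in[i]];
		/* Swallow the Fn key itself, only remember that it is down. */
		if (keycodes[num_down] == KEY_FN)
			fn_keypress = true;
		else
			num_down++;
	}

	/* Fn held: re-look-up every pressed key in the overlay half of the table. */
	if (fn_keypress) {
		for (int i = 0; i < num_down; i++) {
			scancodes[i] += KBC_MAX_KEY;
			keycodes[i] = keycode[scancodes[i]];
		}
	}
	return num_down;
}

Keeping the overlay at a fixed offset lets the driver keep a single keycode[] array and a single keycodemax, which is why the probe path above simply doubles input_dev->keycodemax when pdata->use_fn_map is set.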
diff --git a/drivers/input/mouse/synaptics.h b/drivers/input/mouse/synaptics.h index 25e5d042a72c..7453938bf5ef 100644 --- a/drivers/input/mouse/synaptics.h +++ b/drivers/input/mouse/synaptics.h | |||
@@ -51,6 +51,29 @@ | |||
51 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) | 51 | #define SYN_EXT_CAP_REQUESTS(c) (((c) & 0x700000) >> 20) |
52 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) | 52 | #define SYN_CAP_MULTI_BUTTON_NO(ec) (((ec) & 0x00f000) >> 12) |
53 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) | 53 | #define SYN_CAP_PRODUCT_ID(ec) (((ec) & 0xff0000) >> 16) |
54 | |||
55 | /* | ||
56 | * The following describes response for the 0x0c query. | ||
57 | * | ||
58 | * byte mask name meaning | ||
59 | * ---- ---- ------- ------------ | ||
60 | * 1 0x01 adjustable threshold capacitive button sensitivity | ||
61 | * can be adjusted | ||
62 | * 1 0x02 report max query 0x0d gives max coord reported | ||
63 | * 1 0x04 clearpad sensor is ClearPad product | ||
64 | * 1 0x08 advanced gesture not particularly meaningful | ||
65 | * 1 0x10 clickpad bit 0 1-button ClickPad | ||
66 | * 1 0x60 multifinger mode identifies firmware finger counting | ||
67 | * (not reporting!) algorithm. | ||
68 | * Not particularly meaningful | ||
69 | * 1 0x80 covered pad W clipped to 14, 15 == pad mostly covered | ||
70 | * 2 0x01 clickpad bit 1 2-button ClickPad | ||
71 | * 2 0x02 deluxe LED controls touchpad support LED commands | ||
72 | * ala multimedia control bar | ||
73 | * 2 0x04 reduced filtering firmware does less filtering on | ||
74 | * position data, driver should watch | ||
75 | * for noise. | ||
76 | */ | ||
54 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ | 77 | #define SYN_CAP_CLICKPAD(ex0c) ((ex0c) & 0x100000) /* 1-button ClickPad */ |
55 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ | 78 | #define SYN_CAP_CLICKPAD2BTN(ex0c) ((ex0c) & 0x000100) /* 2-button ClickPad */ |
56 | #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) | 79 | #define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) |
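The comment table added to synaptics.h above documents the extended 0x0c capability query; judging from the SYN_CAP_* masks, the driver packs response byte 1 into bits 16-23 and byte 2 into bits 8-15 of a single word before testing it. A small stand-alone sketch of that decoding (the response value and the packing are assumptions inferred from the masks):

/* Decode a 0x0c extended capability response; packing inferred from the masks above. */
#include <stdio.h>

#define SYN_CAP_CLICKPAD(ex0c)       ((ex0c) & 0x100000) /* byte 1, bit 0x10: 1-button ClickPad */
#define SYN_CAP_CLICKPAD2BTN(ex0c)   ((ex0c) & 0x000100) /* byte 2, bit 0x01: 2-button ClickPad */
#define SYN_CAP_MAX_DIMENSIONS(ex0c) ((ex0c) & 0x020000) /* byte 1, bit 0x02: query 0x0d valid */

int main(void)
{
	unsigned char resp[3] = { 0x12, 0x00, 0x00 };   /* made-up query response */
	unsigned int ex0c = (resp[0] << 16) | (resp[1] << 8) | resp[2];

	printf("1-button ClickPad:  %s\n", SYN_CAP_CLICKPAD(ex0c) ? "yes" : "no");
	printf("2-button ClickPad:  %s\n", SYN_CAP_CLICKPAD2BTN(ex0c) ? "yes" : "no");
	printf("reports max coords: %s\n", SYN_CAP_MAX_DIMENSIONS(ex0c) ? "yes" : "no");
	return 0;
}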
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c index 7c38d1fbabf2..ba70058e2be3 100644 --- a/drivers/input/serio/serio.c +++ b/drivers/input/serio/serio.c | |||
@@ -299,7 +299,7 @@ static int serio_queue_event(void *object, struct module *owner, | |||
299 | event->owner = owner; | 299 | event->owner = owner; |
300 | 300 | ||
301 | list_add_tail(&event->node, &serio_event_list); | 301 | list_add_tail(&event->node, &serio_event_list); |
302 | schedule_work(&serio_event_work); | 302 | queue_work(system_long_wq, &serio_event_work); |
303 | 303 | ||
304 | out: | 304 | out: |
305 | spin_unlock_irqrestore(&serio_event_lock, flags); | 305 | spin_unlock_irqrestore(&serio_event_lock, flags); |
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c index 18f8798442fa..7bd5baa547be 100644 --- a/drivers/isdn/hardware/eicon/istream.c +++ b/drivers/isdn/hardware/eicon/istream.c | |||
@@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a, | |||
62 | stream interface. | 62 | stream interface. |
63 | If synchronous service was requested, then function | 63 | If synchronous service was requested, then function |
64 | does return amount of data written to stream. | 64 | does return amount of data written to stream. |
65 | 'final' does indicate that pice of data to be written is | 65 | 'final' does indicate that piece of data to be written is |
66 | final part of frame (necessary only by structured datatransfer) | 66 | final part of frame (necessary only by structured datatransfer) |
67 | return 0 if zero lengh packet was written | 67 | return 0 if zero lengh packet was written |
68 | return -1 if stream is full | 68 | return -1 if stream is full |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 8a2f767f26d8..0ed7f6bc2a7f 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -216,7 +216,6 @@ static int linear_run (mddev_t *mddev) | |||
216 | 216 | ||
217 | if (md_check_no_bitmap(mddev)) | 217 | if (md_check_no_bitmap(mddev)) |
218 | return -EINVAL; | 218 | return -EINVAL; |
219 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
220 | conf = linear_conf(mddev, mddev->raid_disks); | 219 | conf = linear_conf(mddev, mddev->raid_disks); |
221 | 220 | ||
222 | if (!conf) | 221 | if (!conf) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 0cc30ecda4c1..818313e277e7 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -553,6 +553,9 @@ static mddev_t * mddev_find(dev_t unit) | |||
553 | { | 553 | { |
554 | mddev_t *mddev, *new = NULL; | 554 | mddev_t *mddev, *new = NULL; |
555 | 555 | ||
556 | if (unit && MAJOR(unit) != MD_MAJOR) | ||
557 | unit &= ~((1<<MdpMinorShift)-1); | ||
558 | |||
556 | retry: | 559 | retry: |
557 | spin_lock(&all_mddevs_lock); | 560 | spin_lock(&all_mddevs_lock); |
558 | 561 | ||
@@ -4138,10 +4141,10 @@ array_size_store(mddev_t *mddev, const char *buf, size_t len) | |||
4138 | } | 4141 | } |
4139 | 4142 | ||
4140 | mddev->array_sectors = sectors; | 4143 | mddev->array_sectors = sectors; |
4141 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4144 | if (mddev->pers) { |
4142 | if (mddev->pers) | 4145 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4143 | revalidate_disk(mddev->gendisk); | 4146 | revalidate_disk(mddev->gendisk); |
4144 | 4147 | } | |
4145 | return len; | 4148 | return len; |
4146 | } | 4149 | } |
4147 | 4150 | ||
@@ -4624,6 +4627,7 @@ static int do_md_run(mddev_t *mddev) | |||
4624 | } | 4627 | } |
4625 | set_capacity(mddev->gendisk, mddev->array_sectors); | 4628 | set_capacity(mddev->gendisk, mddev->array_sectors); |
4626 | revalidate_disk(mddev->gendisk); | 4629 | revalidate_disk(mddev->gendisk); |
4630 | mddev->changed = 1; | ||
4627 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); | 4631 | kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); |
4628 | out: | 4632 | out: |
4629 | return err; | 4633 | return err; |
@@ -4712,6 +4716,7 @@ static void md_clean(mddev_t *mddev) | |||
4712 | mddev->sync_speed_min = mddev->sync_speed_max = 0; | 4716 | mddev->sync_speed_min = mddev->sync_speed_max = 0; |
4713 | mddev->recovery = 0; | 4717 | mddev->recovery = 0; |
4714 | mddev->in_sync = 0; | 4718 | mddev->in_sync = 0; |
4719 | mddev->changed = 0; | ||
4715 | mddev->degraded = 0; | 4720 | mddev->degraded = 0; |
4716 | mddev->safemode = 0; | 4721 | mddev->safemode = 0; |
4717 | mddev->bitmap_info.offset = 0; | 4722 | mddev->bitmap_info.offset = 0; |
@@ -4827,6 +4832,7 @@ static int do_md_stop(mddev_t * mddev, int mode, int is_open) | |||
4827 | 4832 | ||
4828 | set_capacity(disk, 0); | 4833 | set_capacity(disk, 0); |
4829 | mutex_unlock(&mddev->open_mutex); | 4834 | mutex_unlock(&mddev->open_mutex); |
4835 | mddev->changed = 1; | ||
4830 | revalidate_disk(disk); | 4836 | revalidate_disk(disk); |
4831 | 4837 | ||
4832 | if (mddev->ro) | 4838 | if (mddev->ro) |
@@ -6011,7 +6017,7 @@ static int md_open(struct block_device *bdev, fmode_t mode) | |||
6011 | atomic_inc(&mddev->openers); | 6017 | atomic_inc(&mddev->openers); |
6012 | mutex_unlock(&mddev->open_mutex); | 6018 | mutex_unlock(&mddev->open_mutex); |
6013 | 6019 | ||
6014 | check_disk_size_change(mddev->gendisk, bdev); | 6020 | check_disk_change(bdev); |
6015 | out: | 6021 | out: |
6016 | return err; | 6022 | return err; |
6017 | } | 6023 | } |
@@ -6026,6 +6032,21 @@ static int md_release(struct gendisk *disk, fmode_t mode) | |||
6026 | 6032 | ||
6027 | return 0; | 6033 | return 0; |
6028 | } | 6034 | } |
6035 | |||
6036 | static int md_media_changed(struct gendisk *disk) | ||
6037 | { | ||
6038 | mddev_t *mddev = disk->private_data; | ||
6039 | |||
6040 | return mddev->changed; | ||
6041 | } | ||
6042 | |||
6043 | static int md_revalidate(struct gendisk *disk) | ||
6044 | { | ||
6045 | mddev_t *mddev = disk->private_data; | ||
6046 | |||
6047 | mddev->changed = 0; | ||
6048 | return 0; | ||
6049 | } | ||
6029 | static const struct block_device_operations md_fops = | 6050 | static const struct block_device_operations md_fops = |
6030 | { | 6051 | { |
6031 | .owner = THIS_MODULE, | 6052 | .owner = THIS_MODULE, |
@@ -6036,6 +6057,8 @@ static const struct block_device_operations md_fops = | |||
6036 | .compat_ioctl = md_compat_ioctl, | 6057 | .compat_ioctl = md_compat_ioctl, |
6037 | #endif | 6058 | #endif |
6038 | .getgeo = md_getgeo, | 6059 | .getgeo = md_getgeo, |
6060 | .media_changed = md_media_changed, | ||
6061 | .revalidate_disk= md_revalidate, | ||
6039 | }; | 6062 | }; |
6040 | 6063 | ||
6041 | static int md_thread(void * arg) | 6064 | static int md_thread(void * arg) |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 7e90b8593b2a..12215d437fcc 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -274,6 +274,8 @@ struct mddev_s | |||
274 | atomic_t active; /* general refcount */ | 274 | atomic_t active; /* general refcount */ |
275 | atomic_t openers; /* number of active opens */ | 275 | atomic_t openers; /* number of active opens */ |
276 | 276 | ||
277 | int changed; /* True if we might need to | ||
278 | * reread partition info */ | ||
277 | int degraded; /* whether md should consider | 279 | int degraded; /* whether md should consider |
278 | * adding a spare | 280 | * adding a spare |
279 | */ | 281 | */ |
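The md changes above wire up the block layer's standard media-change protocol: mddev->changed is set whenever the array size changes, md_open() calls check_disk_change() instead of check_disk_size_change(), and the new .media_changed/.revalidate_disk operations let the block layer notice the change and reread the partition table. A generic sketch of the same pattern for a hypothetical block driver ("mydev" and its fields are illustrative, not taken from md):

/* Sketch of the media-change pattern; "mydev" is a hypothetical driver. */
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/module.h>

struct mydev {
	struct gendisk *disk;
	int changed;               /* set wherever the capacity is altered */
};

static int mydev_media_changed(struct gendisk *disk)
{
	struct mydev *dev = disk->private_data;
	return dev->changed;
}

static int mydev_revalidate(struct gendisk *disk)
{
	struct mydev *dev = disk->private_data;
	dev->changed = 0;          /* the block layer has now seen the change */
	return 0;
}

static int mydev_open(struct block_device *bdev, fmode_t mode)
{
	/* Runs ->media_changed and, if it returns true, ->revalidate_disk
	 * plus a partition rescan, mirroring what md_open() does above. */
	check_disk_change(bdev);
	return 0;
}

static const struct block_device_operations mydev_fops = {
	.owner           = THIS_MODULE,
	.open            = mydev_open,
	.media_changed   = mydev_media_changed,
	.revalidate_disk = mydev_revalidate,
};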
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 6d7ddf32ef2e..3a62d440e27b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -435,7 +435,6 @@ static int multipath_run (mddev_t *mddev) | |||
435 | * bookkeeping area. [whatever we allocate in multipath_run(), | 435 | * bookkeeping area. [whatever we allocate in multipath_run(), |
436 | * should be freed in multipath_stop()] | 436 | * should be freed in multipath_stop()] |
437 | */ | 437 | */ |
438 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
439 | 438 | ||
440 | conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); | 439 | conf = kzalloc(sizeof(multipath_conf_t), GFP_KERNEL); |
441 | mddev->private = conf; | 440 | mddev->private = conf; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 637a96855edb..c0ac457f1218 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -361,7 +361,6 @@ static int raid0_run(mddev_t *mddev) | |||
361 | if (md_check_no_bitmap(mddev)) | 361 | if (md_check_no_bitmap(mddev)) |
362 | return -EINVAL; | 362 | return -EINVAL; |
363 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); | 363 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
364 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | ||
365 | 364 | ||
366 | /* if private is not null, we are here after takeover */ | 365 | /* if private is not null, we are here after takeover */ |
367 | if (mddev->private == NULL) { | 366 | if (mddev->private == NULL) { |
@@ -670,6 +669,7 @@ static void *raid0_takeover_raid1(mddev_t *mddev) | |||
670 | mddev->new_layout = 0; | 669 | mddev->new_layout = 0; |
671 | mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ | 670 | mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */ |
672 | mddev->delta_disks = 1 - mddev->raid_disks; | 671 | mddev->delta_disks = 1 - mddev->raid_disks; |
672 | mddev->raid_disks = 1; | ||
673 | /* make sure it will be not marked as dirty */ | 673 | /* make sure it will be not marked as dirty */ |
674 | mddev->recovery_cp = MaxSector; | 674 | mddev->recovery_cp = MaxSector; |
675 | 675 | ||
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a23ffa397ba9..06cd712807d0 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -593,7 +593,10 @@ static int flush_pending_writes(conf_t *conf) | |||
593 | if (conf->pending_bio_list.head) { | 593 | if (conf->pending_bio_list.head) { |
594 | struct bio *bio; | 594 | struct bio *bio; |
595 | bio = bio_list_get(&conf->pending_bio_list); | 595 | bio = bio_list_get(&conf->pending_bio_list); |
596 | /* Only take the spinlock to quiet a warning */ | ||
597 | spin_lock(conf->mddev->queue->queue_lock); | ||
596 | blk_remove_plug(conf->mddev->queue); | 598 | blk_remove_plug(conf->mddev->queue); |
599 | spin_unlock(conf->mddev->queue->queue_lock); | ||
597 | spin_unlock_irq(&conf->device_lock); | 600 | spin_unlock_irq(&conf->device_lock); |
598 | /* flush any pending bitmap writes to | 601 | /* flush any pending bitmap writes to |
599 | * disk before proceeding w/ I/O */ | 602 | * disk before proceeding w/ I/O */ |
@@ -959,7 +962,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
959 | atomic_inc(&r1_bio->remaining); | 962 | atomic_inc(&r1_bio->remaining); |
960 | spin_lock_irqsave(&conf->device_lock, flags); | 963 | spin_lock_irqsave(&conf->device_lock, flags); |
961 | bio_list_add(&conf->pending_bio_list, mbio); | 964 | bio_list_add(&conf->pending_bio_list, mbio); |
962 | blk_plug_device(mddev->queue); | 965 | blk_plug_device_unlocked(mddev->queue); |
963 | spin_unlock_irqrestore(&conf->device_lock, flags); | 966 | spin_unlock_irqrestore(&conf->device_lock, flags); |
964 | } | 967 | } |
965 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); | 968 | r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL); |
@@ -2021,7 +2024,6 @@ static int run(mddev_t *mddev) | |||
2021 | if (IS_ERR(conf)) | 2024 | if (IS_ERR(conf)) |
2022 | return PTR_ERR(conf); | 2025 | return PTR_ERR(conf); |
2023 | 2026 | ||
2024 | mddev->queue->queue_lock = &conf->device_lock; | ||
2025 | list_for_each_entry(rdev, &mddev->disks, same_set) { | 2027 | list_for_each_entry(rdev, &mddev->disks, same_set) { |
2026 | disk_stack_limits(mddev->gendisk, rdev->bdev, | 2028 | disk_stack_limits(mddev->gendisk, rdev->bdev, |
2027 | rdev->data_offset << 9); | 2029 | rdev->data_offset << 9); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 3b607b28741b..747d061d8e05 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -662,7 +662,10 @@ static int flush_pending_writes(conf_t *conf) | |||
662 | if (conf->pending_bio_list.head) { | 662 | if (conf->pending_bio_list.head) { |
663 | struct bio *bio; | 663 | struct bio *bio; |
664 | bio = bio_list_get(&conf->pending_bio_list); | 664 | bio = bio_list_get(&conf->pending_bio_list); |
665 | /* Spinlock only taken to quiet a warning */ | ||
666 | spin_lock(conf->mddev->queue->queue_lock); | ||
665 | blk_remove_plug(conf->mddev->queue); | 667 | blk_remove_plug(conf->mddev->queue); |
668 | spin_unlock(conf->mddev->queue->queue_lock); | ||
666 | spin_unlock_irq(&conf->device_lock); | 669 | spin_unlock_irq(&conf->device_lock); |
667 | /* flush any pending bitmap writes to disk | 670 | /* flush any pending bitmap writes to disk |
668 | * before proceeding w/ I/O */ | 671 | * before proceeding w/ I/O */ |
@@ -971,7 +974,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
971 | atomic_inc(&r10_bio->remaining); | 974 | atomic_inc(&r10_bio->remaining); |
972 | spin_lock_irqsave(&conf->device_lock, flags); | 975 | spin_lock_irqsave(&conf->device_lock, flags); |
973 | bio_list_add(&conf->pending_bio_list, mbio); | 976 | bio_list_add(&conf->pending_bio_list, mbio); |
974 | blk_plug_device(mddev->queue); | 977 | blk_plug_device_unlocked(mddev->queue); |
975 | spin_unlock_irqrestore(&conf->device_lock, flags); | 978 | spin_unlock_irqrestore(&conf->device_lock, flags); |
976 | } | 979 | } |
977 | 980 | ||
@@ -2304,8 +2307,6 @@ static int run(mddev_t *mddev) | |||
2304 | if (!conf) | 2307 | if (!conf) |
2305 | goto out; | 2308 | goto out; |
2306 | 2309 | ||
2307 | mddev->queue->queue_lock = &conf->device_lock; | ||
2308 | |||
2309 | mddev->thread = conf->thread; | 2310 | mddev->thread = conf->thread; |
2310 | conf->thread = NULL; | 2311 | conf->thread = NULL; |
2311 | 2312 | ||
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 702812824195..78536fdbd87f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -5204,7 +5204,6 @@ static int run(mddev_t *mddev) | |||
5204 | 5204 | ||
5205 | mddev->queue->backing_dev_info.congested_data = mddev; | 5205 | mddev->queue->backing_dev_info.congested_data = mddev; |
5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; | 5206 | mddev->queue->backing_dev_info.congested_fn = raid5_congested; |
5207 | mddev->queue->queue_lock = &conf->device_lock; | ||
5208 | mddev->queue->unplug_fn = raid5_unplug_queue; | 5207 | mddev->queue->unplug_fn = raid5_unplug_queue; |
5209 | 5208 | ||
5210 | chunk_size = mddev->chunk_sectors << 9; | 5209 | chunk_size = mddev->chunk_sectors << 9; |
diff --git a/drivers/media/common/tuners/tda8290.c b/drivers/media/common/tuners/tda8290.c index bc6a67768af1..8c4852114eeb 100644 --- a/drivers/media/common/tuners/tda8290.c +++ b/drivers/media/common/tuners/tda8290.c | |||
@@ -658,13 +658,13 @@ static int tda8290_probe(struct tuner_i2c_props *i2c_props) | |||
658 | #define TDA8290_ID 0x89 | 658 | #define TDA8290_ID 0x89 |
659 | u8 reg = 0x1f, id; | 659 | u8 reg = 0x1f, id; |
660 | struct i2c_msg msg_read[] = { | 660 | struct i2c_msg msg_read[] = { |
661 | { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, | 661 | { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, |
662 | { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, | 662 | { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, |
663 | }; | 663 | }; |
664 | 664 | ||
665 | /* detect tda8290 */ | 665 | /* detect tda8290 */ |
666 | if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { | 666 | if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { |
667 | printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", | 667 | printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", |
668 | __func__, reg); | 668 | __func__, reg); |
669 | return -ENODEV; | 669 | return -ENODEV; |
670 | } | 670 | } |
@@ -685,13 +685,13 @@ static int tda8295_probe(struct tuner_i2c_props *i2c_props) | |||
685 | #define TDA8295C2_ID 0x8b | 685 | #define TDA8295C2_ID 0x8b |
686 | u8 reg = 0x2f, id; | 686 | u8 reg = 0x2f, id; |
687 | struct i2c_msg msg_read[] = { | 687 | struct i2c_msg msg_read[] = { |
688 | { .addr = 0x4b, .flags = 0, .len = 1, .buf = &reg }, | 688 | { .addr = i2c_props->addr, .flags = 0, .len = 1, .buf = &reg }, |
689 | { .addr = 0x4b, .flags = I2C_M_RD, .len = 1, .buf = &id }, | 689 | { .addr = i2c_props->addr, .flags = I2C_M_RD, .len = 1, .buf = &id }, |
690 | }; | 690 | }; |
691 | 691 | ||
692 | /* detect tda8290 */ | 692 | /* detect tda8295 */ |
693 | if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { | 693 | if (i2c_transfer(i2c_props->adap, msg_read, 2) != 2) { |
694 | printk(KERN_WARNING "%s: tda8290 couldn't read register 0x%02x\n", | 694 | printk(KERN_WARNING "%s: couldn't read register 0x%02x\n", |
695 | __func__, reg); | 695 | __func__, reg); |
696 | return -ENODEV; | 696 | return -ENODEV; |
697 | } | 697 | } |
diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c index defd83964ce2..193cdb77b76a 100644 --- a/drivers/media/dvb/dvb-usb/dib0700_devices.c +++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c | |||
@@ -870,6 +870,23 @@ static int dib7070p_tuner_attach(struct dvb_usb_adapter *adap) | |||
870 | return 0; | 870 | return 0; |
871 | } | 871 | } |
872 | 872 | ||
873 | static int stk7700p_pid_filter(struct dvb_usb_adapter *adapter, int index, | ||
874 | u16 pid, int onoff) | ||
875 | { | ||
876 | struct dib0700_state *st = adapter->dev->priv; | ||
877 | if (st->is_dib7000pc) | ||
878 | return dib7000p_pid_filter(adapter->fe, index, pid, onoff); | ||
879 | return dib7000m_pid_filter(adapter->fe, index, pid, onoff); | ||
880 | } | ||
881 | |||
882 | static int stk7700p_pid_filter_ctrl(struct dvb_usb_adapter *adapter, int onoff) | ||
883 | { | ||
884 | struct dib0700_state *st = adapter->dev->priv; | ||
885 | if (st->is_dib7000pc) | ||
886 | return dib7000p_pid_filter_ctrl(adapter->fe, onoff); | ||
887 | return dib7000m_pid_filter_ctrl(adapter->fe, onoff); | ||
888 | } | ||
889 | |||
873 | static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) | 890 | static int stk70x0p_pid_filter(struct dvb_usb_adapter *adapter, int index, u16 pid, int onoff) |
874 | { | 891 | { |
875 | return dib7000p_pid_filter(adapter->fe, index, pid, onoff); | 892 | return dib7000p_pid_filter(adapter->fe, index, pid, onoff); |
@@ -1875,8 +1892,8 @@ struct dvb_usb_device_properties dib0700_devices[] = { | |||
1875 | { | 1892 | { |
1876 | .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, | 1893 | .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, |
1877 | .pid_filter_count = 32, | 1894 | .pid_filter_count = 32, |
1878 | .pid_filter = stk70x0p_pid_filter, | 1895 | .pid_filter = stk7700p_pid_filter, |
1879 | .pid_filter_ctrl = stk70x0p_pid_filter_ctrl, | 1896 | .pid_filter_ctrl = stk7700p_pid_filter_ctrl, |
1880 | .frontend_attach = stk7700p_frontend_attach, | 1897 | .frontend_attach = stk7700p_frontend_attach, |
1881 | .tuner_attach = stk7700p_tuner_attach, | 1898 | .tuner_attach = stk7700p_tuner_attach, |
1882 | 1899 | ||
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c index 9eea4188303b..46ccd01a7696 100644 --- a/drivers/media/dvb/dvb-usb/lmedm04.c +++ b/drivers/media/dvb/dvb-usb/lmedm04.c | |||
@@ -659,7 +659,7 @@ static int lme2510_download_firmware(struct usb_device *dev, | |||
659 | } | 659 | } |
660 | 660 | ||
661 | /* Default firmware for LME2510C */ | 661 | /* Default firmware for LME2510C */ |
662 | const char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; | 662 | char lme_firmware[50] = "dvb-usb-lme2510c-s7395.fw"; |
663 | 663 | ||
664 | static void lme_coldreset(struct usb_device *dev) | 664 | static void lme_coldreset(struct usb_device *dev) |
665 | { | 665 | { |
@@ -1006,7 +1006,7 @@ static struct dvb_usb_device_properties lme2510c_properties = { | |||
1006 | .caps = DVB_USB_IS_AN_I2C_ADAPTER, | 1006 | .caps = DVB_USB_IS_AN_I2C_ADAPTER, |
1007 | .usb_ctrl = DEVICE_SPECIFIC, | 1007 | .usb_ctrl = DEVICE_SPECIFIC, |
1008 | .download_firmware = lme2510_download_firmware, | 1008 | .download_firmware = lme2510_download_firmware, |
1009 | .firmware = lme_firmware, | 1009 | .firmware = (const char *)&lme_firmware, |
1010 | .size_of_priv = sizeof(struct lme2510_state), | 1010 | .size_of_priv = sizeof(struct lme2510_state), |
1011 | .num_adapters = 1, | 1011 | .num_adapters = 1, |
1012 | .adapter = { | 1012 | .adapter = { |
@@ -1109,5 +1109,5 @@ module_exit(lme2510_module_exit); | |||
1109 | 1109 | ||
1110 | MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); | 1110 | MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); |
1111 | MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); | 1111 | MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); |
1112 | MODULE_VERSION("1.74"); | 1112 | MODULE_VERSION("1.75"); |
1113 | MODULE_LICENSE("GPL"); | 1113 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/media/dvb/frontends/dib7000m.c b/drivers/media/dvb/frontends/dib7000m.c index c7f5ccf54aa5..289a79837f24 100644 --- a/drivers/media/dvb/frontends/dib7000m.c +++ b/drivers/media/dvb/frontends/dib7000m.c | |||
@@ -1285,6 +1285,25 @@ struct i2c_adapter * dib7000m_get_i2c_master(struct dvb_frontend *demod, enum di | |||
1285 | } | 1285 | } |
1286 | EXPORT_SYMBOL(dib7000m_get_i2c_master); | 1286 | EXPORT_SYMBOL(dib7000m_get_i2c_master); |
1287 | 1287 | ||
1288 | int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) | ||
1289 | { | ||
1290 | struct dib7000m_state *state = fe->demodulator_priv; | ||
1291 | u16 val = dib7000m_read_word(state, 294 + state->reg_offs) & 0xffef; | ||
1292 | val |= (onoff & 0x1) << 4; | ||
1293 | dprintk("PID filter enabled %d", onoff); | ||
1294 | return dib7000m_write_word(state, 294 + state->reg_offs, val); | ||
1295 | } | ||
1296 | EXPORT_SYMBOL(dib7000m_pid_filter_ctrl); | ||
1297 | |||
1298 | int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) | ||
1299 | { | ||
1300 | struct dib7000m_state *state = fe->demodulator_priv; | ||
1301 | dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); | ||
1302 | return dib7000m_write_word(state, 300 + state->reg_offs + id, | ||
1303 | onoff ? (1 << 13) | pid : 0); | ||
1304 | } | ||
1305 | EXPORT_SYMBOL(dib7000m_pid_filter); | ||
1306 | |||
1288 | #if 0 | 1307 | #if 0 |
1289 | /* used with some prototype boards */ | 1308 | /* used with some prototype boards */ |
1290 | int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, | 1309 | int dib7000m_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, |
diff --git a/drivers/media/dvb/frontends/dib7000m.h b/drivers/media/dvb/frontends/dib7000m.h index 113819ce9f0d..81fcf2241c64 100644 --- a/drivers/media/dvb/frontends/dib7000m.h +++ b/drivers/media/dvb/frontends/dib7000m.h | |||
@@ -46,6 +46,8 @@ extern struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, | |||
46 | extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, | 46 | extern struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *, |
47 | enum dibx000_i2c_interface, | 47 | enum dibx000_i2c_interface, |
48 | int); | 48 | int); |
49 | extern int dib7000m_pid_filter(struct dvb_frontend *, u8 id, u16 pid, u8 onoff); | ||
50 | extern int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff); | ||
49 | #else | 51 | #else |
50 | static inline | 52 | static inline |
51 | struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, | 53 | struct dvb_frontend *dib7000m_attach(struct i2c_adapter *i2c_adap, |
@@ -63,6 +65,19 @@ struct i2c_adapter *dib7000m_get_i2c_master(struct dvb_frontend *demod, | |||
63 | printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); | 65 | printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); |
64 | return NULL; | 66 | return NULL; |
65 | } | 67 | } |
68 | static inline int dib7000m_pid_filter(struct dvb_frontend *fe, u8 id, | ||
69 | u16 pid, u8 onoff) | ||
70 | { | ||
71 | printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); | ||
72 | return -ENODEV; | ||
73 | } | ||
74 | |||
75 | static inline int dib7000m_pid_filter_ctrl(struct dvb_frontend *fe, | ||
76 | uint8_t onoff) | ||
77 | { | ||
78 | printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); | ||
79 | return -ENODEV; | ||
80 | } | ||
66 | #endif | 81 | #endif |
67 | 82 | ||
68 | /* TODO | 83 | /* TODO |
diff --git a/drivers/media/dvb/mantis/mantis_pci.c b/drivers/media/dvb/mantis/mantis_pci.c index 59feeb84aec7..10a432a79d00 100644 --- a/drivers/media/dvb/mantis/mantis_pci.c +++ b/drivers/media/dvb/mantis/mantis_pci.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/moduleparam.h> | 22 | #include <linux/moduleparam.h> |
23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include <asm/pgtable.h> | ||
26 | #include <asm/page.h> | 25 | #include <asm/page.h> |
27 | #include <linux/kmod.h> | 26 | #include <linux/kmod.h> |
28 | #include <linux/vmalloc.h> | 27 | #include <linux/vmalloc.h> |
diff --git a/drivers/media/rc/ir-raw.c b/drivers/media/rc/ir-raw.c index 73230ff93b8a..01f258a2a57a 100644 --- a/drivers/media/rc/ir-raw.c +++ b/drivers/media/rc/ir-raw.c | |||
@@ -112,7 +112,7 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type) | |||
112 | { | 112 | { |
113 | ktime_t now; | 113 | ktime_t now; |
114 | s64 delta; /* ns */ | 114 | s64 delta; /* ns */ |
115 | struct ir_raw_event ev; | 115 | DEFINE_IR_RAW_EVENT(ev); |
116 | int rc = 0; | 116 | int rc = 0; |
117 | 117 | ||
118 | if (!dev->raw) | 118 | if (!dev->raw) |
@@ -125,7 +125,6 @@ int ir_raw_event_store_edge(struct rc_dev *dev, enum raw_event_type type) | |||
125 | * being called for the first time, note that delta can't | 125 | * being called for the first time, note that delta can't |
126 | * possibly be negative. | 126 | * possibly be negative. |
127 | */ | 127 | */ |
128 | ev.duration = 0; | ||
129 | if (delta > IR_MAX_DURATION || !dev->raw->last_type) | 128 | if (delta > IR_MAX_DURATION || !dev->raw->last_type) |
130 | type |= IR_START_EVENT; | 129 | type |= IR_START_EVENT; |
131 | else | 130 | else |
diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 6df0a4980645..e4f8eac7f717 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c | |||
@@ -148,6 +148,7 @@ enum mceusb_model_type { | |||
148 | MCE_GEN2_TX_INV, | 148 | MCE_GEN2_TX_INV, |
149 | POLARIS_EVK, | 149 | POLARIS_EVK, |
150 | CX_HYBRID_TV, | 150 | CX_HYBRID_TV, |
151 | MULTIFUNCTION, | ||
151 | }; | 152 | }; |
152 | 153 | ||
153 | struct mceusb_model { | 154 | struct mceusb_model { |
@@ -155,9 +156,10 @@ struct mceusb_model { | |||
155 | u32 mce_gen2:1; | 156 | u32 mce_gen2:1; |
156 | u32 mce_gen3:1; | 157 | u32 mce_gen3:1; |
157 | u32 tx_mask_normal:1; | 158 | u32 tx_mask_normal:1; |
158 | u32 is_polaris:1; | ||
159 | u32 no_tx:1; | 159 | u32 no_tx:1; |
160 | 160 | ||
161 | int ir_intfnum; | ||
162 | |||
161 | const char *rc_map; /* Allow specify a per-board map */ | 163 | const char *rc_map; /* Allow specify a per-board map */ |
162 | const char *name; /* per-board name */ | 164 | const char *name; /* per-board name */ |
163 | }; | 165 | }; |
@@ -179,7 +181,6 @@ static const struct mceusb_model mceusb_model[] = { | |||
179 | .tx_mask_normal = 1, | 181 | .tx_mask_normal = 1, |
180 | }, | 182 | }, |
181 | [POLARIS_EVK] = { | 183 | [POLARIS_EVK] = { |
182 | .is_polaris = 1, | ||
183 | /* | 184 | /* |
184 | * In fact, the EVK is shipped without | 185 | * In fact, the EVK is shipped without |
185 | * remotes, but we should have something handy, | 186 | * remotes, but we should have something handy, |
@@ -189,10 +190,13 @@ static const struct mceusb_model mceusb_model[] = { | |||
189 | .name = "Conexant Hybrid TV (cx231xx) MCE IR", | 190 | .name = "Conexant Hybrid TV (cx231xx) MCE IR", |
190 | }, | 191 | }, |
191 | [CX_HYBRID_TV] = { | 192 | [CX_HYBRID_TV] = { |
192 | .is_polaris = 1, | ||
193 | .no_tx = 1, /* tx isn't wired up at all */ | 193 | .no_tx = 1, /* tx isn't wired up at all */ |
194 | .name = "Conexant Hybrid TV (cx231xx) MCE IR", | 194 | .name = "Conexant Hybrid TV (cx231xx) MCE IR", |
195 | }, | 195 | }, |
196 | [MULTIFUNCTION] = { | ||
197 | .mce_gen2 = 1, | ||
198 | .ir_intfnum = 2, | ||
199 | }, | ||
196 | }; | 200 | }; |
197 | 201 | ||
198 | static struct usb_device_id mceusb_dev_table[] = { | 202 | static struct usb_device_id mceusb_dev_table[] = { |
@@ -216,8 +220,9 @@ static struct usb_device_id mceusb_dev_table[] = { | |||
216 | { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, | 220 | { USB_DEVICE(VENDOR_PHILIPS, 0x206c) }, |
217 | /* Philips/Spinel plus IR transceiver for ASUS */ | 221 | /* Philips/Spinel plus IR transceiver for ASUS */ |
218 | { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, | 222 | { USB_DEVICE(VENDOR_PHILIPS, 0x2088) }, |
219 | /* Realtek MCE IR Receiver */ | 223 | /* Realtek MCE IR Receiver and card reader */ |
220 | { USB_DEVICE(VENDOR_REALTEK, 0x0161) }, | 224 | { USB_DEVICE(VENDOR_REALTEK, 0x0161), |
225 | .driver_info = MULTIFUNCTION }, | ||
221 | /* SMK/Toshiba G83C0004D410 */ | 226 | /* SMK/Toshiba G83C0004D410 */ |
222 | { USB_DEVICE(VENDOR_SMK, 0x031d), | 227 | { USB_DEVICE(VENDOR_SMK, 0x031d), |
223 | .driver_info = MCE_GEN2_TX_INV }, | 228 | .driver_info = MCE_GEN2_TX_INV }, |
@@ -1101,7 +1106,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf, | |||
1101 | bool is_gen3; | 1106 | bool is_gen3; |
1102 | bool is_microsoft_gen1; | 1107 | bool is_microsoft_gen1; |
1103 | bool tx_mask_normal; | 1108 | bool tx_mask_normal; |
1104 | bool is_polaris; | 1109 | int ir_intfnum; |
1105 | 1110 | ||
1106 | dev_dbg(&intf->dev, "%s called\n", __func__); | 1111 | dev_dbg(&intf->dev, "%s called\n", __func__); |
1107 | 1112 | ||
@@ -1110,13 +1115,11 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf, | |||
1110 | is_gen3 = mceusb_model[model].mce_gen3; | 1115 | is_gen3 = mceusb_model[model].mce_gen3; |
1111 | is_microsoft_gen1 = mceusb_model[model].mce_gen1; | 1116 | is_microsoft_gen1 = mceusb_model[model].mce_gen1; |
1112 | tx_mask_normal = mceusb_model[model].tx_mask_normal; | 1117 | tx_mask_normal = mceusb_model[model].tx_mask_normal; |
1113 | is_polaris = mceusb_model[model].is_polaris; | 1118 | ir_intfnum = mceusb_model[model].ir_intfnum; |
1114 | 1119 | ||
1115 | if (is_polaris) { | 1120 | /* There are multi-function devices with non-IR interfaces */ |
1116 | /* Interface 0 is IR */ | 1121 | if (idesc->desc.bInterfaceNumber != ir_intfnum) |
1117 | if (idesc->desc.bInterfaceNumber) | 1122 | return -ENODEV; |
1118 | return -ENODEV; | ||
1119 | } | ||
1120 | 1123 | ||
1121 | /* step through the endpoints to find first bulk in and out endpoint */ | 1124 | /* step through the endpoints to find first bulk in and out endpoint */ |
1122 | for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { | 1125 | for (i = 0; i < idesc->desc.bNumEndpoints; ++i) { |
diff --git a/drivers/media/rc/nuvoton-cir.c b/drivers/media/rc/nuvoton-cir.c index 273d9d674792..d4d64492a057 100644 --- a/drivers/media/rc/nuvoton-cir.c +++ b/drivers/media/rc/nuvoton-cir.c | |||
@@ -385,8 +385,9 @@ static void nvt_cir_regs_init(struct nvt_dev *nvt) | |||
385 | 385 | ||
386 | static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) | 386 | static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) |
387 | { | 387 | { |
388 | /* set number of bytes needed for wake key comparison (default 67) */ | 388 | /* set number of bytes needed for wake from s3 (default 65) */ |
389 | nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_LEN, CIR_WAKE_FIFO_CMP_DEEP); | 389 | nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES, |
390 | CIR_WAKE_FIFO_CMP_DEEP); | ||
390 | 391 | ||
391 | /* set tolerance/variance allowed per byte during wake compare */ | 392 | /* set tolerance/variance allowed per byte during wake compare */ |
392 | nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, | 393 | nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, |
diff --git a/drivers/media/rc/nuvoton-cir.h b/drivers/media/rc/nuvoton-cir.h index 1df82351cb03..048135eea702 100644 --- a/drivers/media/rc/nuvoton-cir.h +++ b/drivers/media/rc/nuvoton-cir.h | |||
@@ -305,8 +305,11 @@ struct nvt_dev { | |||
305 | #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 | 305 | #define CIR_WAKE_IRFIFOSTS_RX_EMPTY 0x20 |
306 | #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 | 306 | #define CIR_WAKE_IRFIFOSTS_RX_FULL 0x10 |
307 | 307 | ||
308 | /* CIR Wake FIFO buffer is 67 bytes long */ | 308 | /* |
309 | #define CIR_WAKE_FIFO_LEN 67 | 309 | * The CIR Wake FIFO buffer is 67 bytes long, but the stock remote wakes |
310 | * the system comparing only 65 bytes (fails with this set to 67) | ||
311 | */ | ||
312 | #define CIR_WAKE_FIFO_CMP_BYTES 65 | ||
310 | /* CIR Wake byte comparison tolerance */ | 313 | /* CIR Wake byte comparison tolerance */ |
311 | #define CIR_WAKE_CMP_TOLERANCE 5 | 314 | #define CIR_WAKE_CMP_TOLERANCE 5 |
312 | 315 | ||
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 512a2f4ada0e..5b4422ef4e6d 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c | |||
@@ -850,7 +850,7 @@ static ssize_t store_protocols(struct device *device, | |||
850 | count++; | 850 | count++; |
851 | } else { | 851 | } else { |
852 | for (i = 0; i < ARRAY_SIZE(proto_names); i++) { | 852 | for (i = 0; i < ARRAY_SIZE(proto_names); i++) { |
853 | if (!strncasecmp(tmp, proto_names[i].name, strlen(proto_names[i].name))) { | 853 | if (!strcasecmp(tmp, proto_names[i].name)) { |
854 | tmp += strlen(proto_names[i].name); | 854 | tmp += strlen(proto_names[i].name); |
855 | mask = proto_names[i].type; | 855 | mask = proto_names[i].type; |
856 | break; | 856 | break; |
diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c index e41e4ad5cc40..9c475c600fc9 100644 --- a/drivers/media/video/au0828/au0828-video.c +++ b/drivers/media/video/au0828/au0828-video.c | |||
@@ -1758,7 +1758,12 @@ static int vidioc_reqbufs(struct file *file, void *priv, | |||
1758 | if (rc < 0) | 1758 | if (rc < 0) |
1759 | return rc; | 1759 | return rc; |
1760 | 1760 | ||
1761 | return videobuf_reqbufs(&fh->vb_vidq, rb); | 1761 | if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) |
1762 | rc = videobuf_reqbufs(&fh->vb_vidq, rb); | ||
1763 | else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) | ||
1764 | rc = videobuf_reqbufs(&fh->vb_vbiq, rb); | ||
1765 | |||
1766 | return rc; | ||
1762 | } | 1767 | } |
1763 | 1768 | ||
1764 | static int vidioc_querybuf(struct file *file, void *priv, | 1769 | static int vidioc_querybuf(struct file *file, void *priv, |
@@ -1772,7 +1777,12 @@ static int vidioc_querybuf(struct file *file, void *priv, | |||
1772 | if (rc < 0) | 1777 | if (rc < 0) |
1773 | return rc; | 1778 | return rc; |
1774 | 1779 | ||
1775 | return videobuf_querybuf(&fh->vb_vidq, b); | 1780 | if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) |
1781 | rc = videobuf_querybuf(&fh->vb_vidq, b); | ||
1782 | else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) | ||
1783 | rc = videobuf_querybuf(&fh->vb_vbiq, b); | ||
1784 | |||
1785 | return rc; | ||
1776 | } | 1786 | } |
1777 | 1787 | ||
1778 | static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) | 1788 | static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) |
@@ -1785,7 +1795,12 @@ static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) | |||
1785 | if (rc < 0) | 1795 | if (rc < 0) |
1786 | return rc; | 1796 | return rc; |
1787 | 1797 | ||
1788 | return videobuf_qbuf(&fh->vb_vidq, b); | 1798 | if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) |
1799 | rc = videobuf_qbuf(&fh->vb_vidq, b); | ||
1800 | else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) | ||
1801 | rc = videobuf_qbuf(&fh->vb_vbiq, b); | ||
1802 | |||
1803 | return rc; | ||
1789 | } | 1804 | } |
1790 | 1805 | ||
1791 | static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) | 1806 | static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) |
@@ -1806,7 +1821,12 @@ static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) | |||
1806 | dev->greenscreen_detected = 0; | 1821 | dev->greenscreen_detected = 0; |
1807 | } | 1822 | } |
1808 | 1823 | ||
1809 | return videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); | 1824 | if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) |
1825 | rc = videobuf_dqbuf(&fh->vb_vidq, b, file->f_flags & O_NONBLOCK); | ||
1826 | else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) | ||
1827 | rc = videobuf_dqbuf(&fh->vb_vbiq, b, file->f_flags & O_NONBLOCK); | ||
1828 | |||
1829 | return rc; | ||
1810 | } | 1830 | } |
1811 | 1831 | ||
1812 | static struct v4l2_file_operations au0828_v4l_fops = { | 1832 | static struct v4l2_file_operations au0828_v4l_fops = { |
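The au0828 hunks above stop funnelling every videobuf ioctl through the video queue and instead pick the queue that matches the file handle's buffer type. A standalone sketch of that dispatch shape, using stand-in queue and handle types rather than the real V4L2 structures.

#include <stdio.h>

enum buf_type { BUF_VIDEO_CAPTURE, BUF_VBI_CAPTURE };

struct queue { const char *name; };

struct handle {
        enum buf_type type;
        struct queue vidq;
        struct queue vbiq;
};

static int queue_reqbufs(struct queue *q, int count)
{
        printf("reqbufs(%s, %d)\n", q->name, count);
        return 0;
}

/* Route the request to whichever queue matches the handle's buffer type;
 * any other type falls through with the earlier return code (0 here). */
static int reqbufs(struct handle *fh, int count)
{
        int rc = 0;

        if (fh->type == BUF_VIDEO_CAPTURE)
                rc = queue_reqbufs(&fh->vidq, count);
        else if (fh->type == BUF_VBI_CAPTURE)
                rc = queue_reqbufs(&fh->vbiq, count);

        return rc;
}

int main(void)
{
        struct handle fh = { BUF_VBI_CAPTURE, { "video" }, { "vbi" } };

        return reqbufs(&fh, 4);
}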
diff --git a/drivers/media/video/cx18/cx18-cards.c b/drivers/media/video/cx18/cx18-cards.c index 87177733cf92..68ad1963f421 100644 --- a/drivers/media/video/cx18/cx18-cards.c +++ b/drivers/media/video/cx18/cx18-cards.c | |||
@@ -95,6 +95,53 @@ static const struct cx18_card cx18_card_hvr1600_esmt = { | |||
95 | .i2c = &cx18_i2c_std, | 95 | .i2c = &cx18_i2c_std, |
96 | }; | 96 | }; |
97 | 97 | ||
98 | static const struct cx18_card cx18_card_hvr1600_s5h1411 = { | ||
99 | .type = CX18_CARD_HVR_1600_S5H1411, | ||
100 | .name = "Hauppauge HVR-1600", | ||
101 | .comment = "Simultaneous Digital and Analog TV capture supported\n", | ||
102 | .v4l2_capabilities = CX18_CAP_ENCODER, | ||
103 | .hw_audio_ctrl = CX18_HW_418_AV, | ||
104 | .hw_muxer = CX18_HW_CS5345, | ||
105 | .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | | ||
106 | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | | ||
107 | CX18_HW_Z8F0811_IR_HAUP, | ||
108 | .video_inputs = { | ||
109 | { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, | ||
110 | { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, | ||
111 | { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, | ||
112 | { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, | ||
113 | { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, | ||
114 | }, | ||
115 | .audio_inputs = { | ||
116 | { CX18_CARD_INPUT_AUD_TUNER, | ||
117 | CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, | ||
118 | { CX18_CARD_INPUT_LINE_IN1, | ||
119 | CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, | ||
120 | { CX18_CARD_INPUT_LINE_IN2, | ||
121 | CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, | ||
122 | }, | ||
123 | .radio_input = { CX18_CARD_INPUT_AUD_TUNER, | ||
124 | CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, | ||
125 | .ddr = { | ||
126 | /* ESMT M13S128324A-5B memory */ | ||
127 | .chip_config = 0x003, | ||
128 | .refresh = 0x30c, | ||
129 | .timing1 = 0x44220e82, | ||
130 | .timing2 = 0x08, | ||
131 | .tune_lane = 0, | ||
132 | .initial_emrs = 0, | ||
133 | }, | ||
134 | .gpio_init.initial_value = 0x3001, | ||
135 | .gpio_init.direction = 0x3001, | ||
136 | .gpio_i2c_slave_reset = { | ||
137 | .active_lo_mask = 0x3001, | ||
138 | .msecs_asserted = 10, | ||
139 | .msecs_recovery = 40, | ||
140 | .ir_reset_mask = 0x0001, | ||
141 | }, | ||
142 | .i2c = &cx18_i2c_std, | ||
143 | }; | ||
144 | |||
98 | static const struct cx18_card cx18_card_hvr1600_samsung = { | 145 | static const struct cx18_card cx18_card_hvr1600_samsung = { |
99 | .type = CX18_CARD_HVR_1600_SAMSUNG, | 146 | .type = CX18_CARD_HVR_1600_SAMSUNG, |
100 | .name = "Hauppauge HVR-1600 (Preproduction)", | 147 | .name = "Hauppauge HVR-1600 (Preproduction)", |
@@ -523,7 +570,8 @@ static const struct cx18_card *cx18_card_list[] = { | |||
523 | &cx18_card_toshiba_qosmio_dvbt, | 570 | &cx18_card_toshiba_qosmio_dvbt, |
524 | &cx18_card_leadtek_pvr2100, | 571 | &cx18_card_leadtek_pvr2100, |
525 | &cx18_card_leadtek_dvr3100h, | 572 | &cx18_card_leadtek_dvr3100h, |
526 | &cx18_card_gotview_dvd3 | 573 | &cx18_card_gotview_dvd3, |
574 | &cx18_card_hvr1600_s5h1411 | ||
527 | }; | 575 | }; |
528 | 576 | ||
529 | const struct cx18_card *cx18_get_card(u16 index) | 577 | const struct cx18_card *cx18_get_card(u16 index) |
diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c index 944af8adbe0c..b1c3cbd92743 100644 --- a/drivers/media/video/cx18/cx18-driver.c +++ b/drivers/media/video/cx18/cx18-driver.c | |||
@@ -157,6 +157,7 @@ MODULE_PARM_DESC(cardtype, | |||
157 | "\t\t\t 7 = Leadtek WinFast PVR2100\n" | 157 | "\t\t\t 7 = Leadtek WinFast PVR2100\n" |
158 | "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" | 158 | "\t\t\t 8 = Leadtek WinFast DVR3100 H\n" |
159 | "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" | 159 | "\t\t\t 9 = GoTView PCI DVD3 Hybrid\n" |
160 | "\t\t\t 10 = Hauppauge HVR 1600 (S5H1411)\n" | ||
160 | "\t\t\t 0 = Autodetect (default)\n" | 161 | "\t\t\t 0 = Autodetect (default)\n" |
161 | "\t\t\t-1 = Ignore this card\n\t\t"); | 162 | "\t\t\t-1 = Ignore this card\n\t\t"); |
162 | MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); | 163 | MODULE_PARM_DESC(pal, "Set PAL standard: B, G, H, D, K, I, M, N, Nc, 60"); |
@@ -337,6 +338,7 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv) | |||
337 | switch (cx->card->type) { | 338 | switch (cx->card->type) { |
338 | case CX18_CARD_HVR_1600_ESMT: | 339 | case CX18_CARD_HVR_1600_ESMT: |
339 | case CX18_CARD_HVR_1600_SAMSUNG: | 340 | case CX18_CARD_HVR_1600_SAMSUNG: |
341 | case CX18_CARD_HVR_1600_S5H1411: | ||
340 | tveeprom_hauppauge_analog(&c, tv, eedata); | 342 | tveeprom_hauppauge_analog(&c, tv, eedata); |
341 | break; | 343 | break; |
342 | case CX18_CARD_YUAN_MPC718: | 344 | case CX18_CARD_YUAN_MPC718: |
@@ -365,7 +367,25 @@ static void cx18_process_eeprom(struct cx18 *cx) | |||
365 | from the model number. Use the cardtype module option if you | 367 | from the model number. Use the cardtype module option if you |
366 | have one of these preproduction models. */ | 368 | have one of these preproduction models. */ |
367 | switch (tv.model) { | 369 | switch (tv.model) { |
368 | case 74000 ... 74999: | 370 | case 74301: /* Retail models */ |
371 | case 74321: | ||
372 | case 74351: /* OEM models */ | ||
373 | case 74361: | ||
374 | /* Digital side is s5h1411/tda18271 */ | ||
375 | cx->card = cx18_get_card(CX18_CARD_HVR_1600_S5H1411); | ||
376 | break; | ||
377 | case 74021: /* Retail models */ | ||
378 | case 74031: | ||
379 | case 74041: | ||
380 | case 74141: | ||
381 | case 74541: /* OEM models */ | ||
382 | case 74551: | ||
383 | case 74591: | ||
384 | case 74651: | ||
385 | case 74691: | ||
386 | case 74751: | ||
387 | case 74891: | ||
388 | /* Digital side is s5h1409/mxl5005s */ | ||
369 | cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); | 389 | cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); |
370 | break; | 390 | break; |
371 | case 0x718: | 391 | case 0x718: |
@@ -377,7 +397,8 @@ static void cx18_process_eeprom(struct cx18 *cx) | |||
377 | CX18_ERR("Invalid EEPROM\n"); | 397 | CX18_ERR("Invalid EEPROM\n"); |
378 | return; | 398 | return; |
379 | default: | 399 | default: |
380 | CX18_ERR("Unknown model %d, defaulting to HVR-1600\n", tv.model); | 400 | CX18_ERR("Unknown model %d, defaulting to original HVR-1600 " |
401 | "(cardtype=1)\n", tv.model); | ||
381 | cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); | 402 | cx->card = cx18_get_card(CX18_CARD_HVR_1600_ESMT); |
382 | break; | 403 | break; |
383 | } | 404 | } |
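The EEPROM handling above replaces the blanket 74000...74999 match with an explicit model-number table so the s5h1411-based HVR-1600 variants get their own card definition. A compilable sketch of the same mapping; the card identifiers are hypothetical, while the model numbers are the ones listed in the hunk.

#include <stdio.h>

enum card { CARD_HVR1600_ESMT = 1, CARD_HVR1600_S5H1411 = 10 };

/* Map a tveeprom model number to a card type, defaulting to the
 * original board for models we do not recognise. */
static enum card card_from_model(int model)
{
        switch (model) {
        case 74301: case 74321:                            /* retail */
        case 74351: case 74361:                            /* OEM */
                return CARD_HVR1600_S5H1411;               /* s5h1411/tda18271 */
        case 74021: case 74031: case 74041: case 74141:    /* retail */
        case 74541: case 74551: case 74591: case 74651:    /* OEM */
        case 74691: case 74751: case 74891:
                return CARD_HVR1600_ESMT;                  /* s5h1409/mxl5005s */
        default:
                fprintf(stderr, "Unknown model %d, defaulting to cardtype=1\n",
                        model);
                return CARD_HVR1600_ESMT;
        }
}

int main(void)
{
        printf("%d %d\n", card_from_model(74301), card_from_model(74021));
        return 0;
}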
diff --git a/drivers/media/video/cx18/cx18-driver.h b/drivers/media/video/cx18/cx18-driver.h index 306caac6d3fc..f736679d2517 100644 --- a/drivers/media/video/cx18/cx18-driver.h +++ b/drivers/media/video/cx18/cx18-driver.h | |||
@@ -85,7 +85,8 @@ | |||
85 | #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ | 85 | #define CX18_CARD_LEADTEK_PVR2100 6 /* Leadtek WinFast PVR2100 */ |
86 | #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ | 86 | #define CX18_CARD_LEADTEK_DVR3100H 7 /* Leadtek WinFast DVR3100 H */ |
87 | #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ | 87 | #define CX18_CARD_GOTVIEW_PCI_DVD3 8 /* GoTView PCI DVD3 Hybrid */ |
88 | #define CX18_CARD_LAST 8 | 88 | #define CX18_CARD_HVR_1600_S5H1411 9 /* Hauppauge HVR 1600 s5h1411/tda18271*/ |
89 | #define CX18_CARD_LAST 9 | ||
89 | 90 | ||
90 | #define CX18_ENC_STREAM_TYPE_MPG 0 | 91 | #define CX18_ENC_STREAM_TYPE_MPG 0 |
91 | #define CX18_ENC_STREAM_TYPE_TS 1 | 92 | #define CX18_ENC_STREAM_TYPE_TS 1 |
diff --git a/drivers/media/video/cx18/cx18-dvb.c b/drivers/media/video/cx18/cx18-dvb.c index f0381d62518d..f41922bd4020 100644 --- a/drivers/media/video/cx18/cx18-dvb.c +++ b/drivers/media/video/cx18/cx18-dvb.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include "cx18-gpio.h" | 29 | #include "cx18-gpio.h" |
30 | #include "s5h1409.h" | 30 | #include "s5h1409.h" |
31 | #include "mxl5005s.h" | 31 | #include "mxl5005s.h" |
32 | #include "s5h1411.h" | ||
33 | #include "tda18271.h" | ||
32 | #include "zl10353.h" | 34 | #include "zl10353.h" |
33 | 35 | ||
34 | #include <linux/firmware.h> | 36 | #include <linux/firmware.h> |
@@ -77,6 +79,32 @@ static struct s5h1409_config hauppauge_hvr1600_config = { | |||
77 | }; | 79 | }; |
78 | 80 | ||
79 | /* | 81 | /* |
82 | * CX18_CARD_HVR_1600_S5H1411 | ||
83 | */ | ||
84 | static struct s5h1411_config hcw_s5h1411_config = { | ||
85 | .output_mode = S5H1411_SERIAL_OUTPUT, | ||
86 | .gpio = S5H1411_GPIO_OFF, | ||
87 | .vsb_if = S5H1411_IF_44000, | ||
88 | .qam_if = S5H1411_IF_4000, | ||
89 | .inversion = S5H1411_INVERSION_ON, | ||
90 | .status_mode = S5H1411_DEMODLOCKING, | ||
91 | .mpeg_timing = S5H1411_MPEGTIMING_CONTINOUS_NONINVERTING_CLOCK, | ||
92 | }; | ||
93 | |||
94 | static struct tda18271_std_map hauppauge_tda18271_std_map = { | ||
95 | .atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3, | ||
96 | .if_lvl = 6, .rfagc_top = 0x37 }, | ||
97 | .qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0, | ||
98 | .if_lvl = 6, .rfagc_top = 0x37 }, | ||
99 | }; | ||
100 | |||
101 | static struct tda18271_config hauppauge_tda18271_config = { | ||
102 | .std_map = &hauppauge_tda18271_std_map, | ||
103 | .gate = TDA18271_GATE_DIGITAL, | ||
104 | .output_opt = TDA18271_OUTPUT_LT_OFF, | ||
105 | }; | ||
106 | |||
107 | /* | ||
80 | * CX18_CARD_LEADTEK_DVR3100H | 108 | * CX18_CARD_LEADTEK_DVR3100H |
81 | */ | 109 | */ |
82 | /* Information/confirmation of proper config values provided by Terry Wu */ | 110 | /* Information/confirmation of proper config values provided by Terry Wu */ |
@@ -244,6 +272,7 @@ static int cx18_dvb_start_feed(struct dvb_demux_feed *feed) | |||
244 | switch (cx->card->type) { | 272 | switch (cx->card->type) { |
245 | case CX18_CARD_HVR_1600_ESMT: | 273 | case CX18_CARD_HVR_1600_ESMT: |
246 | case CX18_CARD_HVR_1600_SAMSUNG: | 274 | case CX18_CARD_HVR_1600_SAMSUNG: |
275 | case CX18_CARD_HVR_1600_S5H1411: | ||
247 | v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); | 276 | v = cx18_read_reg(cx, CX18_REG_DMUX_NUM_PORT_0_CONTROL); |
248 | v |= 0x00400000; /* Serial Mode */ | 277 | v |= 0x00400000; /* Serial Mode */ |
249 | v |= 0x00002000; /* Data Length - Byte */ | 278 | v |= 0x00002000; /* Data Length - Byte */ |
@@ -455,6 +484,15 @@ static int dvb_register(struct cx18_stream *stream) | |||
455 | ret = 0; | 484 | ret = 0; |
456 | } | 485 | } |
457 | break; | 486 | break; |
487 | case CX18_CARD_HVR_1600_S5H1411: | ||
488 | dvb->fe = dvb_attach(s5h1411_attach, | ||
489 | &hcw_s5h1411_config, | ||
490 | &cx->i2c_adap[0]); | ||
491 | if (dvb->fe != NULL) | ||
492 | dvb_attach(tda18271_attach, dvb->fe, | ||
493 | 0x60, &cx->i2c_adap[0], | ||
494 | &hauppauge_tda18271_config); | ||
495 | break; | ||
458 | case CX18_CARD_LEADTEK_DVR3100H: | 496 | case CX18_CARD_LEADTEK_DVR3100H: |
459 | dvb->fe = dvb_attach(zl10353_attach, | 497 | dvb->fe = dvb_attach(zl10353_attach, |
460 | &leadtek_dvr3100h_demod, | 498 | &leadtek_dvr3100h_demod, |
diff --git a/drivers/media/video/cx23885/cx23885-i2c.c b/drivers/media/video/cx23885/cx23885-i2c.c index ed3d8f55029b..307ff543c254 100644 --- a/drivers/media/video/cx23885/cx23885-i2c.c +++ b/drivers/media/video/cx23885/cx23885-i2c.c | |||
@@ -122,10 +122,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap, | |||
122 | 122 | ||
123 | if (!i2c_wait_done(i2c_adap)) | 123 | if (!i2c_wait_done(i2c_adap)) |
124 | goto eio; | 124 | goto eio; |
125 | if (!i2c_slave_did_ack(i2c_adap)) { | ||
126 | retval = -ENXIO; | ||
127 | goto err; | ||
128 | } | ||
129 | if (i2c_debug) { | 125 | if (i2c_debug) { |
130 | printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); | 126 | printk(" <W %02x %02x", msg->addr << 1, msg->buf[0]); |
131 | if (!(ctrl & I2C_NOSTOP)) | 127 | if (!(ctrl & I2C_NOSTOP)) |
@@ -158,7 +154,6 @@ static int i2c_sendbytes(struct i2c_adapter *i2c_adap, | |||
158 | 154 | ||
159 | eio: | 155 | eio: |
160 | retval = -EIO; | 156 | retval = -EIO; |
161 | err: | ||
162 | if (i2c_debug) | 157 | if (i2c_debug) |
163 | printk(KERN_ERR " ERR: %d\n", retval); | 158 | printk(KERN_ERR " ERR: %d\n", retval); |
164 | return retval; | 159 | return retval; |
@@ -209,10 +204,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap, | |||
209 | 204 | ||
210 | if (!i2c_wait_done(i2c_adap)) | 205 | if (!i2c_wait_done(i2c_adap)) |
211 | goto eio; | 206 | goto eio; |
212 | if (cnt == 0 && !i2c_slave_did_ack(i2c_adap)) { | ||
213 | retval = -ENXIO; | ||
214 | goto err; | ||
215 | } | ||
216 | msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; | 207 | msg->buf[cnt] = cx_read(bus->reg_rdata) & 0xff; |
217 | if (i2c_debug) { | 208 | if (i2c_debug) { |
218 | dprintk(1, " %02x", msg->buf[cnt]); | 209 | dprintk(1, " %02x", msg->buf[cnt]); |
@@ -224,7 +215,6 @@ static int i2c_readbytes(struct i2c_adapter *i2c_adap, | |||
224 | 215 | ||
225 | eio: | 216 | eio: |
226 | retval = -EIO; | 217 | retval = -EIO; |
227 | err: | ||
228 | if (i2c_debug) | 218 | if (i2c_debug) |
229 | printk(KERN_ERR " ERR: %d\n", retval); | 219 | printk(KERN_ERR " ERR: %d\n", retval); |
230 | return retval; | 220 | return retval; |
diff --git a/drivers/media/video/cx25840/cx25840-core.c b/drivers/media/video/cx25840/cx25840-core.c index 6fc09dd41b9d..35796e035247 100644 --- a/drivers/media/video/cx25840/cx25840-core.c +++ b/drivers/media/video/cx25840/cx25840-core.c | |||
@@ -2015,7 +2015,8 @@ static int cx25840_probe(struct i2c_client *client, | |||
2015 | kfree(state); | 2015 | kfree(state); |
2016 | return err; | 2016 | return err; |
2017 | } | 2017 | } |
2018 | v4l2_ctrl_cluster(2, &state->volume); | 2018 | if (!is_cx2583x(state)) |
2019 | v4l2_ctrl_cluster(2, &state->volume); | ||
2019 | v4l2_ctrl_handler_setup(&state->hdl); | 2020 | v4l2_ctrl_handler_setup(&state->hdl); |
2020 | 2021 | ||
2021 | if (client->dev.platform_data) { | 2022 | if (client->dev.platform_data) { |
diff --git a/drivers/media/video/ivtv/ivtv-irq.c b/drivers/media/video/ivtv/ivtv-irq.c index 9b4faf009196..9c29e964d400 100644 --- a/drivers/media/video/ivtv/ivtv-irq.c +++ b/drivers/media/video/ivtv/ivtv-irq.c | |||
@@ -628,22 +628,66 @@ static void ivtv_irq_enc_pio_complete(struct ivtv *itv) | |||
628 | static void ivtv_irq_dma_err(struct ivtv *itv) | 628 | static void ivtv_irq_dma_err(struct ivtv *itv) |
629 | { | 629 | { |
630 | u32 data[CX2341X_MBOX_MAX_DATA]; | 630 | u32 data[CX2341X_MBOX_MAX_DATA]; |
631 | u32 status; | ||
631 | 632 | ||
632 | del_timer(&itv->dma_timer); | 633 | del_timer(&itv->dma_timer); |
634 | |||
633 | ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); | 635 | ivtv_api_get_data(&itv->enc_mbox, IVTV_MBOX_DMA_END, 2, data); |
636 | status = read_reg(IVTV_REG_DMASTATUS); | ||
634 | IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], | 637 | IVTV_DEBUG_WARN("DMA ERROR %08x %08x %08x %d\n", data[0], data[1], |
635 | read_reg(IVTV_REG_DMASTATUS), itv->cur_dma_stream); | 638 | status, itv->cur_dma_stream); |
636 | write_reg(read_reg(IVTV_REG_DMASTATUS) & 3, IVTV_REG_DMASTATUS); | 639 | /* |
640 | * We do *not* write back to the IVTV_REG_DMASTATUS register to | ||
641 | * clear the error status, if either the encoder write (0x02) or | ||
642 | * decoder read (0x01) bus master DMA operation does not indicate | ||
643 | * completion. We can race with the DMA engine, which may have | ||
644 | * transitioned to completed status *after* we read the register. | ||
645 | * Setting an IVTV_REG_DMASTATUS flag back to "busy" status, after the | ||
646 | * DMA engine has completed, will cause the DMA engine to stop working. | ||
647 | */ | ||
648 | status &= 0x3; | ||
649 | if (status == 0x3) | ||
650 | write_reg(status, IVTV_REG_DMASTATUS); | ||
651 | |||
637 | if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && | 652 | if (!test_bit(IVTV_F_I_UDMA, &itv->i_flags) && |
638 | itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { | 653 | itv->cur_dma_stream >= 0 && itv->cur_dma_stream < IVTV_MAX_STREAMS) { |
639 | struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; | 654 | struct ivtv_stream *s = &itv->streams[itv->cur_dma_stream]; |
640 | 655 | ||
641 | /* retry */ | 656 | if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) { |
642 | if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) | 657 | /* retry */ |
658 | /* | ||
659 | * FIXME - handle cases of DMA error similar to | ||
660 | * encoder below, except conditioned on status & 0x1 | ||
661 | */ | ||
643 | ivtv_dma_dec_start(s); | 662 | ivtv_dma_dec_start(s); |
644 | else | 663 | return; |
645 | ivtv_dma_enc_start(s); | 664 | } else { |
646 | return; | 665 | if ((status & 0x2) == 0) { |
666 | /* | ||
667 | * CX2341x Bus Master DMA write is ongoing. | ||
668 | * Reset the timer and let it complete. | ||
669 | */ | ||
670 | itv->dma_timer.expires = | ||
671 | jiffies + msecs_to_jiffies(600); | ||
672 | add_timer(&itv->dma_timer); | ||
673 | return; | ||
674 | } | ||
675 | |||
676 | if (itv->dma_retries < 3) { | ||
677 | /* | ||
678 | * CX2341x Bus Master DMA write has ended. | ||
679 | * Retry the write, starting with the first | ||
680 | * xfer segment. Just retrying the current | ||
681 | * segment is not sufficient. | ||
682 | */ | ||
683 | s->sg_processed = 0; | ||
684 | itv->dma_retries++; | ||
685 | ivtv_dma_enc_start_xfer(s); | ||
686 | return; | ||
687 | } | ||
688 | /* Too many retries, give up on this one */ | ||
689 | } | ||
690 | |||
647 | } | 691 | } |
648 | if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { | 692 | if (test_bit(IVTV_F_I_UDMA, &itv->i_flags)) { |
649 | ivtv_udma_start(itv); | 693 | ivtv_udma_start(itv); |
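The reworked ivtv error path only writes IVTV_REG_DMASTATUS back when both completion bits are set, re-arms the timer while the encoder write is still in flight, and otherwise retries the whole transfer up to three times. A standalone sketch of that decision logic with the hardware access replaced by printouts; the status encoding (0x1 decoder read complete, 0x2 encoder write complete) follows the comment in the hunk.

#include <stdio.h>

static void handle_enc_dma_err(unsigned int status, int *retries)
{
        if ((status & 0x3) == 0x3)
                printf("both engines idle: clear DMASTATUS\n");

        if (!(status & 0x2)) {
                printf("encoder write still ongoing: re-arm timer\n");
                return;
        }
        if (*retries < 3) {
                (*retries)++;
                printf("retry transfer from first segment (attempt %d)\n",
                       *retries);
                return;
        }
        printf("too many retries: give up\n");
}

int main(void)
{
        int retries = 0;

        handle_enc_dma_err(0x1, &retries);   /* write in flight */
        handle_enc_dma_err(0x3, &retries);   /* clear, then retry */
        handle_enc_dma_err(0x2, &retries);   /* retry without clearing */
        return 0;
}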
diff --git a/drivers/media/video/mem2mem_testdev.c b/drivers/media/video/mem2mem_testdev.c index c179041d91f8..e7e717800ee2 100644 --- a/drivers/media/video/mem2mem_testdev.c +++ b/drivers/media/video/mem2mem_testdev.c | |||
@@ -1011,7 +1011,6 @@ static int m2mtest_remove(struct platform_device *pdev) | |||
1011 | v4l2_m2m_release(dev->m2m_dev); | 1011 | v4l2_m2m_release(dev->m2m_dev); |
1012 | del_timer_sync(&dev->timer); | 1012 | del_timer_sync(&dev->timer); |
1013 | video_unregister_device(dev->vfd); | 1013 | video_unregister_device(dev->vfd); |
1014 | video_device_release(dev->vfd); | ||
1015 | v4l2_device_unregister(&dev->v4l2_dev); | 1014 | v4l2_device_unregister(&dev->v4l2_dev); |
1016 | kfree(dev); | 1015 | kfree(dev); |
1017 | 1016 | ||
diff --git a/drivers/media/video/s2255drv.c b/drivers/media/video/s2255drv.c index b63f8cafa671..561909b65ce6 100644 --- a/drivers/media/video/s2255drv.c +++ b/drivers/media/video/s2255drv.c | |||
@@ -57,7 +57,7 @@ | |||
57 | #include <linux/usb.h> | 57 | #include <linux/usb.h> |
58 | 58 | ||
59 | #define S2255_MAJOR_VERSION 1 | 59 | #define S2255_MAJOR_VERSION 1 |
60 | #define S2255_MINOR_VERSION 20 | 60 | #define S2255_MINOR_VERSION 21 |
61 | #define S2255_RELEASE 0 | 61 | #define S2255_RELEASE 0 |
62 | #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ | 62 | #define S2255_VERSION KERNEL_VERSION(S2255_MAJOR_VERSION, \ |
63 | S2255_MINOR_VERSION, \ | 63 | S2255_MINOR_VERSION, \ |
@@ -312,9 +312,9 @@ struct s2255_fh { | |||
312 | }; | 312 | }; |
313 | 313 | ||
314 | /* current cypress EEPROM firmware version */ | 314 | /* current cypress EEPROM firmware version */ |
315 | #define S2255_CUR_USB_FWVER ((3 << 8) | 6) | 315 | #define S2255_CUR_USB_FWVER ((3 << 8) | 11) |
316 | /* current DSP FW version */ | 316 | /* current DSP FW version */ |
317 | #define S2255_CUR_DSP_FWVER 8 | 317 | #define S2255_CUR_DSP_FWVER 10102 |
318 | /* Need DSP version 5+ for video status feature */ | 318 | /* Need DSP version 5+ for video status feature */ |
319 | #define S2255_MIN_DSP_STATUS 5 | 319 | #define S2255_MIN_DSP_STATUS 5 |
320 | #define S2255_MIN_DSP_COLORFILTER 8 | 320 | #define S2255_MIN_DSP_COLORFILTER 8 |
@@ -492,9 +492,11 @@ static void planar422p_to_yuv_packed(const unsigned char *in, | |||
492 | 492 | ||
493 | static void s2255_reset_dsppower(struct s2255_dev *dev) | 493 | static void s2255_reset_dsppower(struct s2255_dev *dev) |
494 | { | 494 | { |
495 | s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b0b, NULL, 0, 1); | 495 | s2255_vendor_req(dev, 0x40, 0x0b0b, 0x0b01, NULL, 0, 1); |
496 | msleep(10); | 496 | msleep(10); |
497 | s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); | 497 | s2255_vendor_req(dev, 0x50, 0x0000, 0x0000, NULL, 0, 1); |
498 | msleep(600); | ||
499 | s2255_vendor_req(dev, 0x10, 0x0000, 0x0000, NULL, 0, 1); | ||
498 | return; | 500 | return; |
499 | } | 501 | } |
500 | 502 | ||
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 6a1f94042612..c45e6305b26f 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
143 | unsigned long flags; | 143 | unsigned long flags; |
144 | struct asic3 *asic; | 144 | struct asic3 *asic; |
145 | 145 | ||
146 | desc->chip->ack(irq); | 146 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
147 | 147 | ||
148 | asic = desc->handler_data; | 148 | asic = get_irq_data(irq); |
149 | 149 | ||
150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
151 | u32 status; | 151 | u32 status; |
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index 33c923d215c7..fdd8a1b8bc67 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c | |||
@@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev) | |||
118 | 118 | ||
119 | /* Voice codec interface client */ | 119 | /* Voice codec interface client */ |
120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; | 120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; |
121 | cell->name = "davinci_vcif"; | 121 | cell->name = "davinci-vcif"; |
122 | cell->driver_data = davinci_vc; | 122 | cell->driver_data = davinci_vc; |
123 | 123 | ||
124 | /* Voice codec CQ93VC client */ | 124 | /* Voice codec CQ93VC client */ |
125 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; | 125 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; |
126 | cell->name = "cq93vc"; | 126 | cell->name = "cq93vc-codec"; |
127 | cell->driver_data = davinci_vc; | 127 | cell->driver_data = davinci_vc; |
128 | 128 | ||
129 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, | 129 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, |
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 627cf577b16d..e9018d1394ee 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
@@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client, | |||
150 | static inline int __tps6586x_writes(struct i2c_client *client, int reg, | 150 | static inline int __tps6586x_writes(struct i2c_client *client, int reg, |
151 | int len, uint8_t *val) | 151 | int len, uint8_t *val) |
152 | { | 152 | { |
153 | int ret; | 153 | int ret, i; |
154 | 154 | ||
155 | ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); | 155 | for (i = 0; i < len; i++) { |
156 | if (ret < 0) { | 156 | ret = __tps6586x_write(client, reg + i, *(val + i)); |
157 | dev_err(&client->dev, "failed writings to 0x%02x\n", reg); | 157 | if (ret < 0) |
158 | return ret; | 158 | return ret; |
159 | } | 159 | } |
160 | 160 | ||
161 | return 0; | 161 | return 0; |
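The tps6586x hunk above turns the block write into a loop of single-register writes, apparently to avoid the SMBus block transfer. A minimal sketch of that fallback pattern with a stand-in single-write primitive.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for a single-register write primitive. */
static int write_one(uint8_t reg, uint8_t val)
{
        printf("write reg 0x%02x = 0x%02x\n", reg, val);
        return 0;
}

/* Multi-register write built from repeated single writes; bail out on the
 * first failure so the caller sees which transfer broke. */
static int write_many(uint8_t reg, int len, const uint8_t *val)
{
        for (int i = 0; i < len; i++) {
                int ret = write_one(reg + i, val[i]);

                if (ret < 0)
                        return ret;
        }
        return 0;
}

int main(void)
{
        const uint8_t buf[] = { 0x12, 0x34, 0x56 };

        return write_many(0x40, sizeof(buf), buf);
}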
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 000cb414a78a..92b85e28a15e 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c | |||
@@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) | |||
385 | idev->close = ucb1x00_ts_close; | 385 | idev->close = ucb1x00_ts_close; |
386 | 386 | ||
387 | __set_bit(EV_ABS, idev->evbit); | 387 | __set_bit(EV_ABS, idev->evbit); |
388 | __set_bit(ABS_X, idev->absbit); | ||
389 | __set_bit(ABS_Y, idev->absbit); | ||
390 | __set_bit(ABS_PRESSURE, idev->absbit); | ||
391 | 388 | ||
392 | input_set_drvdata(idev, ts); | 389 | input_set_drvdata(idev, ts); |
393 | 390 | ||
391 | ucb1x00_adc_enable(ts->ucb); | ||
392 | ts->x_res = ucb1x00_ts_read_xres(ts); | ||
393 | ts->y_res = ucb1x00_ts_read_yres(ts); | ||
394 | ucb1x00_adc_disable(ts->ucb); | ||
395 | |||
396 | input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); | ||
397 | input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); | ||
398 | input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); | ||
399 | |||
394 | err = input_register_device(idev); | 400 | err = input_register_device(idev); |
395 | if (err) | 401 | if (err) |
396 | goto fail; | 402 | goto fail; |
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 41233c7fa581..f4016a075fd6 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c | |||
@@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev) | |||
246 | struct wm8994 *wm8994 = dev_get_drvdata(dev); | 246 | struct wm8994 *wm8994 = dev_get_drvdata(dev); |
247 | int ret; | 247 | int ret; |
248 | 248 | ||
249 | /* Don't actually go through with the suspend if the CODEC is | ||
250 | * still active (e.g., for audio passthrough from the CP). */ | ||
251 | ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1); | ||
252 | if (ret < 0) { | ||
253 | dev_err(dev, "Failed to read power status: %d\n", ret); | ||
254 | } else if (ret & WM8994_VMID_SEL_MASK) { | ||
255 | dev_dbg(dev, "CODEC still active, ignoring suspend\n"); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
249 | /* GPIO configuration state is saved here since we may be configuring | 259 | /* GPIO configuration state is saved here since we may be configuring |
250 | * the GPIO alternate functions even if we're not using the gpiolib | 260 | * the GPIO alternate functions even if we're not using the gpiolib |
251 | * driver for them. | 261 | * driver for them. |
@@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev) | |||
261 | if (ret < 0) | 271 | if (ret < 0) |
262 | dev_err(dev, "Failed to save LDO registers: %d\n", ret); | 272 | dev_err(dev, "Failed to save LDO registers: %d\n", ret); |
263 | 273 | ||
274 | wm8994->suspended = true; | ||
275 | |||
264 | ret = regulator_bulk_disable(wm8994->num_supplies, | 276 | ret = regulator_bulk_disable(wm8994->num_supplies, |
265 | wm8994->supplies); | 277 | wm8994->supplies); |
266 | if (ret != 0) { | 278 | if (ret != 0) { |
@@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev) | |||
276 | struct wm8994 *wm8994 = dev_get_drvdata(dev); | 288 | struct wm8994 *wm8994 = dev_get_drvdata(dev); |
277 | int ret; | 289 | int ret; |
278 | 290 | ||
291 | /* We may have lied to the PM core about suspending */ | ||
292 | if (!wm8994->suspended) | ||
293 | return 0; | ||
294 | |||
279 | ret = regulator_bulk_enable(wm8994->num_supplies, | 295 | ret = regulator_bulk_enable(wm8994->num_supplies, |
280 | wm8994->supplies); | 296 | wm8994->supplies); |
281 | if (ret != 0) { | 297 | if (ret != 0) { |
@@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev) | |||
298 | if (ret < 0) | 314 | if (ret < 0) |
299 | dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); | 315 | dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); |
300 | 316 | ||
317 | wm8994->suspended = false; | ||
318 | |||
301 | return 0; | 319 | return 0; |
302 | } | 320 | } |
303 | #endif | 321 | #endif |
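The wm8994 hunks above make suspend a no-op while the CODEC is still biased and pair it with a resume that only restores state when a real suspend happened, tracked via a suspended flag. A standalone sketch of that pattern; the VMID mask value below is an assumption for the sketch, not the real register definition.

#include <stdbool.h>
#include <stdio.h>

#define VMID_SEL_MASK 0x3       /* assumed bit field for the sketch */

struct codec {
        bool suspended;
        unsigned int pm1;       /* stand-in for the power-management register */
};

static int codec_suspend(struct codec *c)
{
        if (c->pm1 & VMID_SEL_MASK) {
                /* Still active (e.g. audio passthrough): report success to
                 * the PM core but keep the device powered. */
                printf("still active, skipping suspend\n");
                return 0;
        }
        c->suspended = true;
        printf("state saved, supplies off\n");
        return 0;
}

static int codec_resume(struct codec *c)
{
        /* We may have skipped the suspend; nothing to restore then. */
        if (!c->suspended)
                return 0;
        printf("supplies on, state restored\n");
        c->suspended = false;
        return 0;
}

int main(void)
{
        struct codec c = { .suspended = false, .pm1 = 0x1 };

        codec_suspend(&c);      /* skipped */
        codec_resume(&c);       /* nothing to do */

        c.pm1 = 0;
        codec_suspend(&c);      /* real suspend */
        codec_resume(&c);       /* real resume */
        return 0;
}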
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 63ee4c1a5315..b6e1c9a6679e 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c | |||
@@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = { | |||
449 | { "bmp085", 0 }, | 449 | { "bmp085", 0 }, |
450 | { } | 450 | { } |
451 | }; | 451 | }; |
452 | MODULE_DEVICE_TABLE(i2c, bmp085_id); | ||
452 | 453 | ||
453 | static struct i2c_driver bmp085_driver = { | 454 | static struct i2c_driver bmp085_driver = { |
454 | .driver = { | 455 | .driver = { |
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 6625c057be05..150b5f3cd401 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c | |||
@@ -1529,7 +1529,7 @@ void mmc_rescan(struct work_struct *work) | |||
1529 | * still present | 1529 | * still present |
1530 | */ | 1530 | */ |
1531 | if (host->bus_ops && host->bus_ops->detect && !host->bus_dead | 1531 | if (host->bus_ops && host->bus_ops->detect && !host->bus_dead |
1532 | && mmc_card_is_removable(host)) | 1532 | && !(host->caps & MMC_CAP_NONREMOVABLE)) |
1533 | host->bus_ops->detect(host); | 1533 | host->bus_ops->detect(host); |
1534 | 1534 | ||
1535 | /* | 1535 | /* |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5c4a54d9b6a4..ebc62ad4cc56 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
@@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
792 | */ | 792 | */ |
793 | mmc_release_host(host); | 793 | mmc_release_host(host); |
794 | err = mmc_add_card(host->card); | 794 | err = mmc_add_card(host->card); |
795 | mmc_claim_host(host); | ||
796 | if (err) | 795 | if (err) |
797 | goto remove_added; | 796 | goto remove_added; |
798 | 797 | ||
@@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
805 | goto remove_added; | 804 | goto remove_added; |
806 | } | 805 | } |
807 | 806 | ||
807 | mmc_claim_host(host); | ||
808 | return 0; | 808 | return 0; |
809 | 809 | ||
810 | 810 | ||
811 | remove_added: | 811 | remove_added: |
812 | /* Remove without lock if the device has been added. */ | 812 | /* Remove without lock if the device has been added. */ |
813 | mmc_release_host(host); | ||
814 | mmc_sdio_remove(host); | 813 | mmc_sdio_remove(host); |
815 | mmc_claim_host(host); | 814 | mmc_claim_host(host); |
816 | remove: | 815 | remove: |
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c index a8c3e1c9b02a..4aaa88f8ab5f 100644 --- a/drivers/mtd/chips/cfi_cmdset_0001.c +++ b/drivers/mtd/chips/cfi_cmdset_0001.c | |||
@@ -1230,10 +1230,32 @@ static int inval_cache_and_wait_for_operation( | |||
1230 | sleep_time = chip_op_time / 2; | 1230 | sleep_time = chip_op_time / 2; |
1231 | 1231 | ||
1232 | for (;;) { | 1232 | for (;;) { |
1233 | if (chip->state != chip_state) { | ||
1234 | /* Someone's suspended the operation: sleep */ | ||
1235 | DECLARE_WAITQUEUE(wait, current); | ||
1236 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1237 | add_wait_queue(&chip->wq, &wait); | ||
1238 | mutex_unlock(&chip->mutex); | ||
1239 | schedule(); | ||
1240 | remove_wait_queue(&chip->wq, &wait); | ||
1241 | mutex_lock(&chip->mutex); | ||
1242 | continue; | ||
1243 | } | ||
1244 | |||
1233 | status = map_read(map, cmd_adr); | 1245 | status = map_read(map, cmd_adr); |
1234 | if (map_word_andequal(map, status, status_OK, status_OK)) | 1246 | if (map_word_andequal(map, status, status_OK, status_OK)) |
1235 | break; | 1247 | break; |
1236 | 1248 | ||
1249 | if (chip->erase_suspended && chip_state == FL_ERASING) { | ||
1250 | /* Erase suspend occurred while sleeping: reset timeout */ | ||
1251 | timeo = reset_timeo; | ||
1252 | chip->erase_suspended = 0; | ||
1253 | } | ||
1254 | if (chip->write_suspended && chip_state == FL_WRITING) { | ||
1255 | /* Write suspend occurred while sleeping: reset timeout */ | ||
1256 | timeo = reset_timeo; | ||
1257 | chip->write_suspended = 0; | ||
1258 | } | ||
1237 | if (!timeo) { | 1259 | if (!timeo) { |
1238 | map_write(map, CMD(0x70), cmd_adr); | 1260 | map_write(map, CMD(0x70), cmd_adr); |
1239 | chip->state = FL_STATUS; | 1261 | chip->state = FL_STATUS; |
@@ -1257,27 +1279,6 @@ static int inval_cache_and_wait_for_operation( | |||
1257 | timeo--; | 1279 | timeo--; |
1258 | } | 1280 | } |
1259 | mutex_lock(&chip->mutex); | 1281 | mutex_lock(&chip->mutex); |
1260 | |||
1261 | while (chip->state != chip_state) { | ||
1262 | /* Someone's suspended the operation: sleep */ | ||
1263 | DECLARE_WAITQUEUE(wait, current); | ||
1264 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1265 | add_wait_queue(&chip->wq, &wait); | ||
1266 | mutex_unlock(&chip->mutex); | ||
1267 | schedule(); | ||
1268 | remove_wait_queue(&chip->wq, &wait); | ||
1269 | mutex_lock(&chip->mutex); | ||
1270 | } | ||
1271 | if (chip->erase_suspended && chip_state == FL_ERASING) { | ||
1272 | /* Erase suspend occured while sleep: reset timeout */ | ||
1273 | timeo = reset_timeo; | ||
1274 | chip->erase_suspended = 0; | ||
1275 | } | ||
1276 | if (chip->write_suspended && chip_state == FL_WRITING) { | ||
1277 | /* Write suspend occured while sleep: reset timeout */ | ||
1278 | timeo = reset_timeo; | ||
1279 | chip->write_suspended = 0; | ||
1280 | } | ||
1281 | } | 1282 | } |
1282 | 1283 | ||
1283 | /* Done and happy. */ | 1284 | /* Done and happy. */ |
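The restructured wait loop above handles a suspended operation at the top of every iteration, before polling the status word, and resets the timeout when an erase or write suspend happened while sleeping. A compilable sketch of that loop ordering with the chip access stubbed out.

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the chip state and the status poll. */
static bool op_suspended_by_someone(int iter) { return iter == 1; }
static bool status_ready(int iter)            { return iter >= 3; }

static int wait_for_operation(int initial_timeo)
{
        int timeo = initial_timeo;
        bool suspend_seen = false;

        for (int iter = 0; ; iter++) {
                if (op_suspended_by_someone(iter)) {
                        /* Sleep until the operation is resumed, then start
                         * the iteration over without touching the timeout. */
                        suspend_seen = true;
                        printf("suspended: sleeping\n");
                        continue;
                }

                if (status_ready(iter))
                        break;                  /* done and happy */

                if (suspend_seen) {
                        timeo = initial_timeo;  /* reset after a suspend */
                        suspend_seen = false;
                }

                if (!timeo--) {
                        printf("timed out\n");
                        return -1;
                }
                printf("still busy, timeo=%d\n", timeo);
        }
        printf("completed\n");
        return 0;
}

int main(void)
{
        return wait_for_operation(5);
}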
diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c index d72a5fb2d041..4e1be51cc122 100644 --- a/drivers/mtd/chips/jedec_probe.c +++ b/drivers/mtd/chips/jedec_probe.c | |||
@@ -1935,14 +1935,14 @@ static void jedec_reset(u32 base, struct map_info *map, struct cfi_private *cfi) | |||
1935 | } | 1935 | } |
1936 | 1936 | ||
1937 | 1937 | ||
1938 | static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) | 1938 | static int cfi_jedec_setup(struct map_info *map, struct cfi_private *cfi, int index) |
1939 | { | 1939 | { |
1940 | int i,num_erase_regions; | 1940 | int i,num_erase_regions; |
1941 | uint8_t uaddr; | 1941 | uint8_t uaddr; |
1942 | 1942 | ||
1943 | if (! (jedec_table[index].devtypes & p_cfi->device_type)) { | 1943 | if (!(jedec_table[index].devtypes & cfi->device_type)) { |
1944 | DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", | 1944 | DEBUG(MTD_DEBUG_LEVEL1, "Rejecting potential %s with incompatible %d-bit device type\n", |
1945 | jedec_table[index].name, 4 * (1<<p_cfi->device_type)); | 1945 | jedec_table[index].name, 4 * (1<<cfi->device_type)); |
1946 | return 0; | 1946 | return 0; |
1947 | } | 1947 | } |
1948 | 1948 | ||
@@ -1950,27 +1950,28 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) | |||
1950 | 1950 | ||
1951 | num_erase_regions = jedec_table[index].nr_regions; | 1951 | num_erase_regions = jedec_table[index].nr_regions; |
1952 | 1952 | ||
1953 | p_cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); | 1953 | cfi->cfiq = kmalloc(sizeof(struct cfi_ident) + num_erase_regions * 4, GFP_KERNEL); |
1954 | if (!p_cfi->cfiq) { | 1954 | if (!cfi->cfiq) { |
1955 | //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); | 1955 | //xx printk(KERN_WARNING "%s: kmalloc failed for CFI ident structure\n", map->name); |
1956 | return 0; | 1956 | return 0; |
1957 | } | 1957 | } |
1958 | 1958 | ||
1959 | memset(p_cfi->cfiq,0,sizeof(struct cfi_ident)); | 1959 | memset(cfi->cfiq, 0, sizeof(struct cfi_ident)); |
1960 | 1960 | ||
1961 | p_cfi->cfiq->P_ID = jedec_table[index].cmd_set; | 1961 | cfi->cfiq->P_ID = jedec_table[index].cmd_set; |
1962 | p_cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; | 1962 | cfi->cfiq->NumEraseRegions = jedec_table[index].nr_regions; |
1963 | p_cfi->cfiq->DevSize = jedec_table[index].dev_size; | 1963 | cfi->cfiq->DevSize = jedec_table[index].dev_size; |
1964 | p_cfi->cfi_mode = CFI_MODE_JEDEC; | 1964 | cfi->cfi_mode = CFI_MODE_JEDEC; |
1965 | cfi->sector_erase_cmd = CMD(0x30); | ||
1965 | 1966 | ||
1966 | for (i=0; i<num_erase_regions; i++){ | 1967 | for (i=0; i<num_erase_regions; i++){ |
1967 | p_cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; | 1968 | cfi->cfiq->EraseRegionInfo[i] = jedec_table[index].regions[i]; |
1968 | } | 1969 | } |
1969 | p_cfi->cmdset_priv = NULL; | 1970 | cfi->cmdset_priv = NULL; |
1970 | 1971 | ||
1971 | /* This may be redundant for some cases, but it doesn't hurt */ | 1972 | /* This may be redundant for some cases, but it doesn't hurt */ |
1972 | p_cfi->mfr = jedec_table[index].mfr_id; | 1973 | cfi->mfr = jedec_table[index].mfr_id; |
1973 | p_cfi->id = jedec_table[index].dev_id; | 1974 | cfi->id = jedec_table[index].dev_id; |
1974 | 1975 | ||
1975 | uaddr = jedec_table[index].uaddr; | 1976 | uaddr = jedec_table[index].uaddr; |
1976 | 1977 | ||
@@ -1978,8 +1979,8 @@ static int cfi_jedec_setup(struct cfi_private *p_cfi, int index) | |||
1978 | our brains explode when we see the datasheets talking about address | 1979 | our brains explode when we see the datasheets talking about address |
1979 | lines numbered from A-1 to A18. The CFI table has unlock addresses | 1980 | lines numbered from A-1 to A18. The CFI table has unlock addresses |
1980 | in device-words according to the mode the device is connected in */ | 1981 | in device-words according to the mode the device is connected in */ |
1981 | p_cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / p_cfi->device_type; | 1982 | cfi->addr_unlock1 = unlock_addrs[uaddr].addr1 / cfi->device_type; |
1982 | p_cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / p_cfi->device_type; | 1983 | cfi->addr_unlock2 = unlock_addrs[uaddr].addr2 / cfi->device_type; |
1983 | 1984 | ||
1984 | return 1; /* ok */ | 1985 | return 1; /* ok */ |
1985 | } | 1986 | } |
@@ -2175,7 +2176,7 @@ static int jedec_probe_chip(struct map_info *map, __u32 base, | |||
2175 | "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", | 2176 | "MTD %s(): matched device 0x%x,0x%x unlock_addrs: 0x%.4x 0x%.4x\n", |
2176 | __func__, cfi->mfr, cfi->id, | 2177 | __func__, cfi->mfr, cfi->id, |
2177 | cfi->addr_unlock1, cfi->addr_unlock2 ); | 2178 | cfi->addr_unlock1, cfi->addr_unlock2 ); |
2178 | if (!cfi_jedec_setup(cfi, i)) | 2179 | if (!cfi_jedec_setup(map, cfi, i)) |
2179 | return 0; | 2180 | return 0; |
2180 | goto ok_out; | 2181 | goto ok_out; |
2181 | } | 2182 | } |
diff --git a/drivers/mtd/maps/amd76xrom.c b/drivers/mtd/maps/amd76xrom.c index 77d64ce19e9f..92de7e3a49a5 100644 --- a/drivers/mtd/maps/amd76xrom.c +++ b/drivers/mtd/maps/amd76xrom.c | |||
@@ -151,6 +151,7 @@ static int __devinit amd76xrom_init_one (struct pci_dev *pdev, | |||
151 | printk(KERN_ERR MOD_NAME | 151 | printk(KERN_ERR MOD_NAME |
152 | " %s(): Unable to register resource %pR - kernel bug?\n", | 152 | " %s(): Unable to register resource %pR - kernel bug?\n", |
153 | __func__, &window->rsrc); | 153 | __func__, &window->rsrc); |
154 | return -EBUSY; | ||
154 | } | 155 | } |
155 | 156 | ||
156 | 157 | ||
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index cb20c67995d8..e0a2373bf0e2 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -413,7 +413,6 @@ error3: | |||
413 | error2: | 413 | error2: |
414 | list_del(&new->list); | 414 | list_del(&new->list); |
415 | error1: | 415 | error1: |
416 | kfree(new); | ||
417 | return ret; | 416 | return ret; |
418 | } | 417 | } |
419 | 418 | ||
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c index 15682ec8530e..28af71c61834 100644 --- a/drivers/mtd/nand/omap2.c +++ b/drivers/mtd/nand/omap2.c | |||
@@ -968,6 +968,6 @@ static void __exit omap_nand_exit(void) | |||
968 | module_init(omap_nand_init); | 968 | module_init(omap_nand_init); |
969 | module_exit(omap_nand_exit); | 969 | module_exit(omap_nand_exit); |
970 | 970 | ||
971 | MODULE_ALIAS(DRIVER_NAME); | 971 | MODULE_ALIAS("platform:" DRIVER_NAME); |
972 | MODULE_LICENSE("GPL"); | 972 | MODULE_LICENSE("GPL"); |
973 | MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); | 973 | MODULE_DESCRIPTION("Glue layer for NAND flash on TI OMAP boards"); |
diff --git a/drivers/mtd/onenand/generic.c b/drivers/mtd/onenand/generic.c index e78914938c5c..ac08750748a3 100644 --- a/drivers/mtd/onenand/generic.c +++ b/drivers/mtd/onenand/generic.c | |||
@@ -131,7 +131,7 @@ static struct platform_driver generic_onenand_driver = { | |||
131 | .remove = __devexit_p(generic_onenand_remove), | 131 | .remove = __devexit_p(generic_onenand_remove), |
132 | }; | 132 | }; |
133 | 133 | ||
134 | MODULE_ALIAS(DRIVER_NAME); | 134 | MODULE_ALIAS("platform:" DRIVER_NAME); |
135 | 135 | ||
136 | static int __init generic_onenand_init(void) | 136 | static int __init generic_onenand_init(void) |
137 | { | 137 | { |
diff --git a/drivers/mtd/onenand/omap2.c b/drivers/mtd/onenand/omap2.c index ac31f461cc1c..c849cacf4b2f 100644 --- a/drivers/mtd/onenand/omap2.c +++ b/drivers/mtd/onenand/omap2.c | |||
@@ -860,7 +860,7 @@ static void __exit omap2_onenand_exit(void) | |||
860 | module_init(omap2_onenand_init); | 860 | module_init(omap2_onenand_init); |
861 | module_exit(omap2_onenand_exit); | 861 | module_exit(omap2_onenand_exit); |
862 | 862 | ||
863 | MODULE_ALIAS(DRIVER_NAME); | 863 | MODULE_ALIAS("platform:" DRIVER_NAME); |
864 | MODULE_LICENSE("GPL"); | 864 | MODULE_LICENSE("GPL"); |
865 | MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); | 865 | MODULE_AUTHOR("Jarkko Lavinen <jarkko.lavinen@nokia.com>"); |
866 | MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); | 866 | MODULE_DESCRIPTION("Glue layer for OneNAND flash on OMAP2 / OMAP3"); |
diff --git a/drivers/net/ariadne.c b/drivers/net/ariadne.c index 39214e512452..7ca0eded2561 100644 --- a/drivers/net/ariadne.c +++ b/drivers/net/ariadne.c | |||
@@ -425,11 +425,6 @@ static irqreturn_t ariadne_interrupt(int irq, void *data) | |||
425 | int csr0, boguscnt; | 425 | int csr0, boguscnt; |
426 | int handled = 0; | 426 | int handled = 0; |
427 | 427 | ||
428 | if (dev == NULL) { | ||
429 | printk(KERN_WARNING "ariadne_interrupt(): irq for unknown device.\n"); | ||
430 | return IRQ_NONE; | ||
431 | } | ||
432 | |||
433 | lance->RAP = CSR0; /* PCnet-ISA Controller Status */ | 428 | lance->RAP = CSR0; /* PCnet-ISA Controller Status */ |
434 | 429 | ||
435 | if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ | 430 | if (!(lance->RDP & INTR)) /* Check if any interrupt has been */ |
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 653c62475cb6..8849699c66c4 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
@@ -22,7 +22,7 @@ | |||
22 | * (you will need to reboot afterwards) */ | 22 | * (you will need to reboot afterwards) */ |
23 | /* #define BNX2X_STOP_ON_ERROR */ | 23 | /* #define BNX2X_STOP_ON_ERROR */ |
24 | 24 | ||
25 | #define DRV_MODULE_VERSION "1.62.00-5" | 25 | #define DRV_MODULE_VERSION "1.62.00-6" |
26 | #define DRV_MODULE_RELDATE "2011/01/30" | 26 | #define DRV_MODULE_RELDATE "2011/01/30" |
27 | #define BNX2X_BC_VER 0x040200 | 27 | #define BNX2X_BC_VER 0x040200 |
28 | 28 | ||
@@ -1211,6 +1211,7 @@ struct bnx2x { | |||
1211 | /* DCBX Negotiation results */ | 1211 | /* DCBX Negotiation results */ |
1212 | struct dcbx_features dcbx_local_feat; | 1212 | struct dcbx_features dcbx_local_feat; |
1213 | u32 dcbx_error; | 1213 | u32 dcbx_error; |
1214 | u32 pending_max; | ||
1214 | }; | 1215 | }; |
1215 | 1216 | ||
1216 | /** | 1217 | /** |
@@ -1613,19 +1614,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
1613 | #define BNX2X_BTR 4 | 1614 | #define BNX2X_BTR 4 |
1614 | #define MAX_SPQ_PENDING 8 | 1615 | #define MAX_SPQ_PENDING 8 |
1615 | 1616 | ||
1616 | 1617 | /* CMNG constants, as derived from system spec calculations */ | |
1617 | /* CMNG constants | 1618 | /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ |
1618 | derived from lab experiments, and not from system spec calculations !!! */ | 1619 | #define DEF_MIN_RATE 100 |
1619 | #define DEF_MIN_RATE 100 | 1620 | /* resolution of the rate shaping timer - 400 usec */ |
1620 | /* resolution of the rate shaping timer - 100 usec */ | 1621 | #define RS_PERIODIC_TIMEOUT_USEC 400 |
1621 | #define RS_PERIODIC_TIMEOUT_USEC 100 | ||
1622 | /* resolution of fairness algorithm in usecs - | ||
1623 | coefficient for calculating the actual t fair */ | ||
1624 | #define T_FAIR_COEF 10000000 | ||
1625 | /* number of bytes in single QM arbitration cycle - | 1622 | /* number of bytes in single QM arbitration cycle - |
1626 | coefficient for calculating the fairness timer */ | 1623 | * coefficient for calculating the fairness timer */ |
1627 | #define QM_ARB_BYTES 40000 | 1624 | #define QM_ARB_BYTES 160000 |
1628 | #define FAIR_MEM 2 | 1625 | /* resolution of Min algorithm 1:100 */ |
1626 | #define MIN_RES 100 | ||
1627 | /* how many bytes above threshold for the minimal credit of Min algorithm*/ | ||
1628 | #define MIN_ABOVE_THRESH 32768 | ||
1629 | /* Fairness algorithm integration time coefficient - | ||
1630 | * for calculating the actual Tfair */ | ||
1631 | #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) | ||
1632 | /* Memory of fairness algorithm . 2 cycles */ | ||
1633 | #define FAIR_MEM 2 | ||
1629 | 1634 | ||
1630 | 1635 | ||
1631 | #define ATTN_NIG_FOR_FUNC (1L << 8) | 1636 | #define ATTN_NIG_FOR_FUNC (1L << 8) |
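With the constants above, T_FAIR_COEF is no longer a literal but is derived from MIN_ABOVE_THRESH, QM_ARB_BYTES and MIN_RES. A quick standalone check of the arithmetic, using the values from the hunk:

#include <stdio.h>

#define QM_ARB_BYTES      160000   /* bytes per QM arbitration cycle */
#define MIN_RES           100      /* Min algorithm resolution, 1:100 */
#define MIN_ABOVE_THRESH  32768    /* bytes above threshold for minimal credit */
#define T_FAIR_COEF       ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

int main(void)
{
        /* (32768 + 160000) * 8 * 100 = 154,214,400 */
        printf("T_FAIR_COEF = %d\n", T_FAIR_COEF);
        return 0;
}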
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 710ce5d04c53..a71b32940533 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
@@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
259 | #endif | 259 | #endif |
260 | } | 260 | } |
261 | 261 | ||
262 | /* Timestamp option length allowed for TPA aggregation: | ||
263 | * | ||
264 | * nop nop kind length echo val | ||
265 | */ | ||
266 | #define TPA_TSTAMP_OPT_LEN 12 | ||
267 | /** | ||
268 | * Calculate the approximate value of the MSS for this | ||
269 | * aggregation using the first packet of it. | ||
270 | * | ||
271 | * @param bp | ||
272 | * @param parsing_flags Parsing flags from the START CQE | ||
273 | * @param len_on_bd Total length of the first packet for the | ||
274 | * aggregation. | ||
275 | */ | ||
276 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | ||
277 | u16 len_on_bd) | ||
278 | { | ||
279 | /* TPA aggregation won't have IP options or TCP options | ||
280 | * other than timestamp. | ||
281 | */ | ||
282 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | ||
283 | |||
284 | |||
285 | /* Check if there was a TCP timestamp; if there was, it will | ||
286 | * always be 12 bytes long: nop nop kind length echo val. | ||
287 | * | ||
288 | * Otherwise FW would close the aggregation. | ||
289 | */ | ||
290 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) | ||
291 | hdrs_len += TPA_TSTAMP_OPT_LEN; | ||
292 | |||
293 | return len_on_bd - hdrs_len; | ||
294 | } | ||
295 | |||
262 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 296 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
263 | struct sk_buff *skb, | 297 | struct sk_buff *skb, |
264 | struct eth_fast_path_rx_cqe *fp_cqe, | 298 | struct eth_fast_path_rx_cqe *fp_cqe, |
265 | u16 cqe_idx) | 299 | u16 cqe_idx, u16 parsing_flags) |
266 | { | 300 | { |
267 | struct sw_rx_page *rx_pg, old_rx_pg; | 301 | struct sw_rx_page *rx_pg, old_rx_pg; |
268 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | 302 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); |
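bnx2x_set_lro_mss() above estimates the aggregation's MSS from the first packet: the length on the BD minus the Ethernet, IPv4 and TCP headers, plus 12 bytes when the TCP timestamp option is present. A standalone check of that arithmetic; the header sizes assume plain IPv4/TCP with no other options, as the hunk's comment states.

#include <stdio.h>

#define ETH_HLEN            14
#define IPV4_HDR_LEN        20   /* sizeof(struct iphdr), no options */
#define TCP_HDR_LEN         20   /* sizeof(struct tcphdr), no options */
#define TPA_TSTAMP_OPT_LEN  12   /* nop nop kind length echo val */

static unsigned int lro_mss(unsigned int len_on_bd, int has_tstamp)
{
        unsigned int hdrs = ETH_HLEN + IPV4_HDR_LEN + TCP_HDR_LEN;

        if (has_tstamp)
                hdrs += TPA_TSTAMP_OPT_LEN;
        return len_on_bd - hdrs;
}

int main(void)
{
        /* A 1514-byte first frame: MSS is 1460, or 1448 with timestamps. */
        printf("%u %u\n", lro_mss(1514, 0), lro_mss(1514, 1));
        return 0;
}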
@@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
275 | 309 | ||
276 | /* This is needed in order to enable forwarding support */ | 310 | /* This is needed in order to enable forwarding support */ |
277 | if (frag_size) | 311 | if (frag_size) |
278 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | 312 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, |
279 | max(frag_size, (u32)len_on_bd)); | 313 | len_on_bd); |
280 | 314 | ||
281 | #ifdef BNX2X_STOP_ON_ERROR | 315 | #ifdef BNX2X_STOP_ON_ERROR |
282 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 316 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
@@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
344 | if (likely(new_skb)) { | 378 | if (likely(new_skb)) { |
345 | /* fix ip xsum and give it to the stack */ | 379 | /* fix ip xsum and give it to the stack */ |
346 | /* (no need to map the new skb) */ | 380 | /* (no need to map the new skb) */ |
381 | u16 parsing_flags = | ||
382 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
347 | 383 | ||
348 | prefetch(skb); | 384 | prefetch(skb); |
349 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 385 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
@@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
373 | } | 409 | } |
374 | 410 | ||
375 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | 411 | if (!bnx2x_fill_frag_skb(bp, fp, skb, |
376 | &cqe->fast_path_cqe, cqe_idx)) { | 412 | &cqe->fast_path_cqe, cqe_idx, |
377 | if ((le16_to_cpu(cqe->fast_path_cqe. | 413 | parsing_flags)) { |
378 | pars_flags.flags) & PARSING_FLAGS_VLAN)) | 414 | if (parsing_flags & PARSING_FLAGS_VLAN) |
379 | __vlan_hwaccel_put_tag(skb, | 415 | __vlan_hwaccel_put_tag(skb, |
380 | le16_to_cpu(cqe->fast_path_cqe. | 416 | le16_to_cpu(cqe->fast_path_cqe. |
381 | vlan_tag)); | 417 | vlan_tag)); |
@@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) | |||
703 | { | 739 | { |
704 | u16 line_speed = bp->link_vars.line_speed; | 740 | u16 line_speed = bp->link_vars.line_speed; |
705 | if (IS_MF(bp)) { | 741 | if (IS_MF(bp)) { |
706 | u16 maxCfg = (bp->mf_config[BP_VN(bp)] & | 742 | u16 maxCfg = bnx2x_extract_max_cfg(bp, |
707 | FUNC_MF_CFG_MAX_BW_MASK) >> | 743 | bp->mf_config[BP_VN(bp)]); |
708 | FUNC_MF_CFG_MAX_BW_SHIFT; | 744 | |
709 | /* Calculate the current MAX line speed limit for the DCC | 745 | /* Calculate the current MAX line speed limit for the MF |
710 | * capable devices | 746 | * devices |
711 | */ | 747 | */ |
712 | if (IS_MF_SD(bp)) { | 748 | if (IS_MF_SI(bp)) |
749 | line_speed = (line_speed * maxCfg) / 100; | ||
750 | else { /* SD mode */ | ||
713 | u16 vn_max_rate = maxCfg * 100; | 751 | u16 vn_max_rate = maxCfg * 100; |
714 | 752 | ||
715 | if (vn_max_rate < line_speed) | 753 | if (vn_max_rate < line_speed) |
716 | line_speed = vn_max_rate; | 754 | line_speed = vn_max_rate; |
717 | } else /* IS_MF_SI(bp)) */ | 755 | } |
718 | line_speed = (line_speed * maxCfg) / 100; | ||
719 | } | 756 | } |
720 | 757 | ||
721 | return line_speed; | 758 | return line_speed; |
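In the hunk above the multi-function speed is now computed from the extracted MAX-BW value: a percentage of the physical line speed in SI mode, and a ceiling of maxCfg x 100 Mbps in SD mode. A small standalone sketch of both branches:

#include <stdio.h>

/* line_speed in Mbps, max_cfg as extracted from the MF configuration. */
static unsigned int mf_speed(unsigned int line_speed, unsigned int max_cfg,
                             int si_mode)
{
        if (si_mode)
                return (line_speed * max_cfg) / 100;    /* percentage of link */

        /* SD mode: max_cfg is in units of 100 Mbps and acts as a ceiling. */
        unsigned int vn_max_rate = max_cfg * 100;

        return vn_max_rate < line_speed ? vn_max_rate : line_speed;
}

int main(void)
{
        printf("SI: %u Mbps\n", mf_speed(10000, 25, 1));          /* 2500 */
        printf("SD: %u Mbps\n", mf_speed(10000, 25, 0));          /* 2500 */
        printf("SD, low link: %u Mbps\n", mf_speed(1000, 25, 0)); /* 1000 */
        return 0;
}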
@@ -959,6 +996,23 @@ void bnx2x_free_skbs(struct bnx2x *bp) | |||
959 | bnx2x_free_rx_skbs(bp); | 996 | bnx2x_free_rx_skbs(bp); |
960 | } | 997 | } |
961 | 998 | ||
999 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value) | ||
1000 | { | ||
1001 | /* load old values */ | ||
1002 | u32 mf_cfg = bp->mf_config[BP_VN(bp)]; | ||
1003 | |||
1004 | if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) { | ||
1005 | /* leave all but MAX value */ | ||
1006 | mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; | ||
1007 | |||
1008 | /* set new MAX value */ | ||
1009 | mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT) | ||
1010 | & FUNC_MF_CFG_MAX_BW_MASK; | ||
1011 | |||
1012 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg); | ||
1013 | } | ||
1014 | } | ||
1015 | |||
962 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) | 1016 | static void bnx2x_free_msix_irqs(struct bnx2x *bp) |
963 | { | 1017 | { |
964 | int i, offset = 1; | 1018 | int i, offset = 1; |
@@ -1427,6 +1481,11 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode) | |||
1427 | 1481 | ||
1428 | bnx2x_set_eth_mac(bp, 1); | 1482 | bnx2x_set_eth_mac(bp, 1); |
1429 | 1483 | ||
1484 | if (bp->pending_max) { | ||
1485 | bnx2x_update_max_mf_config(bp, bp->pending_max); | ||
1486 | bp->pending_max = 0; | ||
1487 | } | ||
1488 | |||
1430 | if (bp->port.pmf) | 1489 | if (bp->port.pmf) |
1431 | bnx2x_initial_phy_init(bp, load_mode); | 1490 | bnx2x_initial_phy_init(bp, load_mode); |
1432 | 1491 | ||
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 03eb4d68e6bb..85ea7f26b51f 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -341,6 +341,15 @@ void bnx2x_dcbx_init(struct bnx2x *bp); | |||
341 | */ | 341 | */ |
342 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); | 342 | int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state); |
343 | 343 | ||
344 | /** | ||
345 | * Updates MAX part of MF configuration in HW | ||
346 | * (if required) | ||
347 | * | ||
348 | * @param bp | ||
349 | * @param value | ||
350 | */ | ||
351 | void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value); | ||
352 | |||
344 | /* dev_close main block */ | 353 | /* dev_close main block */ |
345 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); | 354 | int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode); |
346 | 355 | ||
@@ -1044,4 +1053,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, | |||
1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1053 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1054 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
1046 | 1055 | ||
1056 | /** | ||
1057 | * Extracts MAX BW part from MF configuration. | ||
1058 | * | ||
1059 | * @param bp | ||
1060 | * @param mf_cfg | ||
1061 | * | ||
1062 | * @return u16 | ||
1063 | */ | ||
1064 | static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) | ||
1065 | { | ||
1066 | u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
1067 | FUNC_MF_CFG_MAX_BW_SHIFT; | ||
1068 | if (!max_cfg) { | ||
1069 | BNX2X_ERR("Illegal configuration detected for Max BW - " | ||
1070 | "using 100 instead\n"); | ||
1071 | max_cfg = 100; | ||
1072 | } | ||
1073 | return max_cfg; | ||
1074 | } | ||
1075 | |||
1047 | #endif /* BNX2X_CMN_H */ | 1076 | #endif /* BNX2X_CMN_H */ |
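bnx2x_extract_max_cfg() above isolates the MAX-BW field and falls back to 100 when the field is zero. The sketch below mirrors that behaviour; the mask and shift values are assumptions for illustration, since the real FUNC_MF_CFG_MAX_BW_* definitions are not part of this hunk.

#include <stdio.h>

/* Assumed field layout for the sketch only. */
#define MAX_BW_MASK   0xffff0000u
#define MAX_BW_SHIFT  16

static unsigned int extract_max_cfg(unsigned int mf_cfg)
{
        unsigned int max_cfg = (mf_cfg & MAX_BW_MASK) >> MAX_BW_SHIFT;

        if (!max_cfg) {
                fprintf(stderr, "illegal Max BW configuration, using 100\n");
                max_cfg = 100;
        }
        return max_cfg;
}

int main(void)
{
        printf("%u %u\n", extract_max_cfg(0x00190000), extract_max_cfg(0));
        return 0;
}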
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 5b44a8b48509..7e92f9d0dcfd 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
@@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
238 | speed |= (cmd->speed_hi << 16); | 238 | speed |= (cmd->speed_hi << 16); |
239 | 239 | ||
240 | if (IS_MF_SI(bp)) { | 240 | if (IS_MF_SI(bp)) { |
241 | u32 param = 0; | 241 | u32 part; |
242 | u32 line_speed = bp->link_vars.line_speed; | 242 | u32 line_speed = bp->link_vars.line_speed; |
243 | 243 | ||
244 | /* use 10G if no link detected */ | 244 | /* use 10G if no link detected */ |
@@ -251,23 +251,22 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
251 | REQ_BC_VER_4_SET_MF_BW); | 251 | REQ_BC_VER_4_SET_MF_BW); |
252 | return -EINVAL; | 252 | return -EINVAL; |
253 | } | 253 | } |
254 | if (line_speed < speed) { | 254 | |
255 | BNX2X_DEV_INFO("New speed should be less or equal " | 255 | part = (speed * 100) / line_speed; |
256 | "to actual line speed\n"); | 256 | |
257 | if (line_speed < speed || !part) { | ||
258 | BNX2X_DEV_INFO("Speed setting should be in a range " | ||
259 | "from 1%% to 100%% " | ||
260 | "of actual line speed\n"); | ||
257 | return -EINVAL; | 261 | return -EINVAL; |
258 | } | 262 | } |
259 | /* load old values */ | ||
260 | param = bp->mf_config[BP_VN(bp)]; | ||
261 | |||
262 | /* leave only MIN value */ | ||
263 | param &= FUNC_MF_CFG_MIN_BW_MASK; | ||
264 | 263 | ||
265 | /* set new MAX value */ | 264 | if (bp->state != BNX2X_STATE_OPEN) |
266 | param |= (((speed * 100) / line_speed) | 265 | /* store value for following "load" */ |
267 | << FUNC_MF_CFG_MAX_BW_SHIFT) | 266 | bp->pending_max = part; |
268 | & FUNC_MF_CFG_MAX_BW_MASK; | 267 | else |
268 | bnx2x_update_max_mf_config(bp, part); | ||
269 | 269 | ||
270 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); | ||
271 | return 0; | 270 | return 0; |
272 | } | 271 | } |
273 | 272 | ||
@@ -1781,9 +1780,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) | |||
1781 | { 0x100, 0x350 }, /* manuf_info */ | 1780 | { 0x100, 0x350 }, /* manuf_info */ |
1782 | { 0x450, 0xf0 }, /* feature_info */ | 1781 | { 0x450, 0xf0 }, /* feature_info */ |
1783 | { 0x640, 0x64 }, /* upgrade_key_info */ | 1782 | { 0x640, 0x64 }, /* upgrade_key_info */ |
1784 | { 0x6a4, 0x64 }, | ||
1785 | { 0x708, 0x70 }, /* manuf_key_info */ | 1783 | { 0x708, 0x70 }, /* manuf_key_info */ |
1786 | { 0x778, 0x70 }, | ||
1787 | { 0, 0 } | 1784 | { 0, 0 } |
1788 | }; | 1785 | }; |
1789 | __be32 buf[0x350 / 4]; | 1786 | __be32 buf[0x350 / 4]; |
@@ -1933,11 +1930,11 @@ static void bnx2x_self_test(struct net_device *dev, | |||
1933 | buf[4] = 1; | 1930 | buf[4] = 1; |
1934 | etest->flags |= ETH_TEST_FL_FAILED; | 1931 | etest->flags |= ETH_TEST_FL_FAILED; |
1935 | } | 1932 | } |
1936 | if (bp->port.pmf) | 1933 | |
1937 | if (bnx2x_link_test(bp, is_serdes) != 0) { | 1934 | if (bnx2x_link_test(bp, is_serdes) != 0) { |
1938 | buf[5] = 1; | 1935 | buf[5] = 1; |
1939 | etest->flags |= ETH_TEST_FL_FAILED; | 1936 | etest->flags |= ETH_TEST_FL_FAILED; |
1940 | } | 1937 | } |
1941 | 1938 | ||
1942 | #ifdef BNX2X_EXTRA_DEBUG | 1939 | #ifdef BNX2X_EXTRA_DEBUG |
1943 | bnx2x_panic_dump(bp); | 1940 | bnx2x_panic_dump(bp); |
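The rewritten ethtool path converts the requested speed into a percentage of the current line speed, rejects anything outside 1% to 100%, and, when the interface is not up, just remembers the value so the next load can apply it. A hedged sketch of that arithmetic in plain C, with a hypothetical pending_max field standing in for the driver state (returning -1 stands in for the driver's -EINVAL):

#include <stdbool.h>
#include <stdint.h>

struct demo_dev {
	uint32_t line_speed;   /* Mb/s, 0 when no link is detected */
	bool     is_open;
	uint32_t pending_max;  /* stand-in for the driver's bp->pending_max */
};

/* Returns the MAX BW "part" in percent, or -1 for an invalid request. */
static int compute_max_bw_part(struct demo_dev *dev, uint32_t req_speed)
{
	uint32_t line_speed = dev->line_speed ? dev->line_speed : 10000; /* use 10G if no link */
	uint32_t part = (req_speed * 100) / line_speed;

	/* valid settings are 1%..100% of the actual line speed */
	if (line_speed < req_speed || !part)
		return -1;

	if (!dev->is_open)
		dev->pending_max = part;  /* remembered and applied on the next load */
	/* else: the driver would push the new MAX value to the firmware now */

	return (int)part;
}

int main(void)
{
	struct demo_dev dev = { .line_speed = 10000, .is_open = false, .pending_max = 0 };
	return compute_max_bw_part(&dev, 5000) == 50 ? 0 : 1;  /* 5G on a 10G link -> 50% */
}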
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index 5a268e9a0895..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
@@ -241,7 +241,7 @@ static const struct { | |||
241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't | 241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't |
242 | * want to handle "system kill" flow at the moment. | 242 | * want to handle "system kill" flow at the moment. |
243 | */ | 243 | */ |
244 | BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), | 244 | BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), |
245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), | 245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), |
246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), | 246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), |
247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), | 247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index d584d32c747d..aa032339e321 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
1974 | vn_max_rate = 0; | 1974 | vn_max_rate = 0; |
1975 | 1975 | ||
1976 | } else { | 1976 | } else { |
1977 | u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); | ||
1978 | |||
1977 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 1979 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
1978 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 1980 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
1979 | /* If min rate is zero - set it to 1 */ | 1981 | /* If fairness is enabled (not all min rates are zeroes) and |
1982 | if current min rate is zero - set it to 1. | ||
1983 | This is a requirement of the algorithm. */ | ||
1980 | if (bp->vn_weight_sum && (vn_min_rate == 0)) | 1984 | if (bp->vn_weight_sum && (vn_min_rate == 0)) |
1981 | vn_min_rate = DEF_MIN_RATE; | 1985 | vn_min_rate = DEF_MIN_RATE; |
1982 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 1986 | |
1983 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 1987 | if (IS_MF_SI(bp)) |
1988 | /* maxCfg in percents of linkspeed */ | ||
1989 | vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; | ||
1990 | else | ||
1991 | /* maxCfg is absolute in 100Mb units */ | ||
1992 | vn_max_rate = maxCfg * 100; | ||
1984 | } | 1993 | } |
1985 | 1994 | ||
1986 | DP(NETIF_MSG_IFUP, | 1995 | DP(NETIF_MSG_IFUP, |
@@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
2006 | m_fair_vn.vn_credit_delta = | 2015 | m_fair_vn.vn_credit_delta = |
2007 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / | 2016 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / |
2008 | (8 * bp->vn_weight_sum))), | 2017 | (8 * bp->vn_weight_sum))), |
2009 | (bp->cmng.fair_vars.fair_threshold * 2)); | 2018 | (bp->cmng.fair_vars.fair_threshold + |
2019 | MIN_ABOVE_THRESH)); | ||
2010 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", | 2020 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", |
2011 | m_fair_vn.vn_credit_delta); | 2021 | m_fair_vn.vn_credit_delta); |
2012 | } | 2022 | } |
@@ -2082,8 +2092,9 @@ static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type) | |||
2082 | bnx2x_calc_vn_weight_sum(bp); | 2092 | bnx2x_calc_vn_weight_sum(bp); |
2083 | 2093 | ||
2084 | /* calculate and set min-max rate for each vn */ | 2094 | /* calculate and set min-max rate for each vn */ |
2085 | for (vn = VN_0; vn < E1HVN_MAX; vn++) | 2095 | if (bp->port.pmf) |
2086 | bnx2x_init_vn_minmax(bp, vn); | 2096 | for (vn = VN_0; vn < E1HVN_MAX; vn++) |
2097 | bnx2x_init_vn_minmax(bp, vn); | ||
2087 | 2098 | ||
2088 | /* always enable rate shaping and fairness */ | 2099 | /* always enable rate shaping and fairness */ |
2089 | bp->cmng.flags.cmng_enables |= | 2100 | bp->cmng.flags.cmng_enables |= |
@@ -2152,13 +2163,6 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2152 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); | 2163 | bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); |
2153 | } | 2164 | } |
2154 | 2165 | ||
2155 | /* indicate link status only if link status actually changed */ | ||
2156 | if (prev_link_status != bp->link_vars.link_status) | ||
2157 | bnx2x_link_report(bp); | ||
2158 | |||
2159 | if (IS_MF(bp)) | ||
2160 | bnx2x_link_sync_notify(bp); | ||
2161 | |||
2162 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { | 2166 | if (bp->link_vars.link_up && bp->link_vars.line_speed) { |
2163 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); | 2167 | int cmng_fns = bnx2x_get_cmng_fns_mode(bp); |
2164 | 2168 | ||
@@ -2170,6 +2174,13 @@ static void bnx2x_link_attn(struct bnx2x *bp) | |||
2170 | DP(NETIF_MSG_IFUP, | 2174 | DP(NETIF_MSG_IFUP, |
2171 | "single function mode without fairness\n"); | 2175 | "single function mode without fairness\n"); |
2172 | } | 2176 | } |
2177 | |||
2178 | if (IS_MF(bp)) | ||
2179 | bnx2x_link_sync_notify(bp); | ||
2180 | |||
2181 | /* indicate link status only if link status actually changed */ | ||
2182 | if (prev_link_status != bp->link_vars.link_status) | ||
2183 | bnx2x_link_report(bp); | ||
2173 | } | 2184 | } |
2174 | 2185 | ||
2175 | void bnx2x__link_status_update(struct bnx2x *bp) | 2186 | void bnx2x__link_status_update(struct bnx2x *bp) |
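The min/max hunk above now distinguishes two encodings of maxCfg: in switch-independent (MF SI) mode it is a percentage of the negotiated link speed, otherwise it is an absolute value in 100 Mb/s units. A small sketch of that branch with made-up numbers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t vn_max_rate(bool mf_si, uint32_t line_speed, uint32_t max_cfg)
{
	if (mf_si)
		return (line_speed * max_cfg) / 100; /* max_cfg is a percentage of link speed */
	return max_cfg * 100;                        /* max_cfg is absolute, in 100 Mb units  */
}

int main(void)
{
	printf("%u\n", vn_max_rate(true, 10000, 50));  /* 50% of 10G -> 5000 Mb/s */
	printf("%u\n", vn_max_rate(false, 10000, 50)); /* 50 * 100   -> 5000 Mb/s */
	return 0;
}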
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index bda60d590fa8..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
@@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | |||
1239 | if (unlikely(bp->panic)) | 1239 | if (unlikely(bp->panic)) |
1240 | return; | 1240 | return; |
1241 | 1241 | ||
1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | ||
1243 | |||
1242 | /* Protect a state change flow */ | 1244 | /* Protect a state change flow */ |
1243 | spin_lock_bh(&bp->stats_lock); | 1245 | spin_lock_bh(&bp->stats_lock); |
1244 | state = bp->stats_state; | 1246 | state = bp->stats_state; |
1245 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
1246 | spin_unlock_bh(&bp->stats_lock); | 1248 | spin_unlock_bh(&bp->stats_lock); |
1247 | 1249 | ||
1248 | bnx2x_stats_stm[state][event].action(bp); | ||
1249 | |||
1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
1252 | state, event, bp->stats_state); | 1252 | state, event, bp->stats_state); |
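The stats hunk reorders the handler so the action for the current state runs first and the state transition is then committed under stats_lock, instead of dispatching the action after the lock has already been dropped. A hedged pthread sketch of that shape; the table contents are dummies, not the real bnx2x state machine:

#include <pthread.h>
#include <stdio.h>

enum { ST_IDLE, ST_RUN, NUM_STATES };
enum { EV_START, EV_STOP, NUM_EVENTS };

struct transition {
	void (*action)(void);
	int next_state;
};

static void do_nothing(void) { }
static void do_start(void)   { puts("start"); }
static void do_stop(void)    { puts("stop"); }

/* Dummy table standing in for bnx2x_stats_stm[][]. */
static const struct transition stm[NUM_STATES][NUM_EVENTS] = {
	[ST_IDLE] = { [EV_START] = { do_start,   ST_RUN  },
		      [EV_STOP]  = { do_nothing, ST_IDLE } },
	[ST_RUN]  = { [EV_START] = { do_nothing, ST_RUN  },
		      [EV_STOP]  = { do_stop,    ST_IDLE } },
};

static int current_state = ST_IDLE;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void handle_event(int event)
{
	/* 1. run the action for the state we are currently in ... */
	stm[current_state][event].action();

	/* 2. ... then commit the state transition under the lock */
	pthread_mutex_lock(&state_lock);
	current_state = stm[current_state][event].next_state;
	pthread_mutex_unlock(&state_lock);
}

int main(void)
{
	handle_event(EV_START);
	handle_event(EV_STOP);
	return 0;
}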
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index 1024ae158227..a5d5d0b5b155 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -281,23 +281,23 @@ static inline int __check_agg_selection_timer(struct port *port) | |||
281 | } | 281 | } |
282 | 282 | ||
283 | /** | 283 | /** |
284 | * __get_rx_machine_lock - lock the port's RX machine | 284 | * __get_state_machine_lock - lock the port's state machines |
285 | * @port: the port we're looking at | 285 | * @port: the port we're looking at |
286 | * | 286 | * |
287 | */ | 287 | */ |
288 | static inline void __get_rx_machine_lock(struct port *port) | 288 | static inline void __get_state_machine_lock(struct port *port) |
289 | { | 289 | { |
290 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 290 | spin_lock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
291 | } | 291 | } |
292 | 292 | ||
293 | /** | 293 | /** |
294 | * __release_rx_machine_lock - unlock the port's RX machine | 294 | * __release_state_machine_lock - unlock the port's state machines |
295 | * @port: the port we're looking at | 295 | * @port: the port we're looking at |
296 | * | 296 | * |
297 | */ | 297 | */ |
298 | static inline void __release_rx_machine_lock(struct port *port) | 298 | static inline void __release_state_machine_lock(struct port *port) |
299 | { | 299 | { |
300 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 300 | spin_unlock_bh(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
301 | } | 301 | } |
302 | 302 | ||
303 | /** | 303 | /** |
@@ -388,14 +388,14 @@ static u8 __get_duplex(struct port *port) | |||
388 | } | 388 | } |
389 | 389 | ||
390 | /** | 390 | /** |
391 | * __initialize_port_locks - initialize a port's RX machine spinlock | 391 | * __initialize_port_locks - initialize a port's STATE machine spinlock |
392 | * @port: the port we're looking at | 392 | * @port: the port we're looking at |
393 | * | 393 | * |
394 | */ | 394 | */ |
395 | static inline void __initialize_port_locks(struct port *port) | 395 | static inline void __initialize_port_locks(struct port *port) |
396 | { | 396 | { |
397 | // make sure it isn't called twice | 397 | // make sure it isn't called twice |
398 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).rx_machine_lock)); | 398 | spin_lock_init(&(SLAVE_AD_INFO(port->slave).state_machine_lock)); |
399 | } | 399 | } |
400 | 400 | ||
401 | //conversions | 401 | //conversions |
@@ -1025,9 +1025,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1025 | { | 1025 | { |
1026 | rx_states_t last_state; | 1026 | rx_states_t last_state; |
1027 | 1027 | ||
1028 | // Lock to prevent 2 instances of this function to run simultaneously(rx interrupt and periodic machine callback) | ||
1029 | __get_rx_machine_lock(port); | ||
1030 | |||
1031 | // keep current State Machine state to compare later if it was changed | 1028 | // keep current State Machine state to compare later if it was changed |
1032 | last_state = port->sm_rx_state; | 1029 | last_state = port->sm_rx_state; |
1033 | 1030 | ||
@@ -1133,7 +1130,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1133 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" | 1130 | pr_err("%s: An illegal loopback occurred on adapter (%s).\n" |
1134 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", | 1131 | "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n", |
1135 | port->slave->dev->master->name, port->slave->dev->name); | 1132 | port->slave->dev->master->name, port->slave->dev->name); |
1136 | __release_rx_machine_lock(port); | ||
1137 | return; | 1133 | return; |
1138 | } | 1134 | } |
1139 | __update_selected(lacpdu, port); | 1135 | __update_selected(lacpdu, port); |
@@ -1153,7 +1149,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port) | |||
1153 | break; | 1149 | break; |
1154 | } | 1150 | } |
1155 | } | 1151 | } |
1156 | __release_rx_machine_lock(port); | ||
1157 | } | 1152 | } |
1158 | 1153 | ||
1159 | /** | 1154 | /** |
@@ -2155,6 +2150,12 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2155 | goto re_arm; | 2150 | goto re_arm; |
2156 | } | 2151 | } |
2157 | 2152 | ||
2153 | /* Lock around state machines to protect data accessed | ||
2154 | * by all (e.g., port->sm_vars). ad_rx_machine may run | ||
2155 | * concurrently due to incoming LACPDU. | ||
2156 | */ | ||
2157 | __get_state_machine_lock(port); | ||
2158 | |||
2158 | ad_rx_machine(NULL, port); | 2159 | ad_rx_machine(NULL, port); |
2159 | ad_periodic_machine(port); | 2160 | ad_periodic_machine(port); |
2160 | ad_port_selection_logic(port); | 2161 | ad_port_selection_logic(port); |
@@ -2164,6 +2165,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2164 | // turn off the BEGIN bit, since we already handled it | 2165 | // turn off the BEGIN bit, since we already handled it |
2165 | if (port->sm_vars & AD_PORT_BEGIN) | 2166 | if (port->sm_vars & AD_PORT_BEGIN) |
2166 | port->sm_vars &= ~AD_PORT_BEGIN; | 2167 | port->sm_vars &= ~AD_PORT_BEGIN; |
2168 | |||
2169 | __release_state_machine_lock(port); | ||
2167 | } | 2170 | } |
2168 | 2171 | ||
2169 | re_arm: | 2172 | re_arm: |
@@ -2200,7 +2203,10 @@ static void bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave, u | |||
2200 | case AD_TYPE_LACPDU: | 2203 | case AD_TYPE_LACPDU: |
2201 | pr_debug("Received LACPDU on port %d\n", | 2204 | pr_debug("Received LACPDU on port %d\n", |
2202 | port->actor_port_number); | 2205 | port->actor_port_number); |
2206 | /* Protect against concurrent state machines */ | ||
2207 | __get_state_machine_lock(port); | ||
2203 | ad_rx_machine(lacpdu, port); | 2208 | ad_rx_machine(lacpdu, port); |
2209 | __release_state_machine_lock(port); | ||
2204 | break; | 2210 | break; |
2205 | 2211 | ||
2206 | case AD_TYPE_MARKER: | 2212 | case AD_TYPE_MARKER: |
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h index 2c46a154f2c6..b28baff70864 100644 --- a/drivers/net/bonding/bond_3ad.h +++ b/drivers/net/bonding/bond_3ad.h | |||
@@ -264,7 +264,8 @@ struct ad_bond_info { | |||
264 | struct ad_slave_info { | 264 | struct ad_slave_info { |
265 | struct aggregator aggregator; // 802.3ad aggregator structure | 265 | struct aggregator aggregator; // 802.3ad aggregator structure |
266 | struct port port; // 802.3ad port structure | 266 | struct port port; // 802.3ad port structure |
267 | spinlock_t rx_machine_lock; // To avoid race condition between callback and receive interrupt | 267 | spinlock_t state_machine_lock; /* mutex state machines vs. |
268 | incoming LACPDU */ | ||
268 | u16 id; | 269 | u16 id; |
269 | }; | 270 | }; |
270 | 271 | ||
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 5157e15e96eb..aeea9f9ff6e8 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c | |||
@@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = { | |||
633 | }; | 633 | }; |
634 | 634 | ||
635 | static const struct can_bittiming_const softing_btr_const = { | 635 | static const struct can_bittiming_const softing_btr_const = { |
636 | .name = "softing", | ||
636 | .tseg1_min = 1, | 637 | .tseg1_min = 1, |
637 | .tseg1_max = 16, | 638 | .tseg1_max = 16, |
638 | .tseg2_min = 1, | 639 | .tseg2_min = 1, |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 7ff170cbc7dc..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
@@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2761 | int kcqe_cnt; | 2761 | int kcqe_cnt; |
2762 | 2762 | ||
2763 | /* status block index must be read before reading other fields */ | ||
2764 | rmb(); | ||
2763 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2765 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2764 | 2766 | ||
2765 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { | 2767 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { |
@@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
2770 | barrier(); | 2772 | barrier(); |
2771 | if (status_idx != *cp->kcq1.status_idx_ptr) { | 2773 | if (status_idx != *cp->kcq1.status_idx_ptr) { |
2772 | status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2774 | status_idx = (u16) *cp->kcq1.status_idx_ptr; |
2775 | /* status block index must be read first */ | ||
2776 | rmb(); | ||
2773 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2777 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
2774 | } else | 2778 | } else |
2775 | break; | 2779 | break; |
@@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2888 | u32 last_status = *info->status_idx_ptr; | 2892 | u32 last_status = *info->status_idx_ptr; |
2889 | int kcqe_cnt; | 2893 | int kcqe_cnt; |
2890 | 2894 | ||
2895 | /* status block index must be read before reading the KCQ */ | ||
2896 | rmb(); | ||
2891 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { | 2897 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { |
2892 | 2898 | ||
2893 | service_kcqes(dev, kcqe_cnt); | 2899 | service_kcqes(dev, kcqe_cnt); |
@@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
2898 | break; | 2904 | break; |
2899 | 2905 | ||
2900 | last_status = *info->status_idx_ptr; | 2906 | last_status = *info->status_idx_ptr; |
2907 | /* status block index must be read before reading the KCQ */ | ||
2908 | rmb(); | ||
2901 | } | 2909 | } |
2902 | return last_status; | 2910 | return last_status; |
2903 | } | 2911 | } |
@@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
2906 | { | 2914 | { |
2907 | struct cnic_dev *dev = (struct cnic_dev *) data; | 2915 | struct cnic_dev *dev = (struct cnic_dev *) data; |
2908 | struct cnic_local *cp = dev->cnic_priv; | 2916 | struct cnic_local *cp = dev->cnic_priv; |
2909 | u32 status_idx; | 2917 | u32 status_idx, new_status_idx; |
2910 | 2918 | ||
2911 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | 2919 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) |
2912 | return; | 2920 | return; |
2913 | 2921 | ||
2914 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | 2922 | while (1) { |
2923 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | ||
2915 | 2924 | ||
2916 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2925 | CNIC_WR16(dev, cp->kcq1.io_addr, |
2926 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | ||
2917 | 2927 | ||
2918 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 2928 | if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { |
2919 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | 2929 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, |
2930 | status_idx, IGU_INT_ENABLE, 1); | ||
2931 | break; | ||
2932 | } | ||
2933 | |||
2934 | new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | ||
2935 | |||
2936 | if (new_status_idx != status_idx) | ||
2937 | continue; | ||
2920 | 2938 | ||
2921 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + | 2939 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + |
2922 | MAX_KCQ_IDX); | 2940 | MAX_KCQ_IDX); |
2923 | 2941 | ||
2924 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, | 2942 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, |
2925 | status_idx, IGU_INT_ENABLE, 1); | 2943 | status_idx, IGU_INT_ENABLE, 1); |
2926 | } else { | 2944 | |
2927 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2945 | break; |
2928 | status_idx, IGU_INT_ENABLE, 1); | ||
2929 | } | 2946 | } |
2930 | } | 2947 | } |
2931 | 2948 | ||
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
@@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) | |||
1008 | int ret; | 1008 | int ret; |
1009 | 1009 | ||
1010 | /* free and bail if we are shutting down */ | 1010 | /* free and bail if we are shutting down */ |
1011 | if (unlikely(!netif_running(ndev))) { | 1011 | if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { |
1012 | dev_kfree_skb_any(skb); | 1012 | dev_kfree_skb_any(skb); |
1013 | return; | 1013 | return; |
1014 | } | 1014 | } |
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c index 2d4c4fc1d900..461dd6f905f7 100644 --- a/drivers/net/dm9000.c +++ b/drivers/net/dm9000.c | |||
@@ -802,10 +802,7 @@ dm9000_init_dm9000(struct net_device *dev) | |||
802 | /* Checksum mode */ | 802 | /* Checksum mode */ |
803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); | 803 | dm9000_set_rx_csum_unlocked(dev, db->rx_csum); |
804 | 804 | ||
805 | /* GPIO0 on pre-activate PHY */ | ||
806 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
807 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ | 805 | iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ |
808 | iow(db, DM9000_GPR, 0); /* Enable PHY */ | ||
809 | 806 | ||
810 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; | 807 | ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0; |
811 | 808 | ||
@@ -852,8 +849,8 @@ static void dm9000_timeout(struct net_device *dev) | |||
852 | unsigned long flags; | 849 | unsigned long flags; |
853 | 850 | ||
854 | /* Save previous register address */ | 851 | /* Save previous register address */ |
855 | reg_save = readb(db->io_addr); | ||
856 | spin_lock_irqsave(&db->lock, flags); | 852 | spin_lock_irqsave(&db->lock, flags); |
853 | reg_save = readb(db->io_addr); | ||
857 | 854 | ||
858 | netif_stop_queue(dev); | 855 | netif_stop_queue(dev); |
859 | dm9000_reset(db); | 856 | dm9000_reset(db); |
@@ -1194,6 +1191,10 @@ dm9000_open(struct net_device *dev) | |||
1194 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) | 1191 | if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev)) |
1195 | return -EAGAIN; | 1192 | return -EAGAIN; |
1196 | 1193 | ||
1194 | /* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */ | ||
1195 | iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */ | ||
1196 | mdelay(1); /* delay needed by DM9000B */ | ||
1197 | |||
1197 | /* Initialize DM9000 board */ | 1198 | /* Initialize DM9000 board */ |
1198 | dm9000_reset(db); | 1199 | dm9000_reset(db); |
1199 | dm9000_init_dm9000(dev); | 1200 | dm9000_init_dm9000(dev); |
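The dm9000_timeout fix above moves the read of the shared index register inside the spinlock, so the saved value cannot be clobbered by a concurrent user of the same register window before the lock is taken. The same save-under-lock, restore-before-unlock shape in plain C with a pthread mutex and demo names:

#include <pthread.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned char index_reg;              /* shared "address port" */

static unsigned char read_index(void)        { return index_reg; }
static void write_index(unsigned char v)     { index_reg = v; }
static void do_reset_sequence(void)          { write_index(0x00); }

static void timeout_handler(void)
{
	unsigned char reg_save;

	pthread_mutex_lock(&dev_lock);
	reg_save = read_index();    /* snapshot only while holding the lock */

	do_reset_sequence();        /* clobbers the index register */

	write_index(reg_save);      /* restore before anyone else can run */
	pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
	write_index(0x2a);
	timeout_handler();
	return index_reg == 0x2a ? 0 : 1;
}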
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c | |||
@@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) | |||
337 | for (i = 0; i < PHY_MAX_ADDR; i++) | 337 | for (i = 0; i < PHY_MAX_ADDR; i++) |
338 | bp->mii_bus->irq[i] = PHY_POLL; | 338 | bp->mii_bus->irq[i] = PHY_POLL; |
339 | 339 | ||
340 | platform_set_drvdata(bp->dev, bp->mii_bus); | ||
341 | |||
342 | if (mdiobus_register(bp->mii_bus)) { | 340 | if (mdiobus_register(bp->mii_bus)) { |
343 | err = -ENXIO; | 341 | err = -ENXIO; |
344 | goto err_out_free_mdio_irq; | 342 | goto err_out_free_mdio_irq; |
@@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) | |||
863 | bp = netdev_priv(dev); | 861 | bp = netdev_priv(dev); |
864 | bp->dev = dev; | 862 | bp->dev = dev; |
865 | 863 | ||
864 | platform_set_drvdata(pdev, dev); | ||
866 | SET_NETDEV_DEV(dev, &pdev->dev); | 865 | SET_NETDEV_DEV(dev, &pdev->dev); |
867 | 866 | ||
868 | spin_lock_init(&bp->lock); | 867 | spin_lock_init(&bp->lock); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 55c1711f1688..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
@@ -42,7 +42,8 @@ | |||
42 | #define GBE_CONFIG_RAM_BASE \ | 42 | #define GBE_CONFIG_RAM_BASE \ |
43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) | 43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) |
44 | 44 | ||
45 | #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) | 45 | #define GBE_CONFIG_BASE_VIRT \ |
46 | ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) | ||
46 | 47 | ||
47 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ | 48 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ |
48 | (iowrite16_rep(base + offset, data, count)) | 49 | (iowrite16_rep(base + offset, data, count)) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3fa110ddb041..2e5022849f18 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -5967,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
5967 | /* APME bit in EEPROM is mapped to WUC.APME */ | 5967 | /* APME bit in EEPROM is mapped to WUC.APME */ |
5968 | eeprom_data = er32(WUC); | 5968 | eeprom_data = er32(WUC); |
5969 | eeprom_apme_mask = E1000_WUC_APME; | 5969 | eeprom_apme_mask = E1000_WUC_APME; |
5970 | if (eeprom_data & E1000_WUC_PHY_WAKE) | 5970 | if ((hw->mac.type > e1000_ich10lan) && |
5971 | (eeprom_data & E1000_WUC_PHY_WAKE)) | ||
5971 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; | 5972 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; |
5972 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | 5973 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { |
5973 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | 5974 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2a71373719ae..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
@@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = { | |||
74 | }, { | 74 | }, { |
75 | .name = "imx28-fec", | 75 | .name = "imx28-fec", |
76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | 76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, |
77 | } | 77 | }, |
78 | { } | ||
78 | }; | 79 | }; |
79 | 80 | ||
80 | static unsigned char macaddr[ETH_ALEN]; | 81 | static unsigned char macaddr[ETH_ALEN]; |
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
@@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
220 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 220 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
221 | * unless there are workarounds that change this. | 221 | * unless there are workarounds that change this. |
222 | **/ | 222 | **/ |
223 | void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
224 | u8 *mc_addr_list, u32 mc_addr_count, | 224 | u8 *mc_addr_list, u32 mc_addr_count, |
225 | u32 rar_used_count, u32 rar_count) | 225 | u32 rar_used_count, u32 rar_count) |
226 | { | 226 | { |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
@@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) | |||
260 | for (i = 0; i < PHY_MAX_ADDR; i++) | 260 | for (i = 0; i < PHY_MAX_ADDR; i++) |
261 | bp->mii_bus->irq[i] = PHY_POLL; | 261 | bp->mii_bus->irq[i] = PHY_POLL; |
262 | 262 | ||
263 | platform_set_drvdata(bp->dev, bp->mii_bus); | 263 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
264 | 264 | ||
265 | if (mdiobus_register(bp->mii_bus)) | 265 | if (mdiobus_register(bp->mii_bus)) |
266 | goto err_out_free_mdio_irq; | 266 | goto err_out_free_mdio_irq; |
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c index 5933621ac3ff..fc27a9926d9e 100644 --- a/drivers/net/macvtap.c +++ b/drivers/net/macvtap.c | |||
@@ -528,8 +528,9 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, | |||
528 | vnet_hdr_len = q->vnet_hdr_sz; | 528 | vnet_hdr_len = q->vnet_hdr_sz; |
529 | 529 | ||
530 | err = -EINVAL; | 530 | err = -EINVAL; |
531 | if ((len -= vnet_hdr_len) < 0) | 531 | if (len < vnet_hdr_len) |
532 | goto err; | 532 | goto err; |
533 | len -= vnet_hdr_len; | ||
533 | 534 | ||
534 | err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, | 535 | err = memcpy_fromiovecend((void *)&vnet_hdr, iv, 0, |
535 | sizeof(vnet_hdr)); | 536 | sizeof(vnet_hdr)); |
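The macvtap fix replaces an in-place subtraction guarded by a sign test, which is fragile when the length type is unsigned, with an explicit bounds check before subtracting. The same pattern in isolation:

#include <errno.h>
#include <stddef.h>

/* Returns the payload length after stripping a header of hdr_len bytes,
 * or -EINVAL if the buffer is too short to even hold the header. */
static long strip_header_len(size_t len, size_t hdr_len)
{
	/* check first: for an unsigned len, "(len -= hdr_len) < 0"
	 * can never trigger, the subtraction just wraps around */
	if (len < hdr_len)
		return -EINVAL;
	return (long)(len - hdr_len);
}

int main(void)
{
	return (strip_header_len(10, 12) == -EINVAL &&
		strip_header_len(100, 12) == 88) ? 0 : 1;
}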
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
@@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | 692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), |
693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), | ||
694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 695 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
695 | PCMCIA_DEVICE_NULL, | 696 | PCMCIA_DEVICE_NULL, |
696 | }; | 697 | }; |
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c index 27e6f6d43cac..e3ebd90ae651 100644 --- a/drivers/net/r6040.c +++ b/drivers/net/r6040.c | |||
@@ -49,8 +49,8 @@ | |||
49 | #include <asm/processor.h> | 49 | #include <asm/processor.h> |
50 | 50 | ||
51 | #define DRV_NAME "r6040" | 51 | #define DRV_NAME "r6040" |
52 | #define DRV_VERSION "0.26" | 52 | #define DRV_VERSION "0.27" |
53 | #define DRV_RELDATE "30May2010" | 53 | #define DRV_RELDATE "23Feb2011" |
54 | 54 | ||
55 | /* PHY CHIP Address */ | 55 | /* PHY CHIP Address */ |
56 | #define PHY1_ADDR 1 /* For MAC1 */ | 56 | #define PHY1_ADDR 1 /* For MAC1 */ |
@@ -69,6 +69,8 @@ | |||
69 | 69 | ||
70 | /* MAC registers */ | 70 | /* MAC registers */ |
71 | #define MCR0 0x00 /* Control register 0 */ | 71 | #define MCR0 0x00 /* Control register 0 */ |
72 | #define MCR0_PROMISC 0x0020 /* Promiscuous mode */ | ||
73 | #define MCR0_HASH_EN 0x0100 /* Enable multicast hash table function */ | ||
72 | #define MCR1 0x04 /* Control register 1 */ | 74 | #define MCR1 0x04 /* Control register 1 */ |
73 | #define MAC_RST 0x0001 /* Reset the MAC */ | 75 | #define MAC_RST 0x0001 /* Reset the MAC */ |
74 | #define MBCR 0x08 /* Bus control */ | 76 | #define MBCR 0x08 /* Bus control */ |
@@ -851,77 +853,92 @@ static void r6040_multicast_list(struct net_device *dev) | |||
851 | { | 853 | { |
852 | struct r6040_private *lp = netdev_priv(dev); | 854 | struct r6040_private *lp = netdev_priv(dev); |
853 | void __iomem *ioaddr = lp->base; | 855 | void __iomem *ioaddr = lp->base; |
854 | u16 *adrp; | ||
855 | u16 reg; | ||
856 | unsigned long flags; | 856 | unsigned long flags; |
857 | struct netdev_hw_addr *ha; | 857 | struct netdev_hw_addr *ha; |
858 | int i; | 858 | int i; |
859 | u16 *adrp; | ||
860 | u16 hash_table[4] = { 0 }; | ||
861 | |||
862 | spin_lock_irqsave(&lp->lock, flags); | ||
859 | 863 | ||
860 | /* MAC Address */ | 864 | /* Keep our MAC Address */ |
861 | adrp = (u16 *)dev->dev_addr; | 865 | adrp = (u16 *)dev->dev_addr; |
862 | iowrite16(adrp[0], ioaddr + MID_0L); | 866 | iowrite16(adrp[0], ioaddr + MID_0L); |
863 | iowrite16(adrp[1], ioaddr + MID_0M); | 867 | iowrite16(adrp[1], ioaddr + MID_0M); |
864 | iowrite16(adrp[2], ioaddr + MID_0H); | 868 | iowrite16(adrp[2], ioaddr + MID_0H); |
865 | 869 | ||
866 | /* Promiscous Mode */ | ||
867 | spin_lock_irqsave(&lp->lock, flags); | ||
868 | |||
869 | /* Clear AMCP & PROM bits */ | 870 | /* Clear AMCP & PROM bits */ |
870 | reg = ioread16(ioaddr) & ~0x0120; | 871 | lp->mcr0 = ioread16(ioaddr + MCR0) & ~(MCR0_PROMISC | MCR0_HASH_EN); |
871 | if (dev->flags & IFF_PROMISC) { | ||
872 | reg |= 0x0020; | ||
873 | lp->mcr0 |= 0x0020; | ||
874 | } | ||
875 | /* Too many multicast addresses | ||
876 | * accept all traffic */ | ||
877 | else if ((netdev_mc_count(dev) > MCAST_MAX) || | ||
878 | (dev->flags & IFF_ALLMULTI)) | ||
879 | reg |= 0x0020; | ||
880 | 872 | ||
881 | iowrite16(reg, ioaddr); | 873 | /* Promiscuous mode */ |
882 | spin_unlock_irqrestore(&lp->lock, flags); | 874 | if (dev->flags & IFF_PROMISC) |
875 | lp->mcr0 |= MCR0_PROMISC; | ||
883 | 876 | ||
884 | /* Build the hash table */ | 877 | /* Enable multicast hash table function to |
885 | if (netdev_mc_count(dev) > MCAST_MAX) { | 878 | * receive all multicast packets. */ |
886 | u16 hash_table[4]; | 879 | else if (dev->flags & IFF_ALLMULTI) { |
887 | u32 crc; | 880 | lp->mcr0 |= MCR0_HASH_EN; |
888 | 881 | ||
889 | for (i = 0; i < 4; i++) | 882 | for (i = 0; i < MCAST_MAX ; i++) { |
890 | hash_table[i] = 0; | 883 | iowrite16(0, ioaddr + MID_1L + 8 * i); |
884 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
885 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
886 | } | ||
891 | 887 | ||
888 | for (i = 0; i < 4; i++) | ||
889 | hash_table[i] = 0xffff; | ||
890 | } | ||
891 | /* Use internal multicast address registers if the number of | ||
892 | * multicast addresses is not greater than MCAST_MAX. */ | ||
893 | else if (netdev_mc_count(dev) <= MCAST_MAX) { | ||
894 | i = 0; | ||
892 | netdev_for_each_mc_addr(ha, dev) { | 895 | netdev_for_each_mc_addr(ha, dev) { |
893 | char *addrs = ha->addr; | 896 | u16 *adrp = (u16 *) ha->addr; |
897 | iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); | ||
898 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | ||
899 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | ||
900 | i++; | ||
901 | } | ||
902 | while (i < MCAST_MAX) { | ||
903 | iowrite16(0, ioaddr + MID_1L + 8 * i); | ||
904 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
905 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
906 | i++; | ||
907 | } | ||
908 | } | ||
909 | /* Otherwise, Enable multicast hash table function. */ | ||
910 | else { | ||
911 | u32 crc; | ||
894 | 912 | ||
895 | if (!(*addrs & 1)) | 913 | lp->mcr0 |= MCR0_HASH_EN; |
896 | continue; | 914 | |
915 | for (i = 0; i < MCAST_MAX ; i++) { | ||
916 | iowrite16(0, ioaddr + MID_1L + 8 * i); | ||
917 | iowrite16(0, ioaddr + MID_1M + 8 * i); | ||
918 | iowrite16(0, ioaddr + MID_1H + 8 * i); | ||
919 | } | ||
897 | 920 | ||
898 | crc = ether_crc_le(6, addrs); | 921 | /* Build multicast hash table */ |
922 | netdev_for_each_mc_addr(ha, dev) { | ||
923 | u8 *addrs = ha->addr; | ||
924 | |||
925 | crc = ether_crc(ETH_ALEN, addrs); | ||
899 | crc >>= 26; | 926 | crc >>= 26; |
900 | hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); | 927 | hash_table[crc >> 4] |= 1 << (crc & 0xf); |
901 | } | 928 | } |
902 | /* Fill the MAC hash tables with their values */ | 929 | } |
930 | |||
931 | iowrite16(lp->mcr0, ioaddr + MCR0); | ||
932 | |||
933 | /* Fill the MAC hash tables with their values */ | ||
934 | if (lp->mcr0 && MCR0_HASH_EN) { | ||
903 | iowrite16(hash_table[0], ioaddr + MAR0); | 935 | iowrite16(hash_table[0], ioaddr + MAR0); |
904 | iowrite16(hash_table[1], ioaddr + MAR1); | 936 | iowrite16(hash_table[1], ioaddr + MAR1); |
905 | iowrite16(hash_table[2], ioaddr + MAR2); | 937 | iowrite16(hash_table[2], ioaddr + MAR2); |
906 | iowrite16(hash_table[3], ioaddr + MAR3); | 938 | iowrite16(hash_table[3], ioaddr + MAR3); |
907 | } | 939 | } |
908 | /* Multicast Address 1~4 case */ | 940 | |
909 | i = 0; | 941 | spin_unlock_irqrestore(&lp->lock, flags); |
910 | netdev_for_each_mc_addr(ha, dev) { | ||
911 | if (i >= MCAST_MAX) | ||
912 | break; | ||
913 | adrp = (u16 *) ha->addr; | ||
914 | iowrite16(adrp[0], ioaddr + MID_1L + 8 * i); | ||
915 | iowrite16(adrp[1], ioaddr + MID_1M + 8 * i); | ||
916 | iowrite16(adrp[2], ioaddr + MID_1H + 8 * i); | ||
917 | i++; | ||
918 | } | ||
919 | while (i < MCAST_MAX) { | ||
920 | iowrite16(0xffff, ioaddr + MID_1L + 8 * i); | ||
921 | iowrite16(0xffff, ioaddr + MID_1M + 8 * i); | ||
922 | iowrite16(0xffff, ioaddr + MID_1H + 8 * i); | ||
923 | i++; | ||
924 | } | ||
925 | } | 942 | } |
926 | 943 | ||
927 | static void netdev_get_drvinfo(struct net_device *dev, | 944 | static void netdev_get_drvinfo(struct net_device *dev, |
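The rewritten r6040 multicast path either programs up to MCAST_MAX exact-match registers or falls back to a 64-bit hash filter: CRC-32 of the MAC address, with the top six bits selecting one bit in four 16-bit MAR words. A standalone sketch of that hashing; the CRC below mirrors the kernel's bit-serial ether_crc(), and the word/bit split follows the patch:

#include <stdint.h>
#include <stdio.h>

/* Bit-serial CRC-32, MSB-first polynomial 0x04c11db7, bytes fed LSB
 * first: the same scheme as the kernel's ether_crc(). */
static uint32_t ether_crc_demo(size_t len, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;

	while (len--) {
		uint8_t byte = *data++;
		for (int bit = 0; bit < 8; bit++, byte >>= 1) {
			uint32_t msb = crc >> 31;
			crc <<= 1;
			if (msb ^ (byte & 1))
				crc ^= 0x04c11db7;
		}
	}
	return crc;
}

int main(void)
{
	const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb };
	uint16_t hash_table[4] = { 0 };

	uint32_t crc = ether_crc_demo(6, mc_addr) >> 26;  /* keep the top 6 bits */
	hash_table[crc >> 4] |= 1u << (crc & 0xf);        /* word MAR0..MAR3, bit 0..15 */

	for (int i = 0; i < 4; i++)
		printf("MAR%d = 0x%04x\n", i, hash_table[i]);
	return 0;
}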
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 469ab0b7ce31..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
27 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
28 | #include <linux/pci-aspm.h> | ||
28 | 29 | ||
29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
@@ -617,8 +618,9 @@ static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) | |||
617 | } | 618 | } |
618 | } | 619 | } |
619 | 620 | ||
620 | static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | 621 | static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) |
621 | { | 622 | { |
623 | void __iomem *ioaddr = tp->mmio_addr; | ||
622 | int i; | 624 | int i; |
623 | 625 | ||
624 | RTL_W8(ERIDR, cmd); | 626 | RTL_W8(ERIDR, cmd); |
@@ -630,7 +632,7 @@ static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd) | |||
630 | break; | 632 | break; |
631 | } | 633 | } |
632 | 634 | ||
633 | ocp_write(ioaddr, 0x1, 0x30, 0x00000001); | 635 | ocp_write(tp, 0x1, 0x30, 0x00000001); |
634 | } | 636 | } |
635 | 637 | ||
636 | #define OOB_CMD_RESET 0x00 | 638 | #define OOB_CMD_RESET 0x00 |
@@ -2868,8 +2870,11 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2868 | { | 2870 | { |
2869 | void __iomem *ioaddr = tp->mmio_addr; | 2871 | void __iomem *ioaddr = tp->mmio_addr; |
2870 | 2872 | ||
2871 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2873 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2874 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2875 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2872 | return; | 2876 | return; |
2877 | } | ||
2873 | 2878 | ||
2874 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || | 2879 | if (((tp->mac_version == RTL_GIGA_MAC_VER_23) || |
2875 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && | 2880 | (tp->mac_version == RTL_GIGA_MAC_VER_24)) && |
@@ -2891,6 +2896,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
2891 | switch (tp->mac_version) { | 2896 | switch (tp->mac_version) { |
2892 | case RTL_GIGA_MAC_VER_25: | 2897 | case RTL_GIGA_MAC_VER_25: |
2893 | case RTL_GIGA_MAC_VER_26: | 2898 | case RTL_GIGA_MAC_VER_26: |
2899 | case RTL_GIGA_MAC_VER_27: | ||
2900 | case RTL_GIGA_MAC_VER_28: | ||
2894 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); | 2901 | RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80); |
2895 | break; | 2902 | break; |
2896 | } | 2903 | } |
@@ -2900,12 +2907,17 @@ static void r8168_pll_power_up(struct rtl8169_private *tp) | |||
2900 | { | 2907 | { |
2901 | void __iomem *ioaddr = tp->mmio_addr; | 2908 | void __iomem *ioaddr = tp->mmio_addr; |
2902 | 2909 | ||
2903 | if (tp->mac_version == RTL_GIGA_MAC_VER_27) | 2910 | if (((tp->mac_version == RTL_GIGA_MAC_VER_27) || |
2911 | (tp->mac_version == RTL_GIGA_MAC_VER_28)) && | ||
2912 | (ocp_read(tp, 0x0f, 0x0010) & 0x00008000)) { | ||
2904 | return; | 2913 | return; |
2914 | } | ||
2905 | 2915 | ||
2906 | switch (tp->mac_version) { | 2916 | switch (tp->mac_version) { |
2907 | case RTL_GIGA_MAC_VER_25: | 2917 | case RTL_GIGA_MAC_VER_25: |
2908 | case RTL_GIGA_MAC_VER_26: | 2918 | case RTL_GIGA_MAC_VER_26: |
2919 | case RTL_GIGA_MAC_VER_27: | ||
2920 | case RTL_GIGA_MAC_VER_28: | ||
2909 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); | 2921 | RTL_W8(PMCH, RTL_R8(PMCH) | 0x80); |
2910 | break; | 2922 | break; |
2911 | } | 2923 | } |
@@ -3009,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3009 | mii->reg_num_mask = 0x1f; | 3021 | mii->reg_num_mask = 0x1f; |
3010 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); | 3022 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); |
3011 | 3023 | ||
3024 | /* disable ASPM completely as it causes random devices to stop working | ||
3025 | * as well as full system hangs for some PCIe device users */ | ||
3026 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
3027 | PCIE_LINK_STATE_CLKPM); | ||
3028 | |||
3012 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 3029 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
3013 | rc = pci_enable_device(pdev); | 3030 | rc = pci_enable_device(pdev); |
3014 | if (rc < 0) { | 3031 | if (rc < 0) { |
@@ -3042,7 +3059,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3042 | goto err_out_mwi_2; | 3059 | goto err_out_mwi_2; |
3043 | } | 3060 | } |
3044 | 3061 | ||
3045 | tp->cp_cmd = PCIMulRW | RxChkSum; | 3062 | tp->cp_cmd = RxChkSum; |
3046 | 3063 | ||
3047 | if ((sizeof(dma_addr_t) > 4) && | 3064 | if ((sizeof(dma_addr_t) > 4) && |
3048 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { | 3065 | !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) { |
@@ -3318,7 +3335,8 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp) | |||
3318 | /* Disable interrupts */ | 3335 | /* Disable interrupts */ |
3319 | rtl8169_irq_mask_and_ack(ioaddr); | 3336 | rtl8169_irq_mask_and_ack(ioaddr); |
3320 | 3337 | ||
3321 | if (tp->mac_version == RTL_GIGA_MAC_VER_28) { | 3338 | if (tp->mac_version == RTL_GIGA_MAC_VER_27 || |
3339 | tp->mac_version == RTL_GIGA_MAC_VER_28) { | ||
3322 | while (RTL_R8(TxPoll) & NPQ) | 3340 | while (RTL_R8(TxPoll) & NPQ) |
3323 | udelay(20); | 3341 | udelay(20); |
3324 | 3342 | ||
@@ -3847,8 +3865,7 @@ static void rtl_hw_start_8168(struct net_device *dev) | |||
3847 | Cxpl_dbg_sel | \ | 3865 | Cxpl_dbg_sel | \ |
3848 | ASF | \ | 3866 | ASF | \ |
3849 | PktCntrDisable | \ | 3867 | PktCntrDisable | \ |
3850 | PCIDAC | \ | 3868 | Mac_dbgo_sel) |
3851 | PCIMulRW) | ||
3852 | 3869 | ||
3853 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | 3870 | static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) |
3854 | { | 3871 | { |
@@ -3878,8 +3895,6 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3878 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) | 3895 | if ((cfg1 & LEDS0) && (cfg1 & LEDS1)) |
3879 | RTL_W8(Config1, cfg1 & ~LEDS0); | 3896 | RTL_W8(Config1, cfg1 & ~LEDS0); |
3880 | 3897 | ||
3881 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3882 | |||
3883 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); | 3898 | rtl_ephy_init(ioaddr, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1)); |
3884 | } | 3899 | } |
3885 | 3900 | ||
@@ -3891,8 +3906,6 @@ static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) | |||
3891 | 3906 | ||
3892 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); | 3907 | RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable); |
3893 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); | 3908 | RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); |
3894 | |||
3895 | RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R810X_CPCMD_QUIRK_MASK); | ||
3896 | } | 3909 | } |
3897 | 3910 | ||
3898 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) | 3911 | static void rtl_hw_start_8102e_3(void __iomem *ioaddr, struct pci_dev *pdev) |
@@ -3918,6 +3931,8 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3918 | } | 3931 | } |
3919 | } | 3932 | } |
3920 | 3933 | ||
3934 | RTL_W8(Cfg9346, Cfg9346_Unlock); | ||
3935 | |||
3921 | switch (tp->mac_version) { | 3936 | switch (tp->mac_version) { |
3922 | case RTL_GIGA_MAC_VER_07: | 3937 | case RTL_GIGA_MAC_VER_07: |
3923 | rtl_hw_start_8102e_1(ioaddr, pdev); | 3938 | rtl_hw_start_8102e_1(ioaddr, pdev); |
@@ -3932,14 +3947,13 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3932 | break; | 3947 | break; |
3933 | } | 3948 | } |
3934 | 3949 | ||
3935 | RTL_W8(Cfg9346, Cfg9346_Unlock); | 3950 | RTL_W8(Cfg9346, Cfg9346_Lock); |
3936 | 3951 | ||
3937 | RTL_W8(MaxTxPacketSize, TxPacketMax); | 3952 | RTL_W8(MaxTxPacketSize, TxPacketMax); |
3938 | 3953 | ||
3939 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); | 3954 | rtl_set_rx_max_size(ioaddr, rx_buf_sz); |
3940 | 3955 | ||
3941 | tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW; | 3956 | tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK; |
3942 | |||
3943 | RTL_W16(CPlusCmd, tp->cp_cmd); | 3957 | RTL_W16(CPlusCmd, tp->cp_cmd); |
3944 | 3958 | ||
3945 | RTL_W16(IntrMitigate, 0x0000); | 3959 | RTL_W16(IntrMitigate, 0x0000); |
@@ -3949,14 +3963,10 @@ static void rtl_hw_start_8101(struct net_device *dev) | |||
3949 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | 3963 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); |
3950 | rtl_set_rx_tx_config_registers(tp); | 3964 | rtl_set_rx_tx_config_registers(tp); |
3951 | 3965 | ||
3952 | RTL_W8(Cfg9346, Cfg9346_Lock); | ||
3953 | |||
3954 | RTL_R8(IntrMask); | 3966 | RTL_R8(IntrMask); |
3955 | 3967 | ||
3956 | rtl_set_rx_mode(dev); | 3968 | rtl_set_rx_mode(dev); |
3957 | 3969 | ||
3958 | RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); | ||
3959 | |||
3960 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); | 3970 | RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000); |
3961 | 3971 | ||
3962 | RTL_W16(IntrMask, tp->intr_event); | 3972 | RTL_W16(IntrMask, tp->intr_event); |
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c index 0e8bb19ed60d..ca886d98bdc7 100644 --- a/drivers/net/sfc/ethtool.c +++ b/drivers/net/sfc/ethtool.c | |||
@@ -569,9 +569,14 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
569 | struct ethtool_test *test, u64 *data) | 569 | struct ethtool_test *test, u64 *data) |
570 | { | 570 | { |
571 | struct efx_nic *efx = netdev_priv(net_dev); | 571 | struct efx_nic *efx = netdev_priv(net_dev); |
572 | struct efx_self_tests efx_tests; | 572 | struct efx_self_tests *efx_tests; |
573 | int already_up; | 573 | int already_up; |
574 | int rc; | 574 | int rc = -ENOMEM; |
575 | |||
576 | efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL); | ||
577 | if (!efx_tests) | ||
578 | goto fail; | ||
579 | |||
575 | 580 | ||
576 | ASSERT_RTNL(); | 581 | ASSERT_RTNL(); |
577 | if (efx->state != STATE_RUNNING) { | 582 | if (efx->state != STATE_RUNNING) { |
@@ -589,13 +594,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
589 | if (rc) { | 594 | if (rc) { |
590 | netif_err(efx, drv, efx->net_dev, | 595 | netif_err(efx, drv, efx->net_dev, |
591 | "failed opening device.\n"); | 596 | "failed opening device.\n"); |
592 | goto fail2; | 597 | goto fail1; |
593 | } | 598 | } |
594 | } | 599 | } |
595 | 600 | ||
596 | memset(&efx_tests, 0, sizeof(efx_tests)); | 601 | rc = efx_selftest(efx, efx_tests, test->flags); |
597 | |||
598 | rc = efx_selftest(efx, &efx_tests, test->flags); | ||
599 | 602 | ||
600 | if (!already_up) | 603 | if (!already_up) |
601 | dev_close(efx->net_dev); | 604 | dev_close(efx->net_dev); |
@@ -604,10 +607,11 @@ static void efx_ethtool_self_test(struct net_device *net_dev, | |||
604 | rc == 0 ? "passed" : "failed", | 607 | rc == 0 ? "passed" : "failed", |
605 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); | 608 | (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); |
606 | 609 | ||
607 | fail2: | 610 | fail1: |
608 | fail1: | ||
609 | /* Fill ethtool results structures */ | 611 | /* Fill ethtool results structures */ |
610 | efx_ethtool_fill_self_tests(efx, &efx_tests, NULL, data); | 612 | efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data); |
613 | kfree(efx_tests); | ||
614 | fail: | ||
611 | if (rc) | 615 | if (rc) |
612 | test->flags |= ETH_TEST_FL_FAILED; | 616 | test->flags |= ETH_TEST_FL_FAILED; |
613 | } | 617 | } |
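The sfc change moves the large self-test results structure off the stack onto the heap, keeping a goto-based unwind so the ethtool results are still filled on the paths that ran (or partially ran) the tests. A user-space analogue of that allocation and cleanup shape, with invented names:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct demo_results { unsigned long counters[256]; }; /* too large to keep on the stack */

static int run_tests(struct demo_results *r) { r->counters[0] = 1; return 0; }

static void fill_user_results(const struct demo_results *r, unsigned long *out)
{
	memcpy(out, r->counters, sizeof(r->counters));
}

static int self_test(unsigned long *out, unsigned int *flags)
{
	struct demo_results *results;
	int rc = -ENOMEM;

	results = calloc(1, sizeof(*results));  /* kzalloc() analogue */
	if (!results)
		goto fail;

	rc = run_tests(results);

	fill_user_results(results, out);        /* report whatever was collected */
	free(results);
fail:
	if (rc)
		*flags |= 1u;                   /* ETH_TEST_FL_FAILED analogue */
	return rc;
}

int main(void)
{
	unsigned long out[256];
	unsigned int flags = 0;

	return self_test(out, &flags);
}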
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
@@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); | 3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); |
3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
3858 | 3858 | ||
3859 | /* device is off until link detection */ | ||
3860 | netif_carrier_off(dev); | ||
3861 | |||
3862 | return dev; | 3859 | return dev; |
3863 | } | 3860 | } |
3864 | 3861 | ||
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index 64bfdae5956f..d70bde95460b 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -1178,6 +1178,11 @@ static int smsc911x_open(struct net_device *dev) | |||
1178 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); | 1178 | smsc911x_reg_write(pdata, HW_CFG, 0x00050000); |
1179 | smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); | 1179 | smsc911x_reg_write(pdata, AFC_CFG, 0x006E3740); |
1180 | 1180 | ||
1181 | /* Increase the legal frame size of VLAN tagged frames to 1522 bytes */ | ||
1182 | spin_lock_irq(&pdata->mac_lock); | ||
1183 | smsc911x_mac_write(pdata, VLAN1, ETH_P_8021Q); | ||
1184 | spin_unlock_irq(&pdata->mac_lock); | ||
1185 | |||
1181 | /* Make sure EEPROM has finished loading before setting GPIO_CFG */ | 1186 | /* Make sure EEPROM has finished loading before setting GPIO_CFG */ |
1182 | timeout = 50; | 1187 | timeout = 50; |
1183 | while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && | 1188 | while ((smsc911x_reg_read(pdata, E2P_CMD) & E2P_CMD_EPC_BUSY_) && |
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 02b622e3b9fb..5002f5be47be 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c | |||
@@ -651,6 +651,10 @@ static const struct usb_device_id products[] = { | |||
651 | .driver_info = (unsigned long)&dm9601_info, | 651 | .driver_info = (unsigned long)&dm9601_info, |
652 | }, | 652 | }, |
653 | { | 653 | { |
654 | USB_DEVICE(0x0fe6, 0x9700), /* DM9601 USB to Fast Ethernet Adapter */ | ||
655 | .driver_info = (unsigned long)&dm9601_info, | ||
656 | }, | ||
657 | { | ||
654 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ | 658 | USB_DEVICE(0x0a46, 0x9000), /* DM9000E */ |
655 | .driver_info = (unsigned long)&dm9601_info, | 659 | .driver_info = (unsigned long)&dm9601_info, |
656 | }, | 660 | }, |
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c index 78c26fdccad1..62ce2f4e8605 100644 --- a/drivers/net/wireless/ath/ath5k/phy.c +++ b/drivers/net/wireless/ath/ath5k/phy.c | |||
@@ -282,6 +282,34 @@ int ath5k_hw_phy_disable(struct ath5k_hw *ah) | |||
282 | return 0; | 282 | return 0; |
283 | } | 283 | } |
284 | 284 | ||
285 | /* | ||
286 | * Wait for synth to settle | ||
287 | */ | ||
288 | static void ath5k_hw_wait_for_synth(struct ath5k_hw *ah, | ||
289 | struct ieee80211_channel *channel) | ||
290 | { | ||
291 | /* | ||
292 | * On 5211+ read activation -> rx delay | ||
293 | * and use it (100ns steps). | ||
294 | */ | ||
295 | if (ah->ah_version != AR5K_AR5210) { | ||
296 | u32 delay; | ||
297 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | ||
298 | AR5K_PHY_RX_DELAY_M; | ||
299 | delay = (channel->hw_value & CHANNEL_CCK) ? | ||
300 | ((delay << 2) / 22) : (delay / 10); | ||
301 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
302 | delay = delay << 1; | ||
303 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
304 | delay = delay << 2; | ||
305 | /* XXX: /2 on turbo ? Let's be safe | ||
306 | * for now */ | ||
307 | udelay(100 + delay); | ||
308 | } else { | ||
309 | mdelay(1); | ||
310 | } | ||
311 | } | ||
312 | |||
285 | 313 | ||
286 | /**********************\ | 314 | /**********************\ |
287 | * RF Gain optimization * | 315 | * RF Gain optimization * |
@@ -1253,6 +1281,7 @@ static int ath5k_hw_channel(struct ath5k_hw *ah, | |||
1253 | case AR5K_RF5111: | 1281 | case AR5K_RF5111: |
1254 | ret = ath5k_hw_rf5111_channel(ah, channel); | 1282 | ret = ath5k_hw_rf5111_channel(ah, channel); |
1255 | break; | 1283 | break; |
1284 | case AR5K_RF2317: | ||
1256 | case AR5K_RF2425: | 1285 | case AR5K_RF2425: |
1257 | ret = ath5k_hw_rf2425_channel(ah, channel); | 1286 | ret = ath5k_hw_rf2425_channel(ah, channel); |
1258 | break; | 1287 | break; |
@@ -3237,6 +3266,13 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3237 | /* Failed */ | 3266 | /* Failed */ |
3238 | if (i >= 100) | 3267 | if (i >= 100) |
3239 | return -EIO; | 3268 | return -EIO; |
3269 | |||
3270 | /* Set channel and wait for synth */ | ||
3271 | ret = ath5k_hw_channel(ah, channel); | ||
3272 | if (ret) | ||
3273 | return ret; | ||
3274 | |||
3275 | ath5k_hw_wait_for_synth(ah, channel); | ||
3240 | } | 3276 | } |
3241 | 3277 | ||
3242 | /* | 3278 | /* |
@@ -3251,13 +3287,53 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3251 | if (ret) | 3287 | if (ret) |
3252 | return ret; | 3288 | return ret; |
3253 | 3289 | ||
3290 | /* Write OFDM timings on 5212*/ | ||
3291 | if (ah->ah_version == AR5K_AR5212 && | ||
3292 | channel->hw_value & CHANNEL_OFDM) { | ||
3293 | |||
3294 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3295 | if (ret) | ||
3296 | return ret; | ||
3297 | |||
3298 | /* Spur info is available only from EEPROM versions | ||
3299 | * greater than 5.3, but the EEPROM routines will use | ||
3300 | * static values for older versions */ | ||
3301 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3302 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3303 | channel); | ||
3304 | } | ||
3305 | |||
3306 | /* If we used fast channel switching | ||
3307 | * we are done, release RF bus and | ||
3308 | * fire up NF calibration. | ||
3309 | * | ||
3310 | * Note: Only NF calibration due to | ||
3311 | * channel change, not AGC calibration | ||
3312 | * since AGC is still running ! | ||
3313 | */ | ||
3314 | if (fast) { | ||
3315 | /* | ||
3316 | * Release RF Bus grant | ||
3317 | */ | ||
3318 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3319 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3320 | |||
3321 | /* | ||
3322 | * Start NF calibration | ||
3323 | */ | ||
3324 | AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, | ||
3325 | AR5K_PHY_AGCCTL_NF); | ||
3326 | |||
3327 | return ret; | ||
3328 | } | ||
3329 | |||
3254 | /* | 3330 | /* |
3255 | * For 5210 we do all initialization using | 3331 | * For 5210 we do all initialization using |
3256 | * initvals, so we don't have to modify | 3332 | * initvals, so we don't have to modify |
3257 | * any settings (5210 also only supports | 3333 | * any settings (5210 also only supports |
3258 | * a/aturbo modes) | 3334 | * a/aturbo modes) |
3259 | */ | 3335 | */ |
3260 | if ((ah->ah_version != AR5K_AR5210) && !fast) { | 3336 | if (ah->ah_version != AR5K_AR5210) { |
3261 | 3337 | ||
3262 | /* | 3338 | /* |
3263 | * Write initial RF gain settings | 3339 | * Write initial RF gain settings |
@@ -3276,22 +3352,6 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3276 | if (ret) | 3352 | if (ret) |
3277 | return ret; | 3353 | return ret; |
3278 | 3354 | ||
3279 | /* Write OFDM timings on 5212*/ | ||
3280 | if (ah->ah_version == AR5K_AR5212 && | ||
3281 | channel->hw_value & CHANNEL_OFDM) { | ||
3282 | |||
3283 | ret = ath5k_hw_write_ofdm_timings(ah, channel); | ||
3284 | if (ret) | ||
3285 | return ret; | ||
3286 | |||
3287 | /* Spur info is available only from EEPROM versions | ||
3288 | * greater than 5.3, but the EEPROM routines will use | ||
3289 | * static values for older versions */ | ||
3290 | if (ah->ah_mac_srev >= AR5K_SREV_AR5424) | ||
3291 | ath5k_hw_set_spur_mitigation_filter(ah, | ||
3292 | channel); | ||
3293 | } | ||
3294 | |||
3295 | /*Enable/disable 802.11b mode on 5111 | 3355 | /*Enable/disable 802.11b mode on 5111 |
3296 | (enable 2111 frequency converter + CCK)*/ | 3356 | (enable 2111 frequency converter + CCK)*/ |
3297 | if (ah->ah_radio == AR5K_RF5111) { | 3357 | if (ah->ah_radio == AR5K_RF5111) { |
@@ -3322,47 +3382,20 @@ int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, | |||
3322 | */ | 3382 | */ |
3323 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); | 3383 | ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT); |
3324 | 3384 | ||
3385 | ath5k_hw_wait_for_synth(ah, channel); | ||
3386 | |||
3325 | /* | 3387 | /* |
3326 | * On 5211+ read activation -> rx delay | 3388 | * Perform ADC test to see if baseband is ready |
3327 | * and use it. | 3389 | * Set tx hold and check adc test register |
3328 | */ | 3390 | */ |
3329 | if (ah->ah_version != AR5K_AR5210) { | 3391 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); |
3330 | u32 delay; | 3392 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); |
3331 | delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) & | 3393 | for (i = 0; i <= 20; i++) { |
3332 | AR5K_PHY_RX_DELAY_M; | 3394 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) |
3333 | delay = (channel->hw_value & CHANNEL_CCK) ? | 3395 | break; |
3334 | ((delay << 2) / 22) : (delay / 10); | 3396 | udelay(200); |
3335 | if (ah->ah_bwmode == AR5K_BWMODE_10MHZ) | ||
3336 | delay = delay << 1; | ||
3337 | if (ah->ah_bwmode == AR5K_BWMODE_5MHZ) | ||
3338 | delay = delay << 2; | ||
3339 | /* XXX: /2 on turbo ? Let's be safe | ||
3340 | * for now */ | ||
3341 | udelay(100 + delay); | ||
3342 | } else { | ||
3343 | mdelay(1); | ||
3344 | } | ||
3345 | |||
3346 | if (fast) | ||
3347 | /* | ||
3348 | * Release RF Bus grant | ||
3349 | */ | ||
3350 | AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ, | ||
3351 | AR5K_PHY_RFBUS_REQ_REQUEST); | ||
3352 | else { | ||
3353 | /* | ||
3354 | * Perform ADC test to see if baseband is ready | ||
3355 | * Set tx hold and check adc test register | ||
3356 | */ | ||
3357 | phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1); | ||
3358 | ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1); | ||
3359 | for (i = 0; i <= 20; i++) { | ||
3360 | if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10)) | ||
3361 | break; | ||
3362 | udelay(200); | ||
3363 | } | ||
3364 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3365 | } | 3397 | } |
3398 | ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1); | ||
3366 | 3399 | ||
3367 | /* | 3400 | /* |
3368 | * Start automatic gain control calibration | 3401 | * Start automatic gain control calibration |
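Condensed, the reordered fast-switch path in ath5k_hw_phy_init() now behaves roughly as sketched below. This is a reading aid, not the literal function body: error handling and the intermediate RF-gain/txpower writes are trimmed, and only names that appear in the hunk above are used.

    /* Set the new channel and wait for the synthesizer to settle. */
    ret = ath5k_hw_channel(ah, channel);
    if (ret)
            return ret;
    ath5k_hw_wait_for_synth(ah, channel);

    /* OFDM timings (and the spur filter on newer EEPROMs) are now
     * written for both the full and the fast path on AR5212. */
    if (ah->ah_version == AR5K_AR5212 && (channel->hw_value & CHANNEL_OFDM))
            ret = ath5k_hw_write_ofdm_timings(ah, channel);

    if (fast) {
            /* Fast switch: release the RF bus and start a noise-floor-only
             * calibration -- AGC keeps running, so no AGC calibration here. */
            AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
                                  AR5K_PHY_RFBUS_REQ_REQUEST);
            AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF);
            return ret;
    }
    /* Otherwise fall through to the full baseband bring-up below. */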
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 23838e37d45f..1a7fa6ea4cf5 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <linux/device.h> | 21 | #include <linux/device.h> |
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
24 | #include <linux/pm_qos_params.h> | ||
25 | 24 | ||
26 | #include "debug.h" | 25 | #include "debug.h" |
27 | #include "common.h" | 26 | #include "common.h" |
@@ -57,8 +56,6 @@ struct ath_node; | |||
57 | 56 | ||
58 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) | 57 | #define A_MAX(a, b) ((a) > (b) ? (a) : (b)) |
59 | 58 | ||
60 | #define ATH9K_PM_QOS_DEFAULT_VALUE 55 | ||
61 | |||
62 | #define TSF_TO_TU(_h,_l) \ | 59 | #define TSF_TO_TU(_h,_l) \ |
63 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) | 60 | ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) |
64 | 61 | ||
@@ -633,8 +630,6 @@ struct ath_softc { | |||
633 | struct ath_descdma txsdma; | 630 | struct ath_descdma txsdma; |
634 | 631 | ||
635 | struct ath_ant_comb ant_comb; | 632 | struct ath_ant_comb ant_comb; |
636 | |||
637 | struct pm_qos_request_list pm_qos_req; | ||
638 | }; | 633 | }; |
639 | 634 | ||
640 | struct ath_wiphy { | 635 | struct ath_wiphy { |
@@ -666,7 +661,6 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz) | |||
666 | extern struct ieee80211_ops ath9k_ops; | 661 | extern struct ieee80211_ops ath9k_ops; |
667 | extern int ath9k_modparam_nohwcrypt; | 662 | extern int ath9k_modparam_nohwcrypt; |
668 | extern int led_blink; | 663 | extern int led_blink; |
669 | extern int ath9k_pm_qos_value; | ||
670 | extern bool is_ath9k_unloaded; | 664 | extern bool is_ath9k_unloaded; |
671 | 665 | ||
672 | irqreturn_t ath_isr(int irq, void *dev); | 666 | irqreturn_t ath_isr(int irq, void *dev); |
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
@@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
219 | struct tx_buf *tx_buf = NULL; | 219 | struct tx_buf *tx_buf = NULL; |
220 | struct sk_buff *nskb = NULL; | 220 | struct sk_buff *nskb = NULL; |
221 | int ret = 0, i; | 221 | int ret = 0, i; |
222 | u16 *hdr, tx_skb_cnt = 0; | 222 | u16 tx_skb_cnt = 0; |
223 | u8 *buf; | 223 | u8 *buf; |
224 | __le16 *hdr; | ||
224 | 225 | ||
225 | if (hif_dev->tx.tx_skb_cnt == 0) | 226 | if (hif_dev->tx.tx_skb_cnt == 0) |
226 | return 0; | 227 | return 0; |
@@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
245 | 246 | ||
246 | buf = tx_buf->buf; | 247 | buf = tx_buf->buf; |
247 | buf += tx_buf->offset; | 248 | buf += tx_buf->offset; |
248 | hdr = (u16 *)buf; | 249 | hdr = (__le16 *)buf; |
249 | *hdr++ = nskb->len; | 250 | *hdr++ = cpu_to_le16(nskb->len); |
250 | *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; | 251 | *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); |
251 | buf += 4; | 252 | buf += 4; |
252 | memcpy(buf, nskb->data, nskb->len); | 253 | memcpy(buf, nskb->data, nskb->len); |
253 | tx_buf->len = nskb->len + 4; | 254 | tx_buf->len = nskb->len + 4; |
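The __hif_usb_tx() hunk replaces host-order u16 stores for the 4-byte stream header with explicit little-endian stores, so the header stays correct on big-endian hosts. A small userspace sketch of the same idea, using the C library's htole16() where the kernel uses cpu_to_le16(); the buffer layout mirrors the driver, but the tag value below is made up for illustration.

    #include <endian.h>   /* htole16() -- glibc/BSD extension */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Write a 2-byte frame length and a 2-byte tag in little-endian byte
     * order, regardless of host endianness.  Returns bytes written. */
    static size_t put_stream_header(uint8_t *buf, uint16_t len, uint16_t tag)
    {
        uint16_t le_len = htole16(len);
        uint16_t le_tag = htole16(tag);

        memcpy(buf, &le_len, sizeof(le_len));      /* bytes 0-1: length */
        memcpy(buf + 2, &le_tag, sizeof(le_tag));  /* bytes 2-3: tag    */
        return 4;
    }

    int main(void)
    {
        uint8_t hdr[4];

        put_stream_header(hdr, 1536, 0x697e);      /* tag value is illustrative */
        printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
        return 0;
    }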
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 087a6a95edd5..a033d01bf8a0 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c | |||
@@ -41,10 +41,6 @@ static int ath9k_btcoex_enable; | |||
41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); | 41 | module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444); |
42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); | 42 | MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence"); |
43 | 43 | ||
44 | int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE; | ||
45 | module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH); | ||
46 | MODULE_PARM_DESC(pmqos, "User specified PM-QOS value"); | ||
47 | |||
48 | bool is_ath9k_unloaded; | 44 | bool is_ath9k_unloaded; |
49 | /* We use the hw_value as an index into our private channel structure */ | 45 | /* We use the hw_value as an index into our private channel structure */ |
50 | 46 | ||
@@ -762,9 +758,6 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, | |||
762 | ath_init_leds(sc); | 758 | ath_init_leds(sc); |
763 | ath_start_rfkill_poll(sc); | 759 | ath_start_rfkill_poll(sc); |
764 | 760 | ||
765 | pm_qos_add_request(&sc->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, | ||
766 | PM_QOS_DEFAULT_VALUE); | ||
767 | |||
768 | return 0; | 761 | return 0; |
769 | 762 | ||
770 | error_world: | 763 | error_world: |
@@ -831,7 +824,6 @@ void ath9k_deinit_device(struct ath_softc *sc) | |||
831 | } | 824 | } |
832 | 825 | ||
833 | ieee80211_unregister_hw(hw); | 826 | ieee80211_unregister_hw(hw); |
834 | pm_qos_remove_request(&sc->pm_qos_req); | ||
835 | ath_rx_cleanup(sc); | 827 | ath_rx_cleanup(sc); |
836 | ath_tx_cleanup(sc); | 828 | ath_tx_cleanup(sc); |
837 | ath9k_deinit_softc(sc); | 829 | ath9k_deinit_softc(sc); |
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
@@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
885 | struct ath_common *common = ath9k_hw_common(ah); | 885 | struct ath_common *common = ath9k_hw_common(ah); |
886 | 886 | ||
887 | if (!(ints & ATH9K_INT_GLOBAL)) | 887 | if (!(ints & ATH9K_INT_GLOBAL)) |
888 | ath9k_hw_enable_interrupts(ah); | 888 | ath9k_hw_disable_interrupts(ah); |
889 | 889 | ||
890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); | 890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); |
891 | 891 | ||
@@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); | 963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); |
964 | } | 964 | } |
965 | 965 | ||
966 | ath9k_hw_enable_interrupts(ah); | 966 | if (ints & ATH9K_INT_GLOBAL) |
967 | ath9k_hw_enable_interrupts(ah); | ||
967 | 968 | ||
968 | return; | 969 | return; |
969 | } | 970 | } |
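The two mac.c changes flip the interrupt-mask handling: when the caller did not request the global enable bit, interrupts must be turned off up front (the old code enabled them by mistake), and they are only re-armed at the end if the global bit is actually set. A minimal stand-alone illustration of that guard, with hypothetical helpers standing in for ath9k_hw_enable/disable_interrupts():

    #include <stdint.h>
    #include <stdio.h>

    #define INT_GLOBAL 0x80000000u           /* illustrative master-enable bit */

    static void enable_ints(void)  { puts("interrupts enabled");  }
    static void disable_ints(void) { puts("interrupts disabled"); }

    static void set_interrupts(uint32_t ints)
    {
        if (!(ints & INT_GLOBAL))
            disable_ints();                  /* the old code enabled here */

        /* ... program the individual mask bits here ... */

        if (ints & INT_GLOBAL)               /* previously re-enabled unconditionally */
            enable_ints();
    }

    int main(void)
    {
        set_interrupts(0x00000001u);               /* masks only: stays disabled */
        set_interrupts(INT_GLOBAL | 0x00000001u);  /* masks + global: re-enabled */
        return 0;
    }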
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index da5c64597c1f..a09d15f7aa6e 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c | |||
@@ -1173,12 +1173,6 @@ static int ath9k_start(struct ieee80211_hw *hw) | |||
1173 | ath9k_btcoex_timer_resume(sc); | 1173 | ath9k_btcoex_timer_resume(sc); |
1174 | } | 1174 | } |
1175 | 1175 | ||
1176 | /* User has the option to provide pm-qos value as a module | ||
1177 | * parameter rather than using the default value of | ||
1178 | * 'ATH9K_PM_QOS_DEFAULT_VALUE'. | ||
1179 | */ | ||
1180 | pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value); | ||
1181 | |||
1182 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) | 1176 | if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en) |
1183 | common->bus_ops->extn_synch_en(common); | 1177 | common->bus_ops->extn_synch_en(common); |
1184 | 1178 | ||
@@ -1345,8 +1339,6 @@ static void ath9k_stop(struct ieee80211_hw *hw) | |||
1345 | 1339 | ||
1346 | sc->sc_flags |= SC_OP_INVALID; | 1340 | sc->sc_flags |= SC_OP_INVALID; |
1347 | 1341 | ||
1348 | pm_qos_update_request(&sc->pm_qos_req, PM_QOS_DEFAULT_VALUE); | ||
1349 | |||
1350 | mutex_unlock(&sc->mutex); | 1342 | mutex_unlock(&sc->mutex); |
1351 | 1343 | ||
1352 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); | 1344 | ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n"); |
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
@@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
118 | { USB_DEVICE(0x057c, 0x8402) }, | 118 | { USB_DEVICE(0x057c, 0x8402) }, |
119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ | 119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ |
120 | { USB_DEVICE(0x1668, 0x1200) }, | 120 | { USB_DEVICE(0x1668, 0x1200) }, |
121 | /* Airlive X.USB a/b/g/n */ | ||
122 | { USB_DEVICE(0x1b75, 0x9170) }, | ||
121 | 123 | ||
122 | /* terminate */ | 124 | /* terminate */ |
123 | {} | 125 | {} |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
@@ -51,7 +51,7 @@ | |||
51 | #include "iwl-agn-debugfs.h" | 51 | #include "iwl-agn-debugfs.h" |
52 | 52 | ||
53 | /* Highest firmware API version supported */ | 53 | /* Highest firmware API version supported */ |
54 | #define IWL5000_UCODE_API_MAX 2 | 54 | #define IWL5000_UCODE_API_MAX 5 |
55 | #define IWL5150_UCODE_API_MAX 2 | 55 | #define IWL5150_UCODE_API_MAX 2 |
56 | 56 | ||
57 | /* Lowest firmware API version supported */ | 57 | /* Lowest firmware API version supported */ |
diff --git a/drivers/net/wireless/p54/p54pci.c b/drivers/net/wireless/p54/p54pci.c index 1eacba4daa5b..0494d7b102d4 100644 --- a/drivers/net/wireless/p54/p54pci.c +++ b/drivers/net/wireless/p54/p54pci.c | |||
@@ -199,6 +199,7 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
199 | while (i != idx) { | 199 | while (i != idx) { |
200 | u16 len; | 200 | u16 len; |
201 | struct sk_buff *skb; | 201 | struct sk_buff *skb; |
202 | dma_addr_t dma_addr; | ||
202 | desc = &ring[i]; | 203 | desc = &ring[i]; |
203 | len = le16_to_cpu(desc->len); | 204 | len = le16_to_cpu(desc->len); |
204 | skb = rx_buf[i]; | 205 | skb = rx_buf[i]; |
@@ -216,17 +217,20 @@ static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, | |||
216 | 217 | ||
217 | len = priv->common.rx_mtu; | 218 | len = priv->common.rx_mtu; |
218 | } | 219 | } |
220 | dma_addr = le32_to_cpu(desc->host_addr); | ||
221 | pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, | ||
222 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
219 | skb_put(skb, len); | 223 | skb_put(skb, len); |
220 | 224 | ||
221 | if (p54_rx(dev, skb)) { | 225 | if (p54_rx(dev, skb)) { |
222 | pci_unmap_single(priv->pdev, | 226 | pci_unmap_single(priv->pdev, dma_addr, |
223 | le32_to_cpu(desc->host_addr), | 227 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); |
224 | priv->common.rx_mtu + 32, | ||
225 | PCI_DMA_FROMDEVICE); | ||
226 | rx_buf[i] = NULL; | 228 | rx_buf[i] = NULL; |
227 | desc->host_addr = 0; | 229 | desc->host_addr = cpu_to_le32(0); |
228 | } else { | 230 | } else { |
229 | skb_trim(skb, 0); | 231 | skb_trim(skb, 0); |
232 | pci_dma_sync_single_for_device(priv->pdev, dma_addr, | ||
233 | priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE); | ||
230 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); | 234 | desc->len = cpu_to_le16(priv->common.rx_mtu + 32); |
231 | } | 235 | } |
232 | 236 | ||
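The p54pci RX hunk adds the missing DMA-sync calls around CPU access to a streaming mapping: sync the buffer for the CPU before reading the frame, and sync it back for the device when the buffer is recycled. Pulled out of the diff, the resulting ownership pattern looks like this (kernel-style sketch, not buildable on its own; priv, desc, skb and len come from the surrounding loop):

    dma_addr_t dma_addr = le32_to_cpu(desc->host_addr);
    size_t buf_len = priv->common.rx_mtu + 32;

    /* Hand the buffer to the CPU before touching the received bytes. */
    pci_dma_sync_single_for_cpu(priv->pdev, dma_addr, buf_len,
                                PCI_DMA_FROMDEVICE);
    skb_put(skb, len);

    if (p54_rx(dev, skb)) {
            /* Frame consumed: unmap for good; the caller refills the slot. */
            pci_unmap_single(priv->pdev, dma_addr, buf_len, PCI_DMA_FROMDEVICE);
    } else {
            /* Frame rejected: reset the skb and give the buffer back to
             * the device for the next DMA transfer. */
            skb_trim(skb, 0);
            pci_dma_sync_single_for_device(priv->pdev, dma_addr, buf_len,
                                           PCI_DMA_FROMDEVICE);
    }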
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
@@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ | 98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ |
99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ | 99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ |
100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ | 100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ |
101 | {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ | ||
101 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ | 102 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ |
102 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ | 103 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ |
103 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ | 104 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ |
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
@@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
2597 | __le32 mode; | 2597 | __le32 mode; |
2598 | int ret; | 2598 | int ret; |
2599 | 2599 | ||
2600 | if (priv->device_type != RNDIS_BCM4320B) | ||
2601 | return -ENOTSUPP; | ||
2602 | |||
2600 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, | 2603 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, |
2601 | enabled ? "enabled" : "disabled", | 2604 | enabled ? "enabled" : "disabled", |
2602 | timeout); | 2605 | timeout); |
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c index aa97971a38af..3b3f1e45ab3e 100644 --- a/drivers/net/wireless/rt2x00/rt2800pci.c +++ b/drivers/net/wireless/rt2x00/rt2800pci.c | |||
@@ -652,6 +652,12 @@ static void rt2800pci_fill_rxdone(struct queue_entry *entry, | |||
652 | */ | 652 | */ |
653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 653 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
654 | 654 | ||
655 | /* | ||
656 | * The hardware has already checked the Michael MIC and has | ||
657 | * stripped it from the frame. Signal this to mac80211. | ||
658 | */ | ||
659 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
660 | |||
655 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 661 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
656 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 662 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
657 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 663 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
@@ -1065,6 +1071,8 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { | |||
1065 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1071 | { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1066 | #endif | 1072 | #endif |
1067 | #ifdef CONFIG_RT2800PCI_RT35XX | 1073 | #ifdef CONFIG_RT2800PCI_RT35XX |
1074 | { PCI_DEVICE(0x1432, 0x7711), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1075 | { PCI_DEVICE(0x1432, 0x7722), PCI_DEVICE_DATA(&rt2800pci_ops) }, | ||
1068 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1076 | { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1069 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1077 | { PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
1070 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, | 1078 | { PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) }, |
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c index b97a4a54ff4c..197a36c05fda 100644 --- a/drivers/net/wireless/rt2x00/rt2800usb.c +++ b/drivers/net/wireless/rt2x00/rt2800usb.c | |||
@@ -486,6 +486,12 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry, | |||
486 | */ | 486 | */ |
487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; | 487 | rxdesc->flags |= RX_FLAG_IV_STRIPPED; |
488 | 488 | ||
489 | /* | ||
490 | * The hardware has already checked the Michael MIC and has | ||
491 | * stripped it from the frame. Signal this to mac80211. | ||
492 | */ | ||
493 | rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; | ||
494 | |||
489 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) | 495 | if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) |
490 | rxdesc->flags |= RX_FLAG_DECRYPTED; | 496 | rxdesc->flags |= RX_FLAG_DECRYPTED; |
491 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) | 497 | else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) |
diff --git a/drivers/nfc/Kconfig b/drivers/nfc/Kconfig index ffedfd492754..ea1580085347 100644 --- a/drivers/nfc/Kconfig +++ b/drivers/nfc/Kconfig | |||
@@ -3,7 +3,7 @@ | |||
3 | # | 3 | # |
4 | 4 | ||
5 | menuconfig NFC_DEVICES | 5 | menuconfig NFC_DEVICES |
6 | bool "NFC devices" | 6 | bool "Near Field Communication (NFC) devices" |
7 | default n | 7 | default n |
8 | ---help--- | 8 | ---help--- |
9 | You'll have to say Y if your computer contains an NFC device that | 9 | You'll have to say Y if your computer contains an NFC device that |
diff --git a/drivers/nfc/pn544.c b/drivers/nfc/pn544.c index bae647264dd6..724f65d8f9e4 100644 --- a/drivers/nfc/pn544.c +++ b/drivers/nfc/pn544.c | |||
@@ -60,7 +60,7 @@ enum pn544_irq { | |||
60 | struct pn544_info { | 60 | struct pn544_info { |
61 | struct miscdevice miscdev; | 61 | struct miscdevice miscdev; |
62 | struct i2c_client *i2c_dev; | 62 | struct i2c_client *i2c_dev; |
63 | struct regulator_bulk_data regs[2]; | 63 | struct regulator_bulk_data regs[3]; |
64 | 64 | ||
65 | enum pn544_state state; | 65 | enum pn544_state state; |
66 | wait_queue_head_t read_wait; | 66 | wait_queue_head_t read_wait; |
@@ -74,6 +74,7 @@ struct pn544_info { | |||
74 | 74 | ||
75 | static const char reg_vdd_io[] = "Vdd_IO"; | 75 | static const char reg_vdd_io[] = "Vdd_IO"; |
76 | static const char reg_vbat[] = "VBat"; | 76 | static const char reg_vbat[] = "VBat"; |
77 | static const char reg_vsim[] = "VSim"; | ||
77 | 78 | ||
78 | /* sysfs interface */ | 79 | /* sysfs interface */ |
79 | static ssize_t pn544_test(struct device *dev, | 80 | static ssize_t pn544_test(struct device *dev, |
@@ -740,6 +741,7 @@ static int __devinit pn544_probe(struct i2c_client *client, | |||
740 | 741 | ||
741 | info->regs[0].supply = reg_vdd_io; | 742 | info->regs[0].supply = reg_vdd_io; |
742 | info->regs[1].supply = reg_vbat; | 743 | info->regs[1].supply = reg_vbat; |
744 | info->regs[2].supply = reg_vsim; | ||
743 | r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), | 745 | r = regulator_bulk_get(&client->dev, ARRAY_SIZE(info->regs), |
744 | info->regs); | 746 | info->regs); |
745 | if (r < 0) | 747 | if (r < 0) |
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index 28295d0a50f6..4d87b5dc9284 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c | |||
@@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata; | |||
36 | (p)->unique_id = of_pdt_unique_id++; \ | 36 | (p)->unique_id = of_pdt_unique_id++; \ |
37 | } while (0) | 37 | } while (0) |
38 | 38 | ||
39 | static inline const char *of_pdt_node_name(struct device_node *dp) | 39 | static char * __init of_pdt_build_full_name(struct device_node *dp) |
40 | { | 40 | { |
41 | return dp->path_component_name; | 41 | int len, ourlen, plen; |
42 | char *n; | ||
43 | |||
44 | dp->path_component_name = build_path_component(dp); | ||
45 | |||
46 | plen = strlen(dp->parent->full_name); | ||
47 | ourlen = strlen(dp->path_component_name); | ||
48 | len = ourlen + plen + 2; | ||
49 | |||
50 | n = prom_early_alloc(len); | ||
51 | strcpy(n, dp->parent->full_name); | ||
52 | if (!of_node_is_root(dp->parent)) { | ||
53 | strcpy(n + plen, "/"); | ||
54 | plen++; | ||
55 | } | ||
56 | strcpy(n + plen, dp->path_component_name); | ||
57 | |||
58 | return n; | ||
42 | } | 59 | } |
43 | 60 | ||
44 | #else | 61 | #else /* CONFIG_SPARC */ |
45 | 62 | ||
46 | static inline void of_pdt_incr_unique_id(void *p) { } | 63 | static inline void of_pdt_incr_unique_id(void *p) { } |
47 | static inline void irq_trans_init(struct device_node *dp) { } | 64 | static inline void irq_trans_init(struct device_node *dp) { } |
48 | 65 | ||
49 | static inline const char *of_pdt_node_name(struct device_node *dp) | 66 | static char * __init of_pdt_build_full_name(struct device_node *dp) |
50 | { | 67 | { |
51 | return dp->name; | 68 | static int failsafe_id = 0; /* for generating unique names on failure */ |
69 | char *buf; | ||
70 | int len; | ||
71 | |||
72 | if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len)) | ||
73 | goto failsafe; | ||
74 | |||
75 | buf = prom_early_alloc(len + 1); | ||
76 | if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len)) | ||
77 | goto failsafe; | ||
78 | return buf; | ||
79 | |||
80 | failsafe: | ||
81 | buf = prom_early_alloc(strlen(dp->parent->full_name) + | ||
82 | strlen(dp->name) + 16); | ||
83 | sprintf(buf, "%s/%s@unknown%i", | ||
84 | of_node_is_root(dp->parent) ? "" : dp->parent->full_name, | ||
85 | dp->name, failsafe_id++); | ||
86 | pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf); | ||
87 | return buf; | ||
52 | } | 88 | } |
53 | 89 | ||
54 | #endif /* !CONFIG_SPARC */ | 90 | #endif /* !CONFIG_SPARC */ |
@@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name) | |||
132 | return buf; | 168 | return buf; |
133 | } | 169 | } |
134 | 170 | ||
135 | static char * __init of_pdt_try_pkg2path(phandle node) | ||
136 | { | ||
137 | char *res, *buf = NULL; | ||
138 | int len; | ||
139 | |||
140 | if (!of_pdt_prom_ops->pkg2path) | ||
141 | return NULL; | ||
142 | |||
143 | if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len)) | ||
144 | return NULL; | ||
145 | buf = prom_early_alloc(len + 1); | ||
146 | if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) { | ||
147 | pr_err("%s: package-to-path failed\n", __func__); | ||
148 | return NULL; | ||
149 | } | ||
150 | |||
151 | res = strrchr(buf, '/'); | ||
152 | if (!res) { | ||
153 | pr_err("%s: couldn't find / in %s\n", __func__, buf); | ||
154 | return NULL; | ||
155 | } | ||
156 | return res+1; | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * When fetching the node's name, first try using package-to-path; if | ||
161 | * that fails (either because the arch hasn't supplied a PROM callback, | ||
162 | * or some other random failure), fall back to just looking at the node's | ||
163 | * 'name' property. | ||
164 | */ | ||
165 | static char * __init of_pdt_build_name(phandle node) | ||
166 | { | ||
167 | char *buf; | ||
168 | |||
169 | buf = of_pdt_try_pkg2path(node); | ||
170 | if (!buf) | ||
171 | buf = of_pdt_get_one_property(node, "name"); | ||
172 | |||
173 | return buf; | ||
174 | } | ||
175 | |||
176 | static struct device_node * __init of_pdt_create_node(phandle node, | 171 | static struct device_node * __init of_pdt_create_node(phandle node, |
177 | struct device_node *parent) | 172 | struct device_node *parent) |
178 | { | 173 | { |
@@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
187 | 182 | ||
188 | kref_init(&dp->kref); | 183 | kref_init(&dp->kref); |
189 | 184 | ||
190 | dp->name = of_pdt_build_name(node); | 185 | dp->name = of_pdt_get_one_property(node, "name"); |
191 | dp->type = of_pdt_get_one_property(node, "device_type"); | 186 | dp->type = of_pdt_get_one_property(node, "device_type"); |
192 | dp->phandle = node; | 187 | dp->phandle = node; |
193 | 188 | ||
@@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
198 | return dp; | 193 | return dp; |
199 | } | 194 | } |
200 | 195 | ||
201 | static char * __init of_pdt_build_full_name(struct device_node *dp) | ||
202 | { | ||
203 | int len, ourlen, plen; | ||
204 | char *n; | ||
205 | |||
206 | plen = strlen(dp->parent->full_name); | ||
207 | ourlen = strlen(of_pdt_node_name(dp)); | ||
208 | len = ourlen + plen + 2; | ||
209 | |||
210 | n = prom_early_alloc(len); | ||
211 | strcpy(n, dp->parent->full_name); | ||
212 | if (!of_node_is_root(dp->parent)) { | ||
213 | strcpy(n + plen, "/"); | ||
214 | plen++; | ||
215 | } | ||
216 | strcpy(n + plen, of_pdt_node_name(dp)); | ||
217 | |||
218 | return n; | ||
219 | } | ||
220 | |||
221 | static struct device_node * __init of_pdt_build_tree(struct device_node *parent, | 196 | static struct device_node * __init of_pdt_build_tree(struct device_node *parent, |
222 | phandle node, | 197 | phandle node, |
223 | struct device_node ***nextp) | 198 | struct device_node ***nextp) |
@@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, | |||
240 | *(*nextp) = dp; | 215 | *(*nextp) = dp; |
241 | *nextp = &dp->allnext; | 216 | *nextp = &dp->allnext; |
242 | 217 | ||
243 | #if defined(CONFIG_SPARC) | ||
244 | dp->path_component_name = build_path_component(dp); | ||
245 | #endif | ||
246 | dp->full_name = of_pdt_build_full_name(dp); | 218 | dp->full_name = of_pdt_build_full_name(dp); |
247 | 219 | ||
248 | dp->child = of_pdt_build_tree(dp, | 220 | dp->child = of_pdt_build_tree(dp, |
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c index 3a5a6fcc0ead..492b7d807fe8 100644 --- a/drivers/pci/xen-pcifront.c +++ b/drivers/pci/xen-pcifront.c | |||
@@ -243,7 +243,7 @@ struct pci_ops pcifront_bus_ops = { | |||
243 | 243 | ||
244 | #ifdef CONFIG_PCI_MSI | 244 | #ifdef CONFIG_PCI_MSI |
245 | static int pci_frontend_enable_msix(struct pci_dev *dev, | 245 | static int pci_frontend_enable_msix(struct pci_dev *dev, |
246 | int **vector, int nvec) | 246 | int vector[], int nvec) |
247 | { | 247 | { |
248 | int err; | 248 | int err; |
249 | int i; | 249 | int i; |
@@ -277,18 +277,24 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, | |||
277 | if (likely(!err)) { | 277 | if (likely(!err)) { |
278 | if (likely(!op.value)) { | 278 | if (likely(!op.value)) { |
279 | /* we get the result */ | 279 | /* we get the result */ |
280 | for (i = 0; i < nvec; i++) | 280 | for (i = 0; i < nvec; i++) { |
281 | *(*vector+i) = op.msix_entries[i].vector; | 281 | if (op.msix_entries[i].vector <= 0) { |
282 | return 0; | 282 | dev_warn(&dev->dev, "MSI-X entry %d is invalid: %d!\n", |
283 | i, op.msix_entries[i].vector); | ||
284 | err = -EINVAL; | ||
285 | vector[i] = -1; | ||
286 | continue; | ||
287 | } | ||
288 | vector[i] = op.msix_entries[i].vector; | ||
289 | } | ||
283 | } else { | 290 | } else { |
284 | printk(KERN_DEBUG "enable msix get value %x\n", | 291 | printk(KERN_DEBUG "enable msix get value %x\n", |
285 | op.value); | 292 | op.value); |
286 | return op.value; | ||
287 | } | 293 | } |
288 | } else { | 294 | } else { |
289 | dev_err(&dev->dev, "enable msix get err %x\n", err); | 295 | dev_err(&dev->dev, "enable msix get err %x\n", err); |
290 | return err; | ||
291 | } | 296 | } |
297 | return err; | ||
292 | } | 298 | } |
293 | 299 | ||
294 | static void pci_frontend_disable_msix(struct pci_dev *dev) | 300 | static void pci_frontend_disable_msix(struct pci_dev *dev) |
@@ -310,7 +316,7 @@ static void pci_frontend_disable_msix(struct pci_dev *dev) | |||
310 | dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); | 316 | dev_err(&dev->dev, "pci_disable_msix get err %x\n", err); |
311 | } | 317 | } |
312 | 318 | ||
313 | static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector) | 319 | static int pci_frontend_enable_msi(struct pci_dev *dev, int vector[]) |
314 | { | 320 | { |
315 | int err; | 321 | int err; |
316 | struct xen_pci_op op = { | 322 | struct xen_pci_op op = { |
@@ -324,7 +330,13 @@ static int pci_frontend_enable_msi(struct pci_dev *dev, int **vector) | |||
324 | 330 | ||
325 | err = do_pci_op(pdev, &op); | 331 | err = do_pci_op(pdev, &op); |
326 | if (likely(!err)) { | 332 | if (likely(!err)) { |
327 | *(*vector) = op.value; | 333 | vector[0] = op.value; |
334 | if (op.value <= 0) { | ||
335 | dev_warn(&dev->dev, "MSI entry is invalid: %d!\n", | ||
336 | op.value); | ||
337 | err = -EINVAL; | ||
338 | vector[0] = -1; | ||
339 | } | ||
328 | } else { | 340 | } else { |
329 | dev_err(&dev->dev, "pci frontend enable msi failed for dev " | 341 | dev_err(&dev->dev, "pci frontend enable msi failed for dev " |
330 | "%x:%x\n", op.bus, op.devfn); | 342 | "%x:%x\n", op.bus, op.devfn); |
@@ -733,8 +745,7 @@ static void free_pdev(struct pcifront_device *pdev) | |||
733 | 745 | ||
734 | pcifront_free_roots(pdev); | 746 | pcifront_free_roots(pdev); |
735 | 747 | ||
736 | /*For PCIE_AER error handling job*/ | 748 | cancel_work_sync(&pdev->op_work); |
737 | flush_scheduled_work(); | ||
738 | 749 | ||
739 | if (pdev->irq >= 0) | 750 | if (pdev->irq >= 0) |
740 | unbind_from_irqhandler(pdev->irq, pdev); | 751 | unbind_from_irqhandler(pdev->irq, pdev); |
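The xen-pcifront change drops the int ** indirection in favour of a plain int vector[] and validates every vector the backend hands back, storing -1 and reporting an error for non-positive entries instead of trusting them. A userspace-style sketch of that validation loop; collect_vectors() and its arguments are hypothetical names used only for illustration.

    #include <stdio.h>

    /* Copy backend-provided vectors into 'vector', rejecting anything that
     * is not a positive IRQ number.  Returns 0, or -1 if any entry is bad. */
    static int collect_vectors(const int *from_backend, int vector[], int nvec)
    {
        int i, err = 0;

        for (i = 0; i < nvec; i++) {
            if (from_backend[i] <= 0) {
                fprintf(stderr, "MSI-X entry %d is invalid: %d\n",
                        i, from_backend[i]);
                vector[i] = -1;
                err = -1;
                continue;
            }
            vector[i] = from_backend[i];
        }
        return err;
    }

    int main(void)
    {
        int backend[3] = { 34, 0, 35 };   /* entry 1 is bogus */
        int vec[3];

        return collect_vectors(backend, vec, 3) ? 1 : 0;
    }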
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c index 0bdda5b3ed55..42fbf1a75576 100644 --- a/drivers/pcmcia/pcmcia_resource.c +++ b/drivers/pcmcia/pcmcia_resource.c | |||
@@ -518,6 +518,8 @@ int pcmcia_enable_device(struct pcmcia_device *p_dev) | |||
518 | flags |= CONF_ENABLE_IOCARD; | 518 | flags |= CONF_ENABLE_IOCARD; |
519 | if (flags & CONF_ENABLE_IOCARD) | 519 | if (flags & CONF_ENABLE_IOCARD) |
520 | s->socket.flags |= SS_IOCARD; | 520 | s->socket.flags |= SS_IOCARD; |
521 | if (flags & CONF_ENABLE_ZVCARD) | ||
522 | s->socket.flags |= SS_ZVCARD | SS_IOCARD; | ||
521 | if (flags & CONF_ENABLE_SPKR) { | 523 | if (flags & CONF_ENABLE_SPKR) { |
522 | s->socket.flags |= SS_SPKR_ENA; | 524 | s->socket.flags |= SS_SPKR_ENA; |
523 | status = CCSR_AUDIO_ENA; | 525 | status = CCSR_AUDIO_ENA; |
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 3755e7c8c715..2c540542b5af 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c | |||
@@ -215,7 +215,7 @@ pxa2xx_pcmcia_frequency_change(struct soc_pcmcia_socket *skt, | |||
215 | } | 215 | } |
216 | #endif | 216 | #endif |
217 | 217 | ||
218 | static void pxa2xx_configure_sockets(struct device *dev) | 218 | void pxa2xx_configure_sockets(struct device *dev) |
219 | { | 219 | { |
220 | struct pcmcia_low_level *ops = dev->platform_data; | 220 | struct pcmcia_low_level *ops = dev->platform_data; |
221 | /* | 221 | /* |
diff --git a/drivers/pcmcia/pxa2xx_base.h b/drivers/pcmcia/pxa2xx_base.h index bb62ea87b8f9..b609b45469ed 100644 --- a/drivers/pcmcia/pxa2xx_base.h +++ b/drivers/pcmcia/pxa2xx_base.h | |||
@@ -1,3 +1,4 @@ | |||
1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); | 1 | int pxa2xx_drv_pcmcia_add_one(struct soc_pcmcia_socket *skt); |
2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); | 2 | void pxa2xx_drv_pcmcia_ops(struct pcmcia_low_level *ops); |
3 | void pxa2xx_configure_sockets(struct device *dev); | ||
3 | 4 | ||
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c index c3f72192af66..a52039564e74 100644 --- a/drivers/pcmcia/pxa2xx_colibri.c +++ b/drivers/pcmcia/pxa2xx_colibri.c | |||
@@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void) | |||
181 | { | 181 | { |
182 | int ret; | 182 | int ret; |
183 | 183 | ||
184 | if (!machine_is_colibri() && !machine_is_colibri320()) | ||
185 | return -ENODEV; | ||
186 | |||
184 | colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | 187 | colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); |
185 | if (!colibri_pcmcia_device) | 188 | if (!colibri_pcmcia_device) |
186 | return -ENOMEM; | 189 | return -ENOMEM; |
diff --git a/drivers/pcmcia/pxa2xx_lubbock.c b/drivers/pcmcia/pxa2xx_lubbock.c index b9f8c8fb42bd..25afe637c657 100644 --- a/drivers/pcmcia/pxa2xx_lubbock.c +++ b/drivers/pcmcia/pxa2xx_lubbock.c | |||
@@ -226,6 +226,7 @@ int pcmcia_lubbock_init(struct sa1111_dev *sadev) | |||
226 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); | 226 | lubbock_set_misc_wr((1 << 15) | (1 << 14), 0); |
227 | 227 | ||
228 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); | 228 | pxa2xx_drv_pcmcia_ops(&lubbock_pcmcia_ops); |
229 | pxa2xx_configure_sockets(&sadev->dev); | ||
229 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, | 230 | ret = sa1111_pcmcia_add(sadev, &lubbock_pcmcia_ops, |
230 | pxa2xx_drv_pcmcia_add_one); | 231 | pxa2xx_drv_pcmcia_add_one); |
231 | } | 232 | } |
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig index f3a73dd77660..e4c4f3dc0728 100644 --- a/drivers/pps/generators/Kconfig +++ b/drivers/pps/generators/Kconfig | |||
@@ -6,7 +6,7 @@ comment "PPS generators support" | |||
6 | 6 | ||
7 | config PPS_GENERATOR_PARPORT | 7 | config PPS_GENERATOR_PARPORT |
8 | tristate "Parallel port PPS signal generator" | 8 | tristate "Parallel port PPS signal generator" |
9 | depends on PARPORT | 9 | depends on PARPORT && BROKEN |
10 | help | 10 | help |
11 | If you say yes here you get support for a PPS signal generator which | 11 | If you say yes here you get support for a PPS signal generator which |
12 | utilizes STROBE pin of a parallel port to send PPS signals. It uses | 12 | utilizes STROBE pin of a parallel port to send PPS signals. It uses |
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c index cba1b43f7519..a4e8eb9fece6 100644 --- a/drivers/pps/kapi.c +++ b/drivers/pps/kapi.c | |||
@@ -168,7 +168,7 @@ void pps_event(struct pps_device *pps, struct pps_event_time *ts, int event, | |||
168 | { | 168 | { |
169 | unsigned long flags; | 169 | unsigned long flags; |
170 | int captured = 0; | 170 | int captured = 0; |
171 | struct pps_ktime ts_real; | 171 | struct pps_ktime ts_real = { .sec = 0, .nsec = 0, .flags = 0 }; |
172 | 172 | ||
173 | /* check event type */ | 173 | /* check event type */ |
174 | BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); | 174 | BUG_ON((event & (PPS_CAPTUREASSERT | PPS_CAPTURECLEAR)) == 0); |
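pps_event() now zero-initializes ts_real so that paths which never fill it in (for example when only a raw timestamp is captured) do not propagate uninitialized stack data. The general pattern, as a tiny standalone example; the struct below is a stand-in for struct pps_ktime, not its real definition.

    #include <stdio.h>

    struct ktime_like { long sec; long nsec; int flags; };

    int main(void)
    {
        /* Designated initializers zero every member explicitly, so anything
         * read before being assigned is 0 rather than stack garbage. */
        struct ktime_like ts_real = { .sec = 0, .nsec = 0, .flags = 0 };

        printf("%ld.%09ld flags=%d\n", ts_real.sec, ts_real.nsec, ts_real.flags);
        return 0;
    }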
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c index 76b41853a877..1269fbd2deca 100644 --- a/drivers/rapidio/rio-sysfs.c +++ b/drivers/rapidio/rio-sysfs.c | |||
@@ -77,9 +77,9 @@ rio_read_config(struct file *filp, struct kobject *kobj, | |||
77 | 77 | ||
78 | /* Several chips lock up trying to read undefined config space */ | 78 | /* Several chips lock up trying to read undefined config space */ |
79 | if (capable(CAP_SYS_ADMIN)) | 79 | if (capable(CAP_SYS_ADMIN)) |
80 | size = 0x200000; | 80 | size = RIO_MAINT_SPACE_SZ; |
81 | 81 | ||
82 | if (off > size) | 82 | if (off >= size) |
83 | return 0; | 83 | return 0; |
84 | if (off + count > size) { | 84 | if (off + count > size) { |
85 | size -= off; | 85 | size -= off; |
@@ -147,10 +147,10 @@ rio_write_config(struct file *filp, struct kobject *kobj, | |||
147 | loff_t init_off = off; | 147 | loff_t init_off = off; |
148 | u8 *data = (u8 *) buf; | 148 | u8 *data = (u8 *) buf; |
149 | 149 | ||
150 | if (off > 0x200000) | 150 | if (off >= RIO_MAINT_SPACE_SZ) |
151 | return 0; | 151 | return 0; |
152 | if (off + count > 0x200000) { | 152 | if (off + count > RIO_MAINT_SPACE_SZ) { |
153 | size = 0x200000 - off; | 153 | size = RIO_MAINT_SPACE_SZ - off; |
154 | count = size; | 154 | count = size; |
155 | } | 155 | } |
156 | 156 | ||
@@ -200,7 +200,7 @@ static struct bin_attribute rio_config_attr = { | |||
200 | .name = "config", | 200 | .name = "config", |
201 | .mode = S_IRUGO | S_IWUSR, | 201 | .mode = S_IRUGO | S_IWUSR, |
202 | }, | 202 | }, |
203 | .size = 0x200000, | 203 | .size = RIO_MAINT_SPACE_SZ, |
204 | .read = rio_read_config, | 204 | .read = rio_read_config, |
205 | .write = rio_write_config, | 205 | .write = rio_write_config, |
206 | }; | 206 | }; |
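The rio-sysfs handlers now reject offsets at or beyond the maintenance space instead of one byte past it, and use RIO_MAINT_SPACE_SZ in place of the magic 0x200000. A minimal userspace illustration of the off-by-one being fixed; the constant and helper names below are stand-ins, not the kernel's.

    #include <stddef.h>
    #include <stdio.h>

    #define MAINT_SPACE_SZ 0x200000u   /* stands in for RIO_MAINT_SPACE_SZ */

    /* Clamp an (offset, count) window to the maintenance space.
     * Returns the number of bytes that may actually be accessed. */
    static size_t clamp_window(size_t off, size_t count)
    {
        if (off >= MAINT_SPACE_SZ)          /* 'off > size' wrongly allowed off == size */
            return 0;
        if (off + count > MAINT_SPACE_SZ)
            count = MAINT_SPACE_SZ - off;
        return count;
    }

    int main(void)
    {
        printf("%zu\n", clamp_window(MAINT_SPACE_SZ, 16));     /* 0: fully out of range */
        printf("%zu\n", clamp_window(MAINT_SPACE_SZ - 8, 16)); /* 8: clamped            */
        return 0;
    }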
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index f53d31b950d4..2bb5de1f2421 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c | |||
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) | |||
174 | 174 | ||
175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); | 175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); |
176 | 176 | ||
177 | BUG_ON(val < 0 || val > mc13xxx_regulators[id].desc.n_voltages); | 177 | BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); |
178 | 178 | ||
179 | return mc13xxx_regulators[id].voltages[val]; | 179 | return mc13xxx_regulators[id].voltages[val]; |
180 | } | 180 | } |
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index 8b0d2c4bde91..06df898842c0 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -120,6 +120,7 @@ static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) | |||
120 | return REGULATOR_MODE_IDLE; | 120 | return REGULATOR_MODE_IDLE; |
121 | default: | 121 | default: |
122 | BUG(); | 122 | BUG(); |
123 | return -EINVAL; | ||
123 | } | 124 | } |
124 | } | 125 | } |
125 | 126 | ||
diff --git a/drivers/rtc/rtc-at91sam9.c b/drivers/rtc/rtc-at91sam9.c index c36749e4c926..5469c52cba3d 100644 --- a/drivers/rtc/rtc-at91sam9.c +++ b/drivers/rtc/rtc-at91sam9.c | |||
@@ -309,7 +309,7 @@ static const struct rtc_class_ops at91_rtc_ops = { | |||
309 | .read_alarm = at91_rtc_readalarm, | 309 | .read_alarm = at91_rtc_readalarm, |
310 | .set_alarm = at91_rtc_setalarm, | 310 | .set_alarm = at91_rtc_setalarm, |
311 | .proc = at91_rtc_proc, | 311 | .proc = at91_rtc_proc, |
312 | .alarm_irq_enabled = at91_rtc_alarm_irq_enable, | 312 | .alarm_irq_enable = at91_rtc_alarm_irq_enable, |
313 | }; | 313 | }; |
314 | 314 | ||
315 | /* | 315 | /* |
diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index 23a9ee19764c..950735415a7c 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C | 2 | * RTC client/driver for the Maxim/Dallas DS3232 Real-Time Clock over I2C |
3 | * | 3 | * |
4 | * Copyright (C) 2009-2010 Freescale Semiconductor. | 4 | * Copyright (C) 2009-2011 Freescale Semiconductor. |
5 | * Author: Jack Lan <jack.lan@freescale.com> | 5 | * Author: Jack Lan <jack.lan@freescale.com> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
@@ -141,9 +141,11 @@ static int ds3232_read_time(struct device *dev, struct rtc_time *time) | |||
141 | time->tm_hour = bcd2bin(hour); | 141 | time->tm_hour = bcd2bin(hour); |
142 | } | 142 | } |
143 | 143 | ||
144 | time->tm_wday = bcd2bin(week); | 144 | /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ |
145 | time->tm_wday = bcd2bin(week) - 1; | ||
145 | time->tm_mday = bcd2bin(day); | 146 | time->tm_mday = bcd2bin(day); |
146 | time->tm_mon = bcd2bin(month & 0x7F); | 147 | /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ |
148 | time->tm_mon = bcd2bin(month & 0x7F) - 1; | ||
147 | if (century) | 149 | if (century) |
148 | add_century = 100; | 150 | add_century = 100; |
149 | 151 | ||
@@ -162,9 +164,11 @@ static int ds3232_set_time(struct device *dev, struct rtc_time *time) | |||
162 | buf[0] = bin2bcd(time->tm_sec); | 164 | buf[0] = bin2bcd(time->tm_sec); |
163 | buf[1] = bin2bcd(time->tm_min); | 165 | buf[1] = bin2bcd(time->tm_min); |
164 | buf[2] = bin2bcd(time->tm_hour); | 166 | buf[2] = bin2bcd(time->tm_hour); |
165 | buf[3] = bin2bcd(time->tm_wday); /* Day of the week */ | 167 | /* Day of the week in linux range is 0~6 while 1~7 in RTC chip */ |
168 | buf[3] = bin2bcd(time->tm_wday + 1); | ||
166 | buf[4] = bin2bcd(time->tm_mday); /* Date */ | 169 | buf[4] = bin2bcd(time->tm_mday); /* Date */ |
167 | buf[5] = bin2bcd(time->tm_mon); | 170 | /* linux tm_mon range:0~11, while month range is 1~12 in RTC chip */ |
171 | buf[5] = bin2bcd(time->tm_mon + 1); | ||
168 | if (time->tm_year >= 100) { | 172 | if (time->tm_year >= 100) { |
169 | buf[5] |= 0x80; | 173 | buf[5] |= 0x80; |
170 | buf[6] = bin2bcd(time->tm_year - 100); | 174 | buf[6] = bin2bcd(time->tm_year - 100); |
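The ds3232 fix accounts for the chip keeping day-of-week as 1-7 and month as 1-12 while struct rtc_time uses 0-6 and 0-11, subtracting one on read and adding it back on write. A standalone sketch of that translation, with local BCD helpers standing in for the kernel's bcd2bin()/bin2bcd(); the register values are example data.

    #include <stdint.h>
    #include <stdio.h>

    static uint8_t bcd2bin(uint8_t v) { return (uint8_t)((v & 0x0f) + (v >> 4) * 10); }
    static uint8_t bin2bcd(uint8_t v) { return (uint8_t)(((v / 10) << 4) | (v % 10)); }

    int main(void)
    {
        uint8_t reg_wday = 0x07, reg_mon = 0x12;   /* chip: day-of-week 7, month 12 (BCD) */

        int tm_wday = bcd2bin(reg_wday) - 1;       /* Linux range 0..6  -> 6  */
        int tm_mon  = bcd2bin(reg_mon & 0x7f) - 1; /* Linux range 0..11 -> 11 */

        printf("tm_wday=%d tm_mon=%d\n", tm_wday, tm_mon);

        /* Writing back adds the offset again before BCD encoding. */
        printf("reg_wday=0x%02x reg_mon=0x%02x\n",
               bin2bcd((uint8_t)(tm_wday + 1)), bin2bcd((uint8_t)(tm_mon + 1)));
        return 0;
    }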
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index cf953ecbfca9..b80fa2882408 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
@@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id) | |||
77 | } | 77 | } |
78 | 78 | ||
79 | /* Update control registers */ | 79 | /* Update control registers */ |
80 | static void s3c_rtc_setaie(int to) | 80 | static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) |
81 | { | 81 | { |
82 | unsigned int tmp; | 82 | unsigned int tmp; |
83 | 83 | ||
84 | pr_debug("%s: aie=%d\n", __func__, to); | 84 | pr_debug("%s: aie=%d\n", __func__, enabled); |
85 | 85 | ||
86 | tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; | 86 | tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; |
87 | 87 | ||
88 | if (to) | 88 | if (enabled) |
89 | tmp |= S3C2410_RTCALM_ALMEN; | 89 | tmp |= S3C2410_RTCALM_ALMEN; |
90 | 90 | ||
91 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); | 91 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); |
92 | |||
93 | return 0; | ||
92 | } | 94 | } |
93 | 95 | ||
94 | static int s3c_rtc_setpie(struct device *dev, int enabled) | 96 | static int s3c_rtc_setpie(struct device *dev, int enabled) |
@@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
308 | 310 | ||
309 | writeb(alrm_en, base + S3C2410_RTCALM); | 311 | writeb(alrm_en, base + S3C2410_RTCALM); |
310 | 312 | ||
311 | s3c_rtc_setaie(alrm->enabled); | 313 | s3c_rtc_setaie(dev, alrm->enabled); |
312 | 314 | ||
313 | return 0; | 315 | return 0; |
314 | } | 316 | } |
@@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
440 | rtc_device_unregister(rtc); | 442 | rtc_device_unregister(rtc); |
441 | 443 | ||
442 | s3c_rtc_setpie(&dev->dev, 0); | 444 | s3c_rtc_setpie(&dev->dev, 0); |
443 | s3c_rtc_setaie(0); | 445 | s3c_rtc_setaie(&dev->dev, 0); |
444 | 446 | ||
445 | clk_disable(rtc_clk); | 447 | clk_disable(rtc_clk); |
446 | clk_put(rtc_clk); | 448 | clk_put(rtc_clk); |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index c881a14fa5dd..1f6a4d894e73 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -62,8 +62,8 @@ static int xpram_devs; | |||
62 | /* | 62 | /* |
63 | * Parameter parsing functions. | 63 | * Parameter parsing functions. |
64 | */ | 64 | */ |
65 | static int __initdata devs = XPRAM_DEVS; | 65 | static int devs = XPRAM_DEVS; |
66 | static char __initdata *sizes[XPRAM_MAX_DEVS]; | 66 | static char *sizes[XPRAM_MAX_DEVS]; |
67 | 67 | ||
68 | module_param(devs, int, 0); | 68 | module_param(devs, int, 0); |
69 | module_param_array(sizes, charp, NULL, 0); | 69 | module_param_array(sizes, charp, NULL, 0); |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 8cd58e412b5e..5ad44daef73b 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
@@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, | |||
460 | unsigned int cmd, unsigned long arg) | 460 | unsigned int cmd, unsigned long arg) |
461 | { | 461 | { |
462 | void __user *argp; | 462 | void __user *argp; |
463 | int ct, perm; | 463 | unsigned int ct; |
464 | int perm; | ||
464 | 465 | ||
465 | argp = (void __user *)arg; | 466 | argp = (void __user *)arg; |
466 | 467 | ||
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 7a242f073632..267b54e8ff5a 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
@@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request) | |||
280 | return rc; | 280 | return rc; |
281 | } | 281 | } |
282 | 282 | ||
283 | static inline void | ||
284 | tape_do_io_async_free(struct tape_device *device, struct tape_request *request) | ||
285 | { | ||
286 | request->callback = (void *) tape_free_request; | ||
287 | request->callback_data = NULL; | ||
288 | tape_do_io_async(device, request); | ||
289 | } | ||
290 | |||
283 | extern int tape_oper_handler(int irq, int status); | 291 | extern int tape_oper_handler(int irq, int status); |
284 | extern void tape_noper_handler(int irq, int status); | 292 | extern void tape_noper_handler(int irq, int status); |
285 | extern int tape_open(struct tape_device *); | 293 | extern int tape_open(struct tape_device *); |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index c17f35b6136a..c26511171ffe 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int); | |||
53 | * Medium sense for 34xx tapes. There is no 'real' medium sense call. | 53 | * Medium sense for 34xx tapes. There is no 'real' medium sense call. |
54 | * So we just do a normal sense. | 54 | * So we just do a normal sense. |
55 | */ | 55 | */ |
56 | static int | 56 | static void __tape_34xx_medium_sense(struct tape_request *request) |
57 | tape_34xx_medium_sense(struct tape_device *device) | ||
58 | { | 57 | { |
59 | struct tape_request *request; | 58 | struct tape_device *device = request->device; |
60 | unsigned char *sense; | 59 | unsigned char *sense; |
61 | int rc; | ||
62 | |||
63 | request = tape_alloc_request(1, 32); | ||
64 | if (IS_ERR(request)) { | ||
65 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
66 | return PTR_ERR(request); | ||
67 | } | ||
68 | |||
69 | request->op = TO_MSEN; | ||
70 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
71 | 60 | ||
72 | rc = tape_do_io_interruptible(device, request); | ||
73 | if (request->rc == 0) { | 61 | if (request->rc == 0) { |
74 | sense = request->cpdata; | 62 | sense = request->cpdata; |
75 | 63 | ||
@@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device) | |||
88 | device->tape_generic_status |= GMT_WR_PROT(~0); | 76 | device->tape_generic_status |= GMT_WR_PROT(~0); |
89 | else | 77 | else |
90 | device->tape_generic_status &= ~GMT_WR_PROT(~0); | 78 | device->tape_generic_status &= ~GMT_WR_PROT(~0); |
91 | } else { | 79 | } else |
92 | DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", | 80 | DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", |
93 | request->rc); | 81 | request->rc); |
94 | } | ||
95 | tape_free_request(request); | 82 | tape_free_request(request); |
83 | } | ||
84 | |||
85 | static int tape_34xx_medium_sense(struct tape_device *device) | ||
86 | { | ||
87 | struct tape_request *request; | ||
88 | int rc; | ||
89 | |||
90 | request = tape_alloc_request(1, 32); | ||
91 | if (IS_ERR(request)) { | ||
92 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
93 | return PTR_ERR(request); | ||
94 | } | ||
96 | 95 | ||
96 | request->op = TO_MSEN; | ||
97 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
98 | rc = tape_do_io_interruptible(device, request); | ||
99 | __tape_34xx_medium_sense(request); | ||
97 | return rc; | 100 | return rc; |
98 | } | 101 | } |
99 | 102 | ||
103 | static void tape_34xx_medium_sense_async(struct tape_device *device) | ||
104 | { | ||
105 | struct tape_request *request; | ||
106 | |||
107 | request = tape_alloc_request(1, 32); | ||
108 | if (IS_ERR(request)) { | ||
109 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
110 | return; | ||
111 | } | ||
112 | |||
113 | request->op = TO_MSEN; | ||
114 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
115 | request->callback = (void *) __tape_34xx_medium_sense; | ||
116 | request->callback_data = NULL; | ||
117 | tape_do_io_async(device, request); | ||
118 | } | ||
119 | |||
100 | struct tape_34xx_work { | 120 | struct tape_34xx_work { |
101 | struct tape_device *device; | 121 | struct tape_device *device; |
102 | enum tape_op op; | 122 | enum tape_op op; |
@@ -109,6 +129,9 @@ struct tape_34xx_work { | |||
109 | * is inserted but cannot call tape_do_io* from an interrupt context. | 129 | * is inserted but cannot call tape_do_io* from an interrupt context. |
110 | * Maybe that's useful for other actions we want to start from the | 130 | * Maybe that's useful for other actions we want to start from the |
111 | * interrupt handler. | 131 | * interrupt handler. |
132 | * Note: the work handler is called by the system work queue. The tape | ||
133 | * commands started by the handler need to be asynchronous, otherwise | ||
134 | * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). | ||
112 | */ | 135 | */ |
113 | static void | 136 | static void |
114 | tape_34xx_work_handler(struct work_struct *work) | 137 | tape_34xx_work_handler(struct work_struct *work) |
@@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work) | |||
119 | 142 | ||
120 | switch(p->op) { | 143 | switch(p->op) { |
121 | case TO_MSEN: | 144 | case TO_MSEN: |
122 | tape_34xx_medium_sense(device); | 145 | tape_34xx_medium_sense_async(device); |
123 | break; | 146 | break; |
124 | default: | 147 | default: |
125 | DBF_EVENT(3, "T34XX: internal error: unknown work\n"); | 148 | DBF_EVENT(3, "T34XX: internal error: unknown work\n"); |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index fbe361fcd2c0..de2e99e0a71b 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -329,17 +329,17 @@ out: | |||
329 | /* | 329 | /* |
330 | * Enable encryption | 330 | * Enable encryption |
331 | */ | 331 | */ |
332 | static int tape_3592_enable_crypt(struct tape_device *device) | 332 | static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) |
333 | { | 333 | { |
334 | struct tape_request *request; | 334 | struct tape_request *request; |
335 | char *data; | 335 | char *data; |
336 | 336 | ||
337 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); | 337 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); |
338 | if (!crypt_supported(device)) | 338 | if (!crypt_supported(device)) |
339 | return -ENOSYS; | 339 | return ERR_PTR(-ENOSYS); |
340 | request = tape_alloc_request(2, 72); | 340 | request = tape_alloc_request(2, 72); |
341 | if (IS_ERR(request)) | 341 | if (IS_ERR(request)) |
342 | return PTR_ERR(request); | 342 | return request; |
343 | data = request->cpdata; | 343 | data = request->cpdata; |
344 | memset(data,0,72); | 344 | memset(data,0,72); |
345 | 345 | ||
@@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device) | |||
354 | request->op = TO_CRYPT_ON; | 354 | request->op = TO_CRYPT_ON; |
355 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | 355 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); |
356 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | 356 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); |
357 | return request; | ||
358 | } | ||
359 | |||
360 | static int tape_3592_enable_crypt(struct tape_device *device) | ||
361 | { | ||
362 | struct tape_request *request; | ||
363 | |||
364 | request = __tape_3592_enable_crypt(device); | ||
365 | if (IS_ERR(request)) | ||
366 | return PTR_ERR(request); | ||
357 | return tape_do_io_free(device, request); | 367 | return tape_do_io_free(device, request); |
358 | } | 368 | } |
359 | 369 | ||
370 | static void tape_3592_enable_crypt_async(struct tape_device *device) | ||
371 | { | ||
372 | struct tape_request *request; | ||
373 | |||
374 | request = __tape_3592_enable_crypt(device); | ||
375 | if (!IS_ERR(request)) | ||
376 | tape_do_io_async_free(device, request); | ||
377 | } | ||
378 | |||
360 | /* | 379 | /* |
361 | * Disable encryption | 380 | * Disable encryption |
362 | */ | 381 | */ |
363 | static int tape_3592_disable_crypt(struct tape_device *device) | 382 | static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) |
364 | { | 383 | { |
365 | struct tape_request *request; | 384 | struct tape_request *request; |
366 | char *data; | 385 | char *data; |
367 | 386 | ||
368 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); | 387 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); |
369 | if (!crypt_supported(device)) | 388 | if (!crypt_supported(device)) |
370 | return -ENOSYS; | 389 | return ERR_PTR(-ENOSYS); |
371 | request = tape_alloc_request(2, 72); | 390 | request = tape_alloc_request(2, 72); |
372 | if (IS_ERR(request)) | 391 | if (IS_ERR(request)) |
373 | return PTR_ERR(request); | 392 | return request; |
374 | data = request->cpdata; | 393 | data = request->cpdata; |
375 | memset(data,0,72); | 394 | memset(data,0,72); |
376 | 395 | ||
@@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device) | |||
383 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | 402 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); |
384 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | 403 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); |
385 | 404 | ||
405 | return request; | ||
406 | } | ||
407 | |||
408 | static int tape_3592_disable_crypt(struct tape_device *device) | ||
409 | { | ||
410 | struct tape_request *request; | ||
411 | |||
412 | request = __tape_3592_disable_crypt(device); | ||
413 | if (IS_ERR(request)) | ||
414 | return PTR_ERR(request); | ||
386 | return tape_do_io_free(device, request); | 415 | return tape_do_io_free(device, request); |
387 | } | 416 | } |
388 | 417 | ||
418 | static void tape_3592_disable_crypt_async(struct tape_device *device) | ||
419 | { | ||
420 | struct tape_request *request; | ||
421 | |||
422 | request = __tape_3592_disable_crypt(device); | ||
423 | if (!IS_ERR(request)) | ||
424 | tape_do_io_async_free(device, request); | ||
425 | } | ||
426 | |||
389 | /* | 427 | /* |
390 | * IOCTL: Set encryption status | 428 | * IOCTL: Set encryption status |
391 | */ | 429 | */ |
@@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) | |||
457 | /* | 495 | /* |
458 | * SENSE Medium: Get Sense data about medium state | 496 | * SENSE Medium: Get Sense data about medium state |
459 | */ | 497 | */ |
460 | static int | 498 | static int tape_3590_sense_medium(struct tape_device *device) |
461 | tape_3590_sense_medium(struct tape_device *device) | ||
462 | { | 499 | { |
463 | struct tape_request *request; | 500 | struct tape_request *request; |
464 | 501 | ||
@@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device) | |||
470 | return tape_do_io_free(device, request); | 507 | return tape_do_io_free(device, request); |
471 | } | 508 | } |
472 | 509 | ||
510 | static void tape_3590_sense_medium_async(struct tape_device *device) | ||
511 | { | ||
512 | struct tape_request *request; | ||
513 | |||
514 | request = tape_alloc_request(1, 128); | ||
515 | if (IS_ERR(request)) | ||
516 | return; | ||
517 | request->op = TO_MSEN; | ||
518 | tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); | ||
519 | tape_do_io_async_free(device, request); | ||
520 | } | ||
521 | |||
473 | /* | 522 | /* |
474 | * MTTELL: Tell block. Return the number of block relative to current file. | 523 | * MTTELL: Tell block. Return the number of block relative to current file. |
475 | */ | 524 | */ |
@@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device, | |||
546 | * 2. The attention msg is written to the "read subsystem data" buffer. | 595 | * 2. The attention msg is written to the "read subsystem data" buffer. |
547 | * In this case we probably should print it to the console. | 596 | * In this case we probably should print it to the console. |
548 | */ | 597 | */ |
549 | static int | 598 | static void tape_3590_read_attmsg_async(struct tape_device *device) |
550 | tape_3590_read_attmsg(struct tape_device *device) | ||
551 | { | 599 | { |
552 | struct tape_request *request; | 600 | struct tape_request *request; |
553 | char *buf; | 601 | char *buf; |
554 | 602 | ||
555 | request = tape_alloc_request(3, 4096); | 603 | request = tape_alloc_request(3, 4096); |
556 | if (IS_ERR(request)) | 604 | if (IS_ERR(request)) |
557 | return PTR_ERR(request); | 605 | return; |
558 | request->op = TO_READ_ATTMSG; | 606 | request->op = TO_READ_ATTMSG; |
559 | buf = request->cpdata; | 607 | buf = request->cpdata; |
560 | buf[0] = PREP_RD_SS_DATA; | 608 | buf[0] = PREP_RD_SS_DATA; |
@@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device) | |||
562 | tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); | 610 | tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); |
563 | tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); | 611 | tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); |
564 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); | 612 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); |
565 | return tape_do_io_free(device, request); | 613 | tape_do_io_async_free(device, request); |
566 | } | 614 | } |
567 | 615 | ||
568 | /* | 616 | /* |
569 | * These functions are used to schedule follow-up actions from within an | 617 | * These functions are used to schedule follow-up actions from within an |
570 | * interrupt context (like unsolicited interrupts). | 618 | * interrupt context (like unsolicited interrupts). |
619 | * Note: the work handler is called by the system work queue. The tape | ||
620 | * commands started by the handler need to be asynchronous, otherwise | ||
621 | * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). | ||
571 | */ | 622 | */ |
572 | struct work_handler_data { | 623 | struct work_handler_data { |
573 | struct tape_device *device; | 624 | struct tape_device *device; |
@@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work) | |||
583 | 634 | ||
584 | switch (p->op) { | 635 | switch (p->op) { |
585 | case TO_MSEN: | 636 | case TO_MSEN: |
586 | tape_3590_sense_medium(p->device); | 637 | tape_3590_sense_medium_async(p->device); |
587 | break; | 638 | break; |
588 | case TO_READ_ATTMSG: | 639 | case TO_READ_ATTMSG: |
589 | tape_3590_read_attmsg(p->device); | 640 | tape_3590_read_attmsg_async(p->device); |
590 | break; | 641 | break; |
591 | case TO_CRYPT_ON: | 642 | case TO_CRYPT_ON: |
592 | tape_3592_enable_crypt(p->device); | 643 | tape_3592_enable_crypt_async(p->device); |
593 | break; | 644 | break; |
594 | case TO_CRYPT_OFF: | 645 | case TO_CRYPT_OFF: |
595 | tape_3592_disable_crypt(p->device); | 646 | tape_3592_disable_crypt_async(p->device); |
596 | break; | 647 | break; |
597 | default: | 648 | default: |
598 | DBF_EVENT(3, "T3590: work handler undefined for " | 649 | DBF_EVENT(3, "T3590: work handler undefined for " |
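
The tape_3590 hunk above splits each crypt operation into a common request builder plus two thin wrappers: the ioctl path still returns an errno, while the work-handler path submits asynchronously and has no caller to report to. The user-space sketch below models that shape only; build_request, do_op_sync and do_op_async are stand-in names, and the ERR_PTR helpers are a minimal emulation of the kernel's, not the driver's real API.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct request { int op; };

/* Minimal user-space stand-ins for the kernel's ERR_PTR helpers. */
static void *ERR_PTR(long err) { return (void *)err; }
static int IS_ERR(const void *p) { return (unsigned long)p >= (unsigned long)-4095; }
static long PTR_ERR(const void *p) { return (long)p; }

/* Common builder: returns a request or an encoded errno. */
static struct request *build_request(int supported)
{
	struct request *req;

	if (!supported)
		return ERR_PTR(-ENOSYS);
	req = malloc(sizeof(*req));
	if (!req)
		return ERR_PTR(-ENOMEM);
	req->op = 1;
	return req;
}

/* Synchronous path (ioctl): propagate the errno to the caller. */
static int do_op_sync(int supported)
{
	struct request *req = build_request(supported);

	if (IS_ERR(req))
		return (int)PTR_ERR(req);
	printf("submit and wait, op=%d\n", req->op);
	free(req);
	return 0;
}

/* Asynchronous path (work handler): no caller to return an error to. */
static void do_op_async(int supported)
{
	struct request *req = build_request(supported);

	if (IS_ERR(req))
		return;
	printf("fire and forget, op=%d\n", req->op);
	free(req);
}

int main(void)
{
	printf("sync without support: %d\n", do_op_sync(0)); /* prints -ENOSYS */
	do_op_async(1);
	return 0;
}

In the driver itself, tape_do_io_free() and tape_do_io_async_free() play the role of the two submit paths shown here.
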
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9045c52abd25..fb2bb35c62cb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) | |||
443 | &sdev->request_queue->queue_flags); | 443 | &sdev->request_queue->queue_flags); |
444 | if (flagset) | 444 | if (flagset) |
445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); | 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); |
446 | __blk_run_queue(sdev->request_queue); | 446 | __blk_run_queue(sdev->request_queue, false); |
447 | if (flagset) | 447 | if (flagset) |
448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); | 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); |
449 | spin_unlock(sdev->request_queue->queue_lock); | 449 | spin_unlock(sdev->request_queue->queue_lock); |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 998c01be3234..5c3ccfc6b622 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
@@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) | |||
3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); |
3830 | if (flagset) | 3830 | if (flagset) |
3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); | 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); |
3832 | __blk_run_queue(rport->rqst_q); | 3832 | __blk_run_queue(rport->rqst_q, false); |
3833 | if (flagset) | 3833 | if (flagset) |
3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); |
3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); | 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); |
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 158cecbec718..4a109835e420 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c | |||
@@ -282,6 +282,9 @@ int core_tmr_lun_reset( | |||
282 | 282 | ||
283 | atomic_set(&task->task_active, 0); | 283 | atomic_set(&task->task_active, 0); |
284 | atomic_set(&task->task_stop, 0); | 284 | atomic_set(&task->task_stop, 0); |
285 | } else { | ||
286 | if (atomic_read(&task->task_execute_queue) != 0) | ||
287 | transport_remove_task_from_execute_queue(task, dev); | ||
285 | } | 288 | } |
286 | __transport_stop_task_timer(task, &flags); | 289 | __transport_stop_task_timer(task, &flags); |
287 | 290 | ||
@@ -301,6 +304,7 @@ int core_tmr_lun_reset( | |||
301 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" | 304 | DEBUG_LR("LUN_RESET: got t_transport_active = 1 for" |
302 | " task: %p, t_fe_count: %d dev: %p\n", task, | 305 | " task: %p, t_fe_count: %d dev: %p\n", task, |
303 | fe_count, dev); | 306 | fe_count, dev); |
307 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | ||
304 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, | 308 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, |
305 | flags); | 309 | flags); |
306 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 310 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
@@ -310,6 +314,7 @@ int core_tmr_lun_reset( | |||
310 | } | 314 | } |
311 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," | 315 | DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p," |
312 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); | 316 | " t_fe_count: %d dev: %p\n", task, fe_count, dev); |
317 | atomic_set(&T_TASK(cmd)->t_transport_aborted, 1); | ||
313 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); | 318 | spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags); |
314 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); | 319 | core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); |
315 | 320 | ||
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 236e22d8cfae..4bbf6c147f89 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -1207,7 +1207,7 @@ transport_get_task_from_execute_queue(struct se_device *dev) | |||
1207 | * | 1207 | * |
1208 | * | 1208 | * |
1209 | */ | 1209 | */ |
1210 | static void transport_remove_task_from_execute_queue( | 1210 | void transport_remove_task_from_execute_queue( |
1211 | struct se_task *task, | 1211 | struct se_task *task, |
1212 | struct se_device *dev) | 1212 | struct se_device *dev) |
1213 | { | 1213 | { |
@@ -5549,7 +5549,8 @@ static void transport_generic_wait_for_tasks( | |||
5549 | 5549 | ||
5550 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); | 5550 | atomic_set(&T_TASK(cmd)->transport_lun_stop, 0); |
5551 | } | 5551 | } |
5552 | if (!atomic_read(&T_TASK(cmd)->t_transport_active)) | 5552 | if (!atomic_read(&T_TASK(cmd)->t_transport_active) || |
5553 | atomic_read(&T_TASK(cmd)->t_transport_aborted)) | ||
5553 | goto remove; | 5554 | goto remove; |
5554 | 5555 | ||
5555 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); | 5556 | atomic_set(&T_TASK(cmd)->t_transport_stop, 1); |
@@ -5956,6 +5957,9 @@ static void transport_processing_shutdown(struct se_device *dev) | |||
5956 | 5957 | ||
5957 | atomic_set(&task->task_active, 0); | 5958 | atomic_set(&task->task_active, 0); |
5958 | atomic_set(&task->task_stop, 0); | 5959 | atomic_set(&task->task_stop, 0); |
5960 | } else { | ||
5961 | if (atomic_read(&task->task_execute_queue) != 0) | ||
5962 | transport_remove_task_from_execute_queue(task, dev); | ||
5959 | } | 5963 | } |
5960 | __transport_stop_task_timer(task, &flags); | 5964 | __transport_stop_task_timer(task, &flags); |
5961 | 5965 | ||
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index f7a5dba3ca23..bf7c687519ef 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -4,7 +4,6 @@ | |||
4 | 4 | ||
5 | menuconfig THERMAL | 5 | menuconfig THERMAL |
6 | tristate "Generic Thermal sysfs driver" | 6 | tristate "Generic Thermal sysfs driver" |
7 | depends on NET | ||
8 | help | 7 | help |
9 | Generic Thermal Sysfs driver offers a generic mechanism for | 8 | Generic Thermal Sysfs driver offers a generic mechanism for |
10 | thermal management. Usually it's made up of one or more thermal | 9 | thermal management. Usually it's made up of one or more thermal |
diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 7d0e63c79280..713b7ea4a607 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c | |||
@@ -62,20 +62,6 @@ static DEFINE_MUTEX(thermal_list_lock); | |||
62 | 62 | ||
63 | static unsigned int thermal_event_seqnum; | 63 | static unsigned int thermal_event_seqnum; |
64 | 64 | ||
65 | static struct genl_family thermal_event_genl_family = { | ||
66 | .id = GENL_ID_GENERATE, | ||
67 | .name = THERMAL_GENL_FAMILY_NAME, | ||
68 | .version = THERMAL_GENL_VERSION, | ||
69 | .maxattr = THERMAL_GENL_ATTR_MAX, | ||
70 | }; | ||
71 | |||
72 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
73 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
74 | }; | ||
75 | |||
76 | static int genetlink_init(void); | ||
77 | static void genetlink_exit(void); | ||
78 | |||
79 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) | 65 | static int get_idr(struct idr *idr, struct mutex *lock, int *id) |
80 | { | 66 | { |
81 | int err; | 67 | int err; |
@@ -1225,6 +1211,18 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
1225 | 1211 | ||
1226 | EXPORT_SYMBOL(thermal_zone_device_unregister); | 1212 | EXPORT_SYMBOL(thermal_zone_device_unregister); |
1227 | 1213 | ||
1214 | #ifdef CONFIG_NET | ||
1215 | static struct genl_family thermal_event_genl_family = { | ||
1216 | .id = GENL_ID_GENERATE, | ||
1217 | .name = THERMAL_GENL_FAMILY_NAME, | ||
1218 | .version = THERMAL_GENL_VERSION, | ||
1219 | .maxattr = THERMAL_GENL_ATTR_MAX, | ||
1220 | }; | ||
1221 | |||
1222 | static struct genl_multicast_group thermal_event_mcgrp = { | ||
1223 | .name = THERMAL_GENL_MCAST_GROUP_NAME, | ||
1224 | }; | ||
1225 | |||
1228 | int generate_netlink_event(u32 orig, enum events event) | 1226 | int generate_netlink_event(u32 orig, enum events event) |
1229 | { | 1227 | { |
1230 | struct sk_buff *skb; | 1228 | struct sk_buff *skb; |
@@ -1301,6 +1299,15 @@ static int genetlink_init(void) | |||
1301 | return result; | 1299 | return result; |
1302 | } | 1300 | } |
1303 | 1301 | ||
1302 | static void genetlink_exit(void) | ||
1303 | { | ||
1304 | genl_unregister_family(&thermal_event_genl_family); | ||
1305 | } | ||
1306 | #else /* !CONFIG_NET */ | ||
1307 | static inline int genetlink_init(void) { return 0; } | ||
1308 | static inline void genetlink_exit(void) {} | ||
1309 | #endif /* !CONFIG_NET */ | ||
1310 | |||
1304 | static int __init thermal_init(void) | 1311 | static int __init thermal_init(void) |
1305 | { | 1312 | { |
1306 | int result = 0; | 1313 | int result = 0; |
@@ -1316,11 +1323,6 @@ static int __init thermal_init(void) | |||
1316 | return result; | 1323 | return result; |
1317 | } | 1324 | } |
1318 | 1325 | ||
1319 | static void genetlink_exit(void) | ||
1320 | { | ||
1321 | genl_unregister_family(&thermal_event_genl_family); | ||
1322 | } | ||
1323 | |||
1324 | static void __exit thermal_exit(void) | 1326 | static void __exit thermal_exit(void) |
1325 | { | 1327 | { |
1326 | class_unregister(&thermal_class); | 1328 | class_unregister(&thermal_class); |
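
The thermal change above moves the generic-netlink pieces under CONFIG_NET and provides empty static-inline stubs otherwise, so thermal_init()/thermal_exit() can keep calling genetlink_init()/genetlink_exit() unconditionally. Below is a compilable sketch of that stub idiom; CONFIG_FOO, foo_init and foo_exit are made-up names, not the real Kconfig symbol or thermal API.

#include <stdio.h>

#ifdef CONFIG_FOO
/* Real implementations would live here when the option is enabled. */
int foo_init(void);
void foo_exit(void);
#else /* !CONFIG_FOO */
static inline int foo_init(void) { return 0; }
static inline void foo_exit(void) { }
#endif /* !CONFIG_FOO */

/* Call sites stay free of #ifdefs. */
static int module_init_example(void)
{
	int ret = foo_init();

	if (ret)
		return ret;
	printf("core init done\n");
	return 0;
}

int main(void)
{
	int ret = module_init_example();

	foo_exit();
	return ret;
}
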
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c index 93760b2ea172..1ef4df9bf7e4 100644 --- a/drivers/tty/serial/serial_cs.c +++ b/drivers/tty/serial/serial_cs.c | |||
@@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
712 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), | 712 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), |
713 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), | 713 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), |
714 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), | 714 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), |
715 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), | ||
715 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), | 716 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), |
716 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), | 717 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), |
717 | PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), | 718 | PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), |
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index d041c6826e43..0f299b7aad60 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c | |||
@@ -2681,17 +2681,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, | |||
2681 | 2681 | ||
2682 | mutex_lock(&usb_address0_mutex); | 2682 | mutex_lock(&usb_address0_mutex); |
2683 | 2683 | ||
2684 | if (!udev->config && oldspeed == USB_SPEED_SUPER) { | 2684 | /* Reset the device; full speed may morph to high speed */ |
2685 | /* Don't reset USB 3.0 devices during an initial setup */ | 2685 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ |
2686 | usb_set_device_state(udev, USB_STATE_DEFAULT); | 2686 | retval = hub_port_reset(hub, port1, udev, delay); |
2687 | } else { | 2687 | if (retval < 0) /* error or disconnect */ |
2688 | /* Reset the device; full speed may morph to high speed */ | 2688 | goto fail; |
2689 | /* FIXME a USB 2.0 device may morph into SuperSpeed on reset. */ | 2689 | /* success, speed is known */ |
2690 | retval = hub_port_reset(hub, port1, udev, delay); | 2690 | |
2691 | if (retval < 0) /* error or disconnect */ | ||
2692 | goto fail; | ||
2693 | /* success, speed is known */ | ||
2694 | } | ||
2695 | retval = -ENODEV; | 2691 | retval = -ENODEV; |
2696 | 2692 | ||
2697 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { | 2693 | if (oldspeed != USB_SPEED_UNKNOWN && oldspeed != udev->speed) { |
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 44c595432d6f..81ce6a8e1d94 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c | |||
@@ -48,6 +48,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
48 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = | 48 | { USB_DEVICE(0x04b4, 0x0526), .driver_info = |
49 | USB_QUIRK_CONFIG_INTF_STRINGS }, | 49 | USB_QUIRK_CONFIG_INTF_STRINGS }, |
50 | 50 | ||
51 | /* Samsung Android phone modem - ID conflict with SPH-I500 */ | ||
52 | { USB_DEVICE(0x04e8, 0x6601), .driver_info = | ||
53 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
54 | |||
51 | /* Roland SC-8820 */ | 55 | /* Roland SC-8820 */ |
52 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, | 56 | { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, |
53 | 57 | ||
@@ -68,6 +72,10 @@ static const struct usb_device_id usb_quirk_list[] = { | |||
68 | /* M-Systems Flash Disk Pioneers */ | 72 | /* M-Systems Flash Disk Pioneers */ |
69 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, | 73 | { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, |
70 | 74 | ||
75 | /* Keytouch QWERTY Panel keyboard */ | ||
76 | { USB_DEVICE(0x0926, 0x3333), .driver_info = | ||
77 | USB_QUIRK_CONFIG_INTF_STRINGS }, | ||
78 | |||
71 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ | 79 | /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */ |
72 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, | 80 | { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF }, |
73 | 81 | ||
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 3c6e1a058745..5e1495097ec3 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c | |||
@@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) | |||
346 | 346 | ||
347 | if (unlikely(!skb)) | 347 | if (unlikely(!skb)) |
348 | break; | 348 | break; |
349 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, | ||
350 | req->actual); | ||
351 | page = NULL; | ||
352 | 349 | ||
353 | if (req->actual < req->length) { /* Last fragment */ | 350 | if (skb->len == 0) { /* First fragment */ |
354 | skb->protocol = htons(ETH_P_PHONET); | 351 | skb->protocol = htons(ETH_P_PHONET); |
355 | skb_reset_mac_header(skb); | 352 | skb_reset_mac_header(skb); |
356 | pskb_pull(skb, 1); | 353 | /* Can't use pskb_pull() on page in IRQ */ |
354 | memcpy(skb_put(skb, 1), page_address(page), 1); | ||
355 | } | ||
356 | |||
357 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
358 | skb->len == 0, req->actual); | ||
359 | page = NULL; | ||
360 | |||
361 | if (req->actual < req->length) { /* Last fragment */ | ||
357 | skb->dev = dev; | 362 | skb->dev = dev; |
358 | dev->stats.rx_packets++; | 363 | dev->stats.rx_packets++; |
359 | dev->stats.rx_bytes += skb->len; | 364 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index e8f4f36fdf0b..a6f21b891f68 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
@@ -29,6 +29,7 @@ | |||
29 | 29 | ||
30 | #include <linux/of.h> | 30 | #include <linux/of.h> |
31 | #include <linux/of_platform.h> | 31 | #include <linux/of_platform.h> |
32 | #include <linux/of_address.h> | ||
32 | 33 | ||
33 | /** | 34 | /** |
34 | * ehci_xilinx_of_setup - Initialize the device for ehci_reset() | 35 | * ehci_xilinx_of_setup - Initialize the device for ehci_reset() |
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c index fcbf4abbf381..0231814a97a5 100644 --- a/drivers/usb/host/xhci-dbg.c +++ b/drivers/usb/host/xhci-dbg.c | |||
@@ -169,9 +169,10 @@ static void xhci_print_ports(struct xhci_hcd *xhci) | |||
169 | } | 169 | } |
170 | } | 170 | } |
171 | 171 | ||
172 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num) | 172 | void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num) |
173 | { | 173 | { |
174 | void *addr; | 174 | struct xhci_intr_reg __iomem *ir_set = &xhci->run_regs->ir_set[set_num]; |
175 | void __iomem *addr; | ||
175 | u32 temp; | 176 | u32 temp; |
176 | u64 temp_64; | 177 | u64 temp_64; |
177 | 178 | ||
@@ -449,7 +450,7 @@ char *xhci_get_slot_state(struct xhci_hcd *xhci, | |||
449 | } | 450 | } |
450 | } | 451 | } |
451 | 452 | ||
452 | void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | 453 | static void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) |
453 | { | 454 | { |
454 | /* Fields are 32 bits wide, DMA addresses are in bytes */ | 455 | /* Fields are 32 bits wide, DMA addresses are in bytes */ |
455 | int field_size = 32 / 8; | 456 | int field_size = 32 / 8; |
@@ -488,7 +489,7 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx) | |||
488 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); | 489 | dbg_rsvd64(xhci, (u64 *)slot_ctx, dma); |
489 | } | 490 | } |
490 | 491 | ||
491 | void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, | 492 | static void xhci_dbg_ep_ctx(struct xhci_hcd *xhci, |
492 | struct xhci_container_ctx *ctx, | 493 | struct xhci_container_ctx *ctx, |
493 | unsigned int last_ep) | 494 | unsigned int last_ep) |
494 | { | 495 | { |
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 1d0f45f0e7a6..a9534396e85b 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c | |||
@@ -307,7 +307,7 @@ struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | |||
307 | 307 | ||
308 | /***************** Streams structures manipulation *************************/ | 308 | /***************** Streams structures manipulation *************************/ |
309 | 309 | ||
310 | void xhci_free_stream_ctx(struct xhci_hcd *xhci, | 310 | static void xhci_free_stream_ctx(struct xhci_hcd *xhci, |
311 | unsigned int num_stream_ctxs, | 311 | unsigned int num_stream_ctxs, |
312 | struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) | 312 | struct xhci_stream_ctx *stream_ctx, dma_addr_t dma) |
313 | { | 313 | { |
@@ -335,7 +335,7 @@ void xhci_free_stream_ctx(struct xhci_hcd *xhci, | |||
335 | * The stream context array must be a power of 2, and can be as small as | 335 | * The stream context array must be a power of 2, and can be as small as |
336 | * 64 bytes or as large as 1MB. | 336 | * 64 bytes or as large as 1MB. |
337 | */ | 337 | */ |
338 | struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, | 338 | static struct xhci_stream_ctx *xhci_alloc_stream_ctx(struct xhci_hcd *xhci, |
339 | unsigned int num_stream_ctxs, dma_addr_t *dma, | 339 | unsigned int num_stream_ctxs, dma_addr_t *dma, |
340 | gfp_t mem_flags) | 340 | gfp_t mem_flags) |
341 | { | 341 | { |
@@ -1900,11 +1900,11 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
1900 | val &= DBOFF_MASK; | 1900 | val &= DBOFF_MASK; |
1901 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | 1901 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" |
1902 | " from cap regs base addr\n", val); | 1902 | " from cap regs base addr\n", val); |
1903 | xhci->dba = (void *) xhci->cap_regs + val; | 1903 | xhci->dba = (void __iomem *) xhci->cap_regs + val; |
1904 | xhci_dbg_regs(xhci); | 1904 | xhci_dbg_regs(xhci); |
1905 | xhci_print_run_regs(xhci); | 1905 | xhci_print_run_regs(xhci); |
1906 | /* Set ir_set to interrupt register set 0 */ | 1906 | /* Set ir_set to interrupt register set 0 */ |
1907 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | 1907 | xhci->ir_set = &xhci->run_regs->ir_set[0]; |
1908 | 1908 | ||
1909 | /* | 1909 | /* |
1910 | * Event ring setup: Allocate a normal ring, but also setup | 1910 | * Event ring setup: Allocate a normal ring, but also setup |
@@ -1961,7 +1961,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |||
1961 | /* Set the event ring dequeue address */ | 1961 | /* Set the event ring dequeue address */ |
1962 | xhci_set_hc_event_deq(xhci); | 1962 | xhci_set_hc_event_deq(xhci); |
1963 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); | 1963 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
1964 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 1964 | xhci_print_ir_set(xhci, 0); |
1965 | 1965 | ||
1966 | /* | 1966 | /* |
1967 | * XXX: Might need to set the Interrupter Moderation Register to | 1967 | * XXX: Might need to set the Interrupter Moderation Register to |
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 3e8211c1ce5a..3289bf4832c9 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c | |||
@@ -474,8 +474,11 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
474 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, | 474 | state->new_deq_seg = find_trb_seg(cur_td->start_seg, |
475 | dev->eps[ep_index].stopped_trb, | 475 | dev->eps[ep_index].stopped_trb, |
476 | &state->new_cycle_state); | 476 | &state->new_cycle_state); |
477 | if (!state->new_deq_seg) | 477 | if (!state->new_deq_seg) { |
478 | BUG(); | 478 | WARN_ON(1); |
479 | return; | ||
480 | } | ||
481 | |||
479 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ | 482 | /* Dig out the cycle state saved by the xHC during the stop ep cmd */ |
480 | xhci_dbg(xhci, "Finding endpoint context\n"); | 483 | xhci_dbg(xhci, "Finding endpoint context\n"); |
481 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); | 484 | ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index); |
@@ -486,8 +489,10 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci, | |||
486 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, | 489 | state->new_deq_seg = find_trb_seg(state->new_deq_seg, |
487 | state->new_deq_ptr, | 490 | state->new_deq_ptr, |
488 | &state->new_cycle_state); | 491 | &state->new_cycle_state); |
489 | if (!state->new_deq_seg) | 492 | if (!state->new_deq_seg) { |
490 | BUG(); | 493 | WARN_ON(1); |
494 | return; | ||
495 | } | ||
491 | 496 | ||
492 | trb = &state->new_deq_ptr->generic; | 497 | trb = &state->new_deq_ptr->generic; |
493 | if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && | 498 | if ((trb->field[3] & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK) && |
@@ -2363,12 +2368,13 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) | |||
2363 | 2368 | ||
2364 | /* Scatter gather list entries may cross 64KB boundaries */ | 2369 | /* Scatter gather list entries may cross 64KB boundaries */ |
2365 | running_total = TRB_MAX_BUFF_SIZE - | 2370 | running_total = TRB_MAX_BUFF_SIZE - |
2366 | (sg_dma_address(sg) & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2371 | (sg_dma_address(sg) & (TRB_MAX_BUFF_SIZE - 1)); |
2372 | running_total &= TRB_MAX_BUFF_SIZE - 1; | ||
2367 | if (running_total != 0) | 2373 | if (running_total != 0) |
2368 | num_trbs++; | 2374 | num_trbs++; |
2369 | 2375 | ||
2370 | /* How many more 64KB chunks to transfer, how many more TRBs? */ | 2376 | /* How many more 64KB chunks to transfer, how many more TRBs? */ |
2371 | while (running_total < sg_dma_len(sg)) { | 2377 | while (running_total < sg_dma_len(sg) && running_total < temp) { |
2372 | num_trbs++; | 2378 | num_trbs++; |
2373 | running_total += TRB_MAX_BUFF_SIZE; | 2379 | running_total += TRB_MAX_BUFF_SIZE; |
2374 | } | 2380 | } |
@@ -2394,11 +2400,11 @@ static unsigned int count_sg_trbs_needed(struct xhci_hcd *xhci, struct urb *urb) | |||
2394 | static void check_trb_math(struct urb *urb, int num_trbs, int running_total) | 2400 | static void check_trb_math(struct urb *urb, int num_trbs, int running_total) |
2395 | { | 2401 | { |
2396 | if (num_trbs != 0) | 2402 | if (num_trbs != 0) |
2397 | dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " | 2403 | dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated number of " |
2398 | "TRBs, %d left\n", __func__, | 2404 | "TRBs, %d left\n", __func__, |
2399 | urb->ep->desc.bEndpointAddress, num_trbs); | 2405 | urb->ep->desc.bEndpointAddress, num_trbs); |
2400 | if (running_total != urb->transfer_buffer_length) | 2406 | if (running_total != urb->transfer_buffer_length) |
2401 | dev_dbg(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " | 2407 | dev_err(&urb->dev->dev, "%s - ep %#x - Miscalculated tx length, " |
2402 | "queued %#x (%d), asked for %#x (%d)\n", | 2408 | "queued %#x (%d), asked for %#x (%d)\n", |
2403 | __func__, | 2409 | __func__, |
2404 | urb->ep->desc.bEndpointAddress, | 2410 | urb->ep->desc.bEndpointAddress, |
@@ -2533,8 +2539,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2533 | sg = urb->sg; | 2539 | sg = urb->sg; |
2534 | addr = (u64) sg_dma_address(sg); | 2540 | addr = (u64) sg_dma_address(sg); |
2535 | this_sg_len = sg_dma_len(sg); | 2541 | this_sg_len = sg_dma_len(sg); |
2536 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2542 | trb_buff_len = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2537 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | ||
2538 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); | 2543 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); |
2539 | if (trb_buff_len > urb->transfer_buffer_length) | 2544 | if (trb_buff_len > urb->transfer_buffer_length) |
2540 | trb_buff_len = urb->transfer_buffer_length; | 2545 | trb_buff_len = urb->transfer_buffer_length; |
@@ -2572,7 +2577,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2572 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 2577 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
2573 | (unsigned int) addr + trb_buff_len); | 2578 | (unsigned int) addr + trb_buff_len); |
2574 | if (TRB_MAX_BUFF_SIZE - | 2579 | if (TRB_MAX_BUFF_SIZE - |
2575 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)) < trb_buff_len) { | 2580 | (addr & (TRB_MAX_BUFF_SIZE - 1)) < trb_buff_len) { |
2576 | xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); | 2581 | xhci_warn(xhci, "WARN: sg dma xfer crosses 64KB boundaries!\n"); |
2577 | xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", | 2582 | xhci_dbg(xhci, "Next boundary at %#x, end dma = %#x\n", |
2578 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), | 2583 | (unsigned int) (addr + TRB_MAX_BUFF_SIZE) & ~(TRB_MAX_BUFF_SIZE - 1), |
@@ -2616,7 +2621,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2616 | } | 2621 | } |
2617 | 2622 | ||
2618 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2623 | trb_buff_len = TRB_MAX_BUFF_SIZE - |
2619 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2624 | (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2620 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); | 2625 | trb_buff_len = min_t(int, trb_buff_len, this_sg_len); |
2621 | if (running_total + trb_buff_len > urb->transfer_buffer_length) | 2626 | if (running_total + trb_buff_len > urb->transfer_buffer_length) |
2622 | trb_buff_len = | 2627 | trb_buff_len = |
@@ -2656,7 +2661,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2656 | num_trbs = 0; | 2661 | num_trbs = 0; |
2657 | /* How much data is (potentially) left before the 64KB boundary? */ | 2662 | /* How much data is (potentially) left before the 64KB boundary? */ |
2658 | running_total = TRB_MAX_BUFF_SIZE - | 2663 | running_total = TRB_MAX_BUFF_SIZE - |
2659 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2664 | (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); |
2665 | running_total &= TRB_MAX_BUFF_SIZE - 1; | ||
2660 | 2666 | ||
2661 | /* If there's some data on this 64KB chunk, or we have to send a | 2667 | /* If there's some data on this 64KB chunk, or we have to send a |
2662 | * zero-length transfer, we need at least one TRB | 2668 | * zero-length transfer, we need at least one TRB |
@@ -2700,8 +2706,8 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, | |||
2700 | /* How much data is in the first TRB? */ | 2706 | /* How much data is in the first TRB? */ |
2701 | addr = (u64) urb->transfer_dma; | 2707 | addr = (u64) urb->transfer_dma; |
2702 | trb_buff_len = TRB_MAX_BUFF_SIZE - | 2708 | trb_buff_len = TRB_MAX_BUFF_SIZE - |
2703 | (urb->transfer_dma & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2709 | (urb->transfer_dma & (TRB_MAX_BUFF_SIZE - 1)); |
2704 | if (urb->transfer_buffer_length < trb_buff_len) | 2710 | if (trb_buff_len > urb->transfer_buffer_length) |
2705 | trb_buff_len = urb->transfer_buffer_length; | 2711 | trb_buff_len = urb->transfer_buffer_length; |
2706 | 2712 | ||
2707 | first_trb = true; | 2713 | first_trb = true; |
@@ -2879,8 +2885,8 @@ static int count_isoc_trbs_needed(struct xhci_hcd *xhci, | |||
2879 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); | 2885 | addr = (u64) (urb->transfer_dma + urb->iso_frame_desc[i].offset); |
2880 | td_len = urb->iso_frame_desc[i].length; | 2886 | td_len = urb->iso_frame_desc[i].length; |
2881 | 2887 | ||
2882 | running_total = TRB_MAX_BUFF_SIZE - | 2888 | running_total = TRB_MAX_BUFF_SIZE - (addr & (TRB_MAX_BUFF_SIZE - 1)); |
2883 | (addr & ((1 << TRB_MAX_BUFF_SHIFT) - 1)); | 2889 | running_total &= TRB_MAX_BUFF_SIZE - 1; |
2884 | if (running_total != 0) | 2890 | if (running_total != 0) |
2885 | num_trbs++; | 2891 | num_trbs++; |
2886 | 2892 | ||
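
The repeated edit in the xhci-ring hunks above swaps `(1 << TRB_MAX_BUFF_SHIFT) - 1` for the equivalent `TRB_MAX_BUFF_SIZE - 1` mask and, in the running-total cases, adds a second masking step so a buffer that starts exactly on a 64 KB boundary contributes 0 bytes to the current chunk instead of a full 65536. A small self-contained check of that arithmetic, assuming TRB_MAX_BUFF_SIZE is the driver's 64 KB power of two:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define TRB_MAX_BUFF_SIZE (1u << 16)	/* 64 KB, power of two */

/* Bytes from addr up to the next 64 KB boundary, as computed above. */
static uint32_t bytes_in_first_chunk(uint64_t addr)
{
	uint32_t n = TRB_MAX_BUFF_SIZE - (uint32_t)(addr & (TRB_MAX_BUFF_SIZE - 1));

	/* Extra mask from the patch: an aligned addr now yields 0,
	 * not 65536, so no bogus partial chunk is counted. */
	n &= TRB_MAX_BUFF_SIZE - 1;
	return n;
}

int main(void)
{
	assert(bytes_in_first_chunk(0x20000) == 0);	 /* aligned start */
	assert(bytes_in_first_chunk(0x20004) == 0xfffc); /* 4 bytes past a boundary */
	printf("TRB boundary math checks out\n");
	return 0;
}
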
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 34cf4e165877..2083fc2179b2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c | |||
@@ -109,7 +109,7 @@ int xhci_halt(struct xhci_hcd *xhci) | |||
109 | /* | 109 | /* |
110 | * Set the run bit and wait for the host to be running. | 110 | * Set the run bit and wait for the host to be running. |
111 | */ | 111 | */ |
112 | int xhci_start(struct xhci_hcd *xhci) | 112 | static int xhci_start(struct xhci_hcd *xhci) |
113 | { | 113 | { |
114 | u32 temp; | 114 | u32 temp; |
115 | int ret; | 115 | int ret; |
@@ -329,7 +329,7 @@ int xhci_init(struct usb_hcd *hcd) | |||
329 | 329 | ||
330 | 330 | ||
331 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING | 331 | #ifdef CONFIG_USB_XHCI_HCD_DEBUGGING |
332 | void xhci_event_ring_work(unsigned long arg) | 332 | static void xhci_event_ring_work(unsigned long arg) |
333 | { | 333 | { |
334 | unsigned long flags; | 334 | unsigned long flags; |
335 | int temp; | 335 | int temp; |
@@ -473,7 +473,7 @@ int xhci_run(struct usb_hcd *hcd) | |||
473 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); | 473 | xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); |
474 | xhci_writel(xhci, ER_IRQ_ENABLE(temp), | 474 | xhci_writel(xhci, ER_IRQ_ENABLE(temp), |
475 | &xhci->ir_set->irq_pending); | 475 | &xhci->ir_set->irq_pending); |
476 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 476 | xhci_print_ir_set(xhci, 0); |
477 | 477 | ||
478 | if (NUM_TEST_NOOPS > 0) | 478 | if (NUM_TEST_NOOPS > 0) |
479 | doorbell = xhci_setup_one_noop(xhci); | 479 | doorbell = xhci_setup_one_noop(xhci); |
@@ -528,7 +528,7 @@ void xhci_stop(struct usb_hcd *hcd) | |||
528 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 528 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
529 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), | 529 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), |
530 | &xhci->ir_set->irq_pending); | 530 | &xhci->ir_set->irq_pending); |
531 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 531 | xhci_print_ir_set(xhci, 0); |
532 | 532 | ||
533 | xhci_dbg(xhci, "cleaning up memory\n"); | 533 | xhci_dbg(xhci, "cleaning up memory\n"); |
534 | xhci_mem_cleanup(xhci); | 534 | xhci_mem_cleanup(xhci); |
@@ -755,7 +755,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) | |||
755 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); | 755 | temp = xhci_readl(xhci, &xhci->ir_set->irq_pending); |
756 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), | 756 | xhci_writel(xhci, ER_IRQ_DISABLE(temp), |
757 | &xhci->ir_set->irq_pending); | 757 | &xhci->ir_set->irq_pending); |
758 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | 758 | xhci_print_ir_set(xhci, 0); |
759 | 759 | ||
760 | xhci_dbg(xhci, "cleaning up memory\n"); | 760 | xhci_dbg(xhci, "cleaning up memory\n"); |
761 | xhci_mem_cleanup(xhci); | 761 | xhci_mem_cleanup(xhci); |
@@ -857,7 +857,7 @@ unsigned int xhci_last_valid_endpoint(u32 added_ctxs) | |||
857 | /* Returns 1 if the arguments are OK; | 857 | /* Returns 1 if the arguments are OK; |
858 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. | 858 | * returns 0 if this is a root hub; returns -EINVAL for NULL pointers. |
859 | */ | 859 | */ |
860 | int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, | 860 | static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, |
861 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, | 861 | struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev, |
862 | const char *func) { | 862 | const char *func) { |
863 | struct xhci_hcd *xhci; | 863 | struct xhci_hcd *xhci; |
@@ -1693,7 +1693,7 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci, | |||
1693 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); | 1693 | xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags)); |
1694 | } | 1694 | } |
1695 | 1695 | ||
1696 | void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, | 1696 | static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci, |
1697 | unsigned int slot_id, unsigned int ep_index, | 1697 | unsigned int slot_id, unsigned int ep_index, |
1698 | struct xhci_dequeue_state *deq_state) | 1698 | struct xhci_dequeue_state *deq_state) |
1699 | { | 1699 | { |
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 7f236fd22015..7f127df6dd55 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h | |||
@@ -1348,7 +1348,7 @@ static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci) | |||
1348 | } | 1348 | } |
1349 | 1349 | ||
1350 | /* xHCI debugging */ | 1350 | /* xHCI debugging */ |
1351 | void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num); | 1351 | void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num); |
1352 | void xhci_print_registers(struct xhci_hcd *xhci); | 1352 | void xhci_print_registers(struct xhci_hcd *xhci); |
1353 | void xhci_dbg_regs(struct xhci_hcd *xhci); | 1353 | void xhci_dbg_regs(struct xhci_hcd *xhci); |
1354 | void xhci_print_run_regs(struct xhci_hcd *xhci); | 1354 | void xhci_print_run_regs(struct xhci_hcd *xhci); |
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c index 54a8bd1047d6..c292d5c499e7 100644 --- a/drivers/usb/musb/musb_core.c +++ b/drivers/usb/musb/musb_core.c | |||
@@ -1864,6 +1864,7 @@ allocate_instance(struct device *dev, | |||
1864 | INIT_LIST_HEAD(&musb->out_bulk); | 1864 | INIT_LIST_HEAD(&musb->out_bulk); |
1865 | 1865 | ||
1866 | hcd->uses_new_polling = 1; | 1866 | hcd->uses_new_polling = 1; |
1867 | hcd->has_tt = 1; | ||
1867 | 1868 | ||
1868 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; | 1869 | musb->vbuserr_retry = VBUSERR_RETRY_COUNT; |
1869 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; | 1870 | musb->a_wait_bcon = OTG_TIME_A_WAIT_BCON; |
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h index d74a8113ae74..e6400be8a0f8 100644 --- a/drivers/usb/musb/musb_core.h +++ b/drivers/usb/musb/musb_core.h | |||
@@ -488,6 +488,15 @@ struct musb { | |||
488 | unsigned set_address:1; | 488 | unsigned set_address:1; |
489 | unsigned test_mode:1; | 489 | unsigned test_mode:1; |
490 | unsigned softconnect:1; | 490 | unsigned softconnect:1; |
491 | |||
492 | u8 address; | ||
493 | u8 test_mode_nr; | ||
494 | u16 ackpend; /* ep0 */ | ||
495 | enum musb_g_ep0_state ep0_state; | ||
496 | struct usb_gadget g; /* the gadget */ | ||
497 | struct usb_gadget_driver *gadget_driver; /* its driver */ | ||
498 | #endif | ||
499 | |||
491 | /* | 500 | /* |
492 | * FIXME: Remove this flag. | 501 | * FIXME: Remove this flag. |
493 | * | 502 | * |
@@ -501,14 +510,6 @@ struct musb { | |||
501 | */ | 510 | */ |
502 | unsigned double_buffer_not_ok:1 __deprecated; | 511 | unsigned double_buffer_not_ok:1 __deprecated; |
503 | 512 | ||
504 | u8 address; | ||
505 | u8 test_mode_nr; | ||
506 | u16 ackpend; /* ep0 */ | ||
507 | enum musb_g_ep0_state ep0_state; | ||
508 | struct usb_gadget g; /* the gadget */ | ||
509 | struct usb_gadget_driver *gadget_driver; /* its driver */ | ||
510 | #endif | ||
511 | |||
512 | struct musb_hdrc_config *config; | 513 | struct musb_hdrc_config *config; |
513 | 514 | ||
514 | #ifdef MUSB_CONFIG_PROC_FS | 515 | #ifdef MUSB_CONFIG_PROC_FS |
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c index a3f12333fc41..bc8badd16897 100644 --- a/drivers/usb/musb/omap2430.c +++ b/drivers/usb/musb/omap2430.c | |||
@@ -362,6 +362,7 @@ static int omap2430_musb_init(struct musb *musb) | |||
362 | 362 | ||
363 | static int omap2430_musb_exit(struct musb *musb) | 363 | static int omap2430_musb_exit(struct musb *musb) |
364 | { | 364 | { |
365 | del_timer_sync(&musb_idle_timer); | ||
365 | 366 | ||
366 | omap2430_low_level_exit(musb); | 367 | omap2430_low_level_exit(musb); |
367 | otg_put_transceiver(musb->xceiv); | 368 | otg_put_transceiver(musb->xceiv); |
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index 7481ff8a49e4..0457813eebee 100644 --- a/drivers/usb/serial/sierra.c +++ b/drivers/usb/serial/sierra.c | |||
@@ -301,6 +301,9 @@ static const struct usb_device_id id_table[] = { | |||
301 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ | 301 | { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ |
302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | 302 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist |
303 | }, | 303 | }, |
304 | { USB_DEVICE(0x0f3d, 0x68A3), /* Airprime/Sierra Wireless Direct IP modems */ | ||
305 | .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist | ||
306 | }, | ||
304 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ | 307 | { USB_DEVICE(0x413C, 0x08133) }, /* Dell Computer Corp. Wireless 5720 VZW Mobile Broadband (EVDO Rev-A) Minicard GPS Port */ |
305 | 308 | ||
306 | { } | 309 | { } |
diff --git a/drivers/usb/serial/usb_wwan.c b/drivers/usb/serial/usb_wwan.c index b004b2a485c3..9c014e2ecd68 100644 --- a/drivers/usb/serial/usb_wwan.c +++ b/drivers/usb/serial/usb_wwan.c | |||
@@ -295,12 +295,15 @@ static void usb_wwan_indat_callback(struct urb *urb) | |||
295 | __func__, status, endpoint); | 295 | __func__, status, endpoint); |
296 | } else { | 296 | } else { |
297 | tty = tty_port_tty_get(&port->port); | 297 | tty = tty_port_tty_get(&port->port); |
298 | if (urb->actual_length) { | 298 | if (tty) { |
299 | tty_insert_flip_string(tty, data, urb->actual_length); | 299 | if (urb->actual_length) { |
300 | tty_flip_buffer_push(tty); | 300 | tty_insert_flip_string(tty, data, |
301 | } else | 301 | urb->actual_length); |
302 | dbg("%s: empty read urb received", __func__); | 302 | tty_flip_buffer_push(tty); |
303 | tty_kref_put(tty); | 303 | } else |
304 | dbg("%s: empty read urb received", __func__); | ||
305 | tty_kref_put(tty); | ||
306 | } | ||
304 | 307 | ||
305 | /* Resubmit urb so we continue receiving */ | 308 | /* Resubmit urb so we continue receiving */ |
306 | if (status != -ESHUTDOWN) { | 309 | if (status != -ESHUTDOWN) { |
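
usb_wwan_indat_callback() now only touches the tty if tty_port_tty_get() actually returned a reference, and drops that reference inside the same branch. The sketch below models the take-ref / check-NULL / use / put pattern with a plain refcounted object; the names are illustrative and not the serial core's API.

#include <stdio.h>
#include <stdlib.h>

struct obj { int refs; };

/* May return NULL, e.g. when the port has no tty attached. */
static struct obj *obj_get(struct obj *o)
{
	if (!o)
		return NULL;
	o->refs++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refs == 0)
		free(o);
}

static void rx_callback(struct obj *maybe_tty, const char *data)
{
	struct obj *tty = obj_get(maybe_tty);

	if (tty) {			/* the guard the patch adds */
		printf("push \"%s\" to consumer\n", data);
		obj_put(tty);		/* drop only what we took */
	}
	/* resubmit the URB regardless, as the driver does */
}

int main(void)
{
	struct obj *tty = calloc(1, sizeof(*tty));

	if (!tty)
		return 1;
	tty->refs = 1;
	rx_callback(tty, "hello");
	rx_callback(NULL, "dropped");	/* no tty: data is skipped safely */
	obj_put(tty);
	return 0;
}
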
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 15a5d89b7f39..1c11959a7d58 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/uaccess.h> | 27 | #include <linux/uaccess.h> |
28 | #include <linux/usb.h> | 28 | #include <linux/usb.h> |
29 | #include <linux/usb/serial.h> | 29 | #include <linux/usb/serial.h> |
30 | #include <linux/usb/cdc.h> | ||
30 | #include "visor.h" | 31 | #include "visor.h" |
31 | 32 | ||
32 | /* | 33 | /* |
@@ -479,6 +480,17 @@ static int visor_probe(struct usb_serial *serial, | |||
479 | 480 | ||
480 | dbg("%s", __func__); | 481 | dbg("%s", __func__); |
481 | 482 | ||
483 | /* | ||
484 | * some Samsung Android phones in modem mode have the same ID | ||
485 | * as SPH-I500, but they are ACM devices, so don't bind to them | ||
486 | */ | ||
487 | if (id->idVendor == SAMSUNG_VENDOR_ID && | ||
488 | id->idProduct == SAMSUNG_SPH_I500_ID && | ||
489 | serial->dev->descriptor.bDeviceClass == USB_CLASS_COMM && | ||
490 | serial->dev->descriptor.bDeviceSubClass == | ||
491 | USB_CDC_SUBCLASS_ACM) | ||
492 | return -ENODEV; | ||
493 | |||
482 | if (serial->dev->actconfig->desc.bConfigurationValue != 1) { | 494 | if (serial->dev->actconfig->desc.bConfigurationValue != 1) { |
483 | dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", | 495 | dev_err(&serial->dev->dev, "active config #%d != 1 ??\n", |
484 | serial->dev->actconfig->desc.bConfigurationValue); | 496 | serial->dev->actconfig->desc.bConfigurationValue); |
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 8010aaeb5adb..dd0e84a9bd2f 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c | |||
@@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) | |||
239 | lcd->spi = spi; | 239 | lcd->spi = spi; |
240 | lcd->power = FB_BLANK_POWERDOWN; | 240 | lcd->power = FB_BLANK_POWERDOWN; |
241 | lcd->buffer = kzalloc(8, GFP_KERNEL); | 241 | lcd->buffer = kzalloc(8, GFP_KERNEL); |
242 | if (!lcd->buffer) { | ||
243 | ret = -ENOMEM; | ||
244 | goto out_free_lcd; | ||
245 | } | ||
242 | 246 | ||
243 | ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); | 247 | ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); |
244 | if (IS_ERR(ld)) { | 248 | if (IS_ERR(ld)) { |
245 | ret = PTR_ERR(ld); | 249 | ret = PTR_ERR(ld); |
246 | goto out_free_lcd; | 250 | goto out_free_buffer; |
247 | } | 251 | } |
248 | lcd->ld = ld; | 252 | lcd->ld = ld; |
249 | 253 | ||
@@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) | |||
257 | 261 | ||
258 | out_unregister: | 262 | out_unregister: |
259 | lcd_device_unregister(ld); | 263 | lcd_device_unregister(ld); |
264 | out_free_buffer: | ||
265 | kfree(lcd->buffer); | ||
260 | out_free_lcd: | 266 | out_free_lcd: |
261 | kfree(lcd); | 267 | kfree(lcd); |
262 | return ret; | 268 | return ret; |
@@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi) | |||
268 | 274 | ||
269 | ltv350qv_power(lcd, FB_BLANK_POWERDOWN); | 275 | ltv350qv_power(lcd, FB_BLANK_POWERDOWN); |
270 | lcd_device_unregister(lcd->ld); | 276 | lcd_device_unregister(lcd->ld); |
277 | kfree(lcd->buffer); | ||
271 | kfree(lcd); | 278 | kfree(lcd); |
272 | 279 | ||
273 | return 0; | 280 | return 0; |
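
The ltv350qv fix adds the missing allocation check and a second unwind label so the probe error path releases exactly what has been acquired, newest first. A user-space model of that goto-unwind ordering follows; the names and the simulated register failure are illustrative only.

#include <stdio.h>
#include <stdlib.h>

struct lcd { char *buffer; };

static int probe(int register_fails)
{
	struct lcd *lcd;
	int ret;

	lcd = calloc(1, sizeof(*lcd));
	if (!lcd)
		return -1;

	lcd->buffer = calloc(1, 8);
	if (!lcd->buffer) {
		ret = -1;
		goto out_free_lcd;	/* only lcd exists so far */
	}

	if (register_fails) {		/* stands in for lcd_device_register() failing */
		ret = -1;
		goto out_free_buffer;	/* undo buffer, then lcd */
	}

	printf("probe ok\n");		/* a real remove() would free these later */
	return 0;

out_free_buffer:
	free(lcd->buffer);
out_free_lcd:
	free(lcd);
	return ret;
}

int main(void)
{
	probe(0);
	probe(1);
	return 0;
}
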
diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index eca855a55c0d..3de4ba0260a5 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c | |||
@@ -646,7 +646,7 @@ static int __devexit cpwd_remove(struct platform_device *op) | |||
646 | struct cpwd *p = dev_get_drvdata(&op->dev); | 646 | struct cpwd *p = dev_get_drvdata(&op->dev); |
647 | int i; | 647 | int i; |
648 | 648 | ||
649 | for (i = 0; i < 4; i++) { | 649 | for (i = 0; i < WD_NUMDEVS; i++) { |
650 | misc_deregister(&p->devs[i].misc); | 650 | misc_deregister(&p->devs[i].misc); |
651 | 651 | ||
652 | if (!p->enabled) { | 652 | if (!p->enabled) { |
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 24b966d5061a..204a5603c4ae 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -710,7 +710,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
710 | return 0; | 710 | return 0; |
711 | } | 711 | } |
712 | 712 | ||
713 | static void __devexit hpwdt_exit_nmi_decoding(void) | 713 | static void hpwdt_exit_nmi_decoding(void) |
714 | { | 714 | { |
715 | unregister_die_notifier(&die_notifier); | 715 | unregister_die_notifier(&die_notifier); |
716 | if (cru_rom_addr) | 716 | if (cru_rom_addr) |
@@ -726,7 +726,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
726 | return 0; | 726 | return 0; |
727 | } | 727 | } |
728 | 728 | ||
729 | static void __devexit hpwdt_exit_nmi_decoding(void) | 729 | static void hpwdt_exit_nmi_decoding(void) |
730 | { | 730 | { |
731 | } | 731 | } |
732 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | 732 | #endif /* CONFIG_HPWDT_NMI_DECODING */ |
diff --git a/drivers/watchdog/sbc_fitpc2_wdt.c b/drivers/watchdog/sbc_fitpc2_wdt.c index c7d67e9a7465..79906255eeb6 100644 --- a/drivers/watchdog/sbc_fitpc2_wdt.c +++ b/drivers/watchdog/sbc_fitpc2_wdt.c | |||
@@ -201,11 +201,14 @@ static struct miscdevice fitpc2_wdt_miscdev = { | |||
201 | static int __init fitpc2_wdt_init(void) | 201 | static int __init fitpc2_wdt_init(void) |
202 | { | 202 | { |
203 | int err; | 203 | int err; |
204 | const char *brd_name; | ||
204 | 205 | ||
205 | if (!strstr(dmi_get_system_info(DMI_BOARD_NAME), "SBC-FITPC2")) | 206 | brd_name = dmi_get_system_info(DMI_BOARD_NAME); |
207 | |||
208 | if (!brd_name || !strstr(brd_name, "SBC-FITPC2")) | ||
206 | return -ENODEV; | 209 | return -ENODEV; |
207 | 210 | ||
208 | pr_info("%s found\n", dmi_get_system_info(DMI_BOARD_NAME)); | 211 | pr_info("%s found\n", brd_name); |
209 | 212 | ||
210 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { | 213 | if (!request_region(COMMAND_PORT, 1, WATCHDOG_NAME)) { |
211 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); | 214 | pr_err("I/O address 0x%04x already in use\n", COMMAND_PORT); |
diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 0461858e07d0..b61ab1c54293 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c | |||
@@ -508,7 +508,7 @@ static int __init sch311x_detect(int sio_config_port, unsigned short *addr) | |||
508 | sch311x_sio_outb(sio_config_port, 0x07, 0x0a); | 508 | sch311x_sio_outb(sio_config_port, 0x07, 0x0a); |
509 | 509 | ||
510 | /* Check if Logical Device Register is currently active */ | 510 | /* Check if Logical Device Register is currently active */ |
511 | if (sch311x_sio_inb(sio_config_port, 0x30) && 0x01 == 0) | 511 | if ((sch311x_sio_inb(sio_config_port, 0x30) & 0x01) == 0) |
512 | printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); | 512 | printk(KERN_INFO PFX "Seems that LDN 0x0a is not active...\n"); |
513 | 513 | ||
514 | /* Get the base address of the runtime registers */ | 514 | /* Get the base address of the runtime registers */ |
diff --git a/drivers/watchdog/w83697ug_wdt.c b/drivers/watchdog/w83697ug_wdt.c index a6c12dec91a1..df2a64dc9672 100644 --- a/drivers/watchdog/w83697ug_wdt.c +++ b/drivers/watchdog/w83697ug_wdt.c | |||
@@ -109,7 +109,7 @@ static int w83697ug_select_wd_register(void) | |||
109 | outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ | 109 | outb_p(0x08, WDT_EFDR); /* select logical device 8 (GPIO2) */ |
110 | outb_p(0x30, WDT_EFER); /* select CR30 */ | 110 | outb_p(0x30, WDT_EFER); /* select CR30 */ |
111 | c = inb_p(WDT_EFDR); | 111 | c = inb_p(WDT_EFDR); |
112 | outb_p(c || 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ | 112 | outb_p(c | 0x01, WDT_EFDR); /* set bit 0 to activate GPIO2 */ |
113 | 113 | ||
114 | return 0; | 114 | return 0; |
115 | } | 115 | } |
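
Two of the watchdog fixes above are pure operator bugs: w83697ug used the logical `||` where a bitwise `|` was needed, and sch311x wrote `x && 0x01 == 0`, which parses as `x && (0x01 == 0)` and is therefore always false. A quick demonstration with an arbitrary register value:

#include <stdio.h>

int main(void)
{
	unsigned char c = 0x30;	/* pretend register read */

	/* w83697ug: c || 0x01 is a logical OR and evaluates to 0 or 1,
	 * so the old code wrote 1 to the register instead of c with
	 * bit 0 set. */
	printf("c || 0x01 = 0x%02x\n", c || 0x01);	/* 0x01 */
	printf("c |  0x01 = 0x%02x\n", c | 0x01);	/* 0x31 */

	/* sch311x: the old test could never fire; the fix checks the
	 * bit explicitly. */
	printf("c && 0x01 == 0  -> %d\n", c && 0x01 == 0);	/* always 0 */
	printf("(c & 0x01) == 0 -> %d\n", (c & 0x01) == 0);	/* 1 here */
	return 0;
}
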
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index 9294f25dcb2c..718050ace08f 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c | |||
@@ -296,7 +296,7 @@ static int decrease_reservation(unsigned long nr_pages) | |||
296 | /* No more mappings: invalidate P2M and add to balloon. */ | 296 | /* No more mappings: invalidate P2M and add to balloon. */ |
297 | for (i = 0; i < nr_pages; i++) { | 297 | for (i = 0; i < nr_pages; i++) { |
298 | pfn = mfn_to_pfn(frame_list[i]); | 298 | pfn = mfn_to_pfn(frame_list[i]); |
299 | set_phys_to_machine(pfn, INVALID_P2M_ENTRY); | 299 | __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); |
300 | balloon_append(pfn_to_page(pfn)); | 300 | balloon_append(pfn_to_page(pfn)); |
301 | } | 301 | } |
302 | 302 | ||
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 74681478100a..149fa875e396 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -114,7 +114,7 @@ struct cpu_evtchn_s { | |||
114 | static __initdata struct cpu_evtchn_s init_evtchn_mask = { | 114 | static __initdata struct cpu_evtchn_s init_evtchn_mask = { |
115 | .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul, | 115 | .bits[0 ... (NR_EVENT_CHANNELS/BITS_PER_LONG)-1] = ~0ul, |
116 | }; | 116 | }; |
117 | static struct cpu_evtchn_s *cpu_evtchn_mask_p = &init_evtchn_mask; | 117 | static struct cpu_evtchn_s __refdata *cpu_evtchn_mask_p = &init_evtchn_mask; |
118 | 118 | ||
119 | static inline unsigned long *cpu_evtchn_mask(int cpu) | 119 | static inline unsigned long *cpu_evtchn_mask(int cpu) |
120 | { | 120 | { |
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
277 | 277 | ||
278 | BUG_ON(irq == -1); | 278 | BUG_ON(irq == -1); |
279 | #ifdef CONFIG_SMP | 279 | #ifdef CONFIG_SMP |
280 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); | 280 | cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); | 283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void) | |||
294 | 294 | ||
295 | /* By default all event channels notify CPU#0. */ | 295 | /* By default all event channels notify CPU#0. */ |
296 | for_each_irq_desc(i, desc) { | 296 | for_each_irq_desc(i, desc) { |
297 | cpumask_copy(desc->affinity, cpumask_of(0)); | 297 | cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); |
298 | } | 298 | } |
299 | #endif | 299 | #endif |
300 | 300 | ||
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port) | |||
376 | put_cpu(); | 376 | put_cpu(); |
377 | } | 377 | } |
378 | 378 | ||
379 | static int get_nr_hw_irqs(void) | 379 | static int xen_allocate_irq_dynamic(void) |
380 | { | 380 | { |
381 | int ret = 1; | 381 | int first = 0; |
382 | int irq; | ||
382 | 383 | ||
383 | #ifdef CONFIG_X86_IO_APIC | 384 | #ifdef CONFIG_X86_IO_APIC |
384 | ret = get_nr_irqs_gsi(); | 385 | /* |
386 | * For an HVM guest or domain 0 which see "real" (emulated or | ||
387 | * actual respectively) GSIs we allocate dynamic IRQs | ||
388 | * e.g. those corresponding to event channels or MSIs | ||
389 | * etc. from the range above those "real" GSIs to avoid | ||
390 | * collisions. | ||
391 | */ | ||
392 | if (xen_initial_domain() || xen_hvm_domain()) | ||
393 | first = get_nr_irqs_gsi(); | ||
385 | #endif | 394 | #endif |
386 | 395 | ||
387 | return ret; | 396 | retry: |
388 | } | 397 | irq = irq_alloc_desc_from(first, -1); |
389 | 398 | ||
390 | static int find_unbound_pirq(int type) | 399 | if (irq == -ENOMEM && first > NR_IRQS_LEGACY) { |
391 | { | 400 | printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n"); |
392 | int rc, i; | 401 | first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY); |
393 | struct physdev_get_free_pirq op_get_free_pirq; | 402 | goto retry; |
394 | op_get_free_pirq.type = type; | 403 | } |
395 | 404 | ||
396 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); | 405 | if (irq < 0) |
397 | if (!rc) | 406 | panic("No available IRQ to bind to: increase nr_irqs!\n"); |
398 | return op_get_free_pirq.pirq; | ||
399 | 407 | ||
400 | for (i = 0; i < nr_irqs; i++) { | 408 | return irq; |
401 | if (pirq_to_irq[i] < 0) | ||
402 | return i; | ||
403 | } | ||
404 | return -1; | ||
405 | } | 409 | } |
406 | 410 | ||
407 | static int find_unbound_irq(void) | 411 | static int xen_allocate_irq_gsi(unsigned gsi) |
408 | { | 412 | { |
409 | struct irq_data *data; | 413 | int irq; |
410 | int irq, res; | ||
411 | int bottom = get_nr_hw_irqs(); | ||
412 | int top = nr_irqs-1; | ||
413 | |||
414 | if (bottom == nr_irqs) | ||
415 | goto no_irqs; | ||
416 | 414 | ||
417 | /* This loop starts from the top of IRQ space and goes down. | 415 | /* |
418 | * We need this b/c if we have a PCI device in a Xen PV guest | 416 | * A PV guest has no concept of a GSI (since it has no ACPI |
419 | * we do not have an IO-APIC (though the backend might have them) | 417 | * nor access to/knowledge of the physical APICs). Therefore |
420 | * mapped in. To not have a collision of physical IRQs with the Xen | 418 | * all IRQs are dynamically allocated from the entire IRQ |
421 | * event channels start at the top of the IRQ space for virtual IRQs. | 419 | * space. |
422 | */ | 420 | */ |
423 | for (irq = top; irq > bottom; irq--) { | 421 | if (xen_pv_domain() && !xen_initial_domain()) |
424 | data = irq_get_irq_data(irq); | 422 | return xen_allocate_irq_dynamic(); |
425 | /* only 15->0 have init'd desc; handle irq > 16 */ | ||
426 | if (!data) | ||
427 | break; | ||
428 | if (data->chip == &no_irq_chip) | ||
429 | break; | ||
430 | if (data->chip != &xen_dynamic_chip) | ||
431 | continue; | ||
432 | if (irq_info[irq].type == IRQT_UNBOUND) | ||
433 | return irq; | ||
434 | } | ||
435 | |||
436 | if (irq == bottom) | ||
437 | goto no_irqs; | ||
438 | 423 | ||
439 | res = irq_alloc_desc_at(irq, -1); | 424 | /* Legacy IRQ descriptors are already allocated by the arch. */ |
425 | if (gsi < NR_IRQS_LEGACY) | ||
426 | return gsi; | ||
440 | 427 | ||
441 | if (WARN_ON(res != irq)) | 428 | irq = irq_alloc_desc_at(gsi, -1); |
442 | return -1; | 429 | if (irq < 0) |
430 | panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq); | ||
443 | 431 | ||
444 | return irq; | 432 | return irq; |
445 | |||
446 | no_irqs: | ||
447 | panic("No available IRQ to bind to: increase nr_irqs!\n"); | ||
448 | } | 433 | } |
449 | 434 | ||
450 | static bool identity_mapped_irq(unsigned irq) | 435 | static void xen_free_irq(unsigned irq) |
451 | { | 436 | { |
452 | /* identity map all the hardware irqs */ | 437 | /* Legacy IRQ descriptors are managed by the arch. */ |
453 | return irq < get_nr_hw_irqs(); | 438 | if (irq < NR_IRQS_LEGACY) |
439 | return; | ||
440 | |||
441 | irq_free_desc(irq); | ||
454 | } | 442 | } |
455 | 443 | ||
456 | static void pirq_unmask_notify(int irq) | 444 | static void pirq_unmask_notify(int irq) |
@@ -486,7 +474,7 @@ static bool probing_irq(int irq) | |||
486 | return desc && desc->action == NULL; | 474 | return desc && desc->action == NULL; |
487 | } | 475 | } |
488 | 476 | ||
489 | static unsigned int startup_pirq(unsigned int irq) | 477 | static unsigned int __startup_pirq(unsigned int irq) |
490 | { | 478 | { |
491 | struct evtchn_bind_pirq bind_pirq; | 479 | struct evtchn_bind_pirq bind_pirq; |
492 | struct irq_info *info = info_for_irq(irq); | 480 | struct irq_info *info = info_for_irq(irq); |
@@ -524,9 +512,15 @@ out: | |||
524 | return 0; | 512 | return 0; |
525 | } | 513 | } |
526 | 514 | ||
527 | static void shutdown_pirq(unsigned int irq) | 515 | static unsigned int startup_pirq(struct irq_data *data) |
516 | { | ||
517 | return __startup_pirq(data->irq); | ||
518 | } | ||
519 | |||
520 | static void shutdown_pirq(struct irq_data *data) | ||
528 | { | 521 | { |
529 | struct evtchn_close close; | 522 | struct evtchn_close close; |
523 | unsigned int irq = data->irq; | ||
530 | struct irq_info *info = info_for_irq(irq); | 524 | struct irq_info *info = info_for_irq(irq); |
531 | int evtchn = evtchn_from_irq(irq); | 525 | int evtchn = evtchn_from_irq(irq); |
532 | 526 | ||
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq) | |||
546 | info->evtchn = 0; | 540 | info->evtchn = 0; |
547 | } | 541 | } |
548 | 542 | ||
549 | static void enable_pirq(unsigned int irq) | 543 | static void enable_pirq(struct irq_data *data) |
550 | { | 544 | { |
551 | startup_pirq(irq); | 545 | startup_pirq(data); |
552 | } | 546 | } |
553 | 547 | ||
554 | static void disable_pirq(unsigned int irq) | 548 | static void disable_pirq(struct irq_data *data) |
555 | { | 549 | { |
556 | } | 550 | } |
557 | 551 | ||
558 | static void ack_pirq(unsigned int irq) | 552 | static void ack_pirq(struct irq_data *data) |
559 | { | 553 | { |
560 | int evtchn = evtchn_from_irq(irq); | 554 | int evtchn = evtchn_from_irq(data->irq); |
561 | 555 | ||
562 | move_native_irq(irq); | 556 | move_native_irq(data->irq); |
563 | 557 | ||
564 | if (VALID_EVTCHN(evtchn)) { | 558 | if (VALID_EVTCHN(evtchn)) { |
565 | mask_evtchn(evtchn); | 559 | mask_evtchn(evtchn); |
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq) | |||
567 | } | 561 | } |
568 | } | 562 | } |
569 | 563 | ||
570 | static void end_pirq(unsigned int irq) | ||
571 | { | ||
572 | int evtchn = evtchn_from_irq(irq); | ||
573 | struct irq_desc *desc = irq_to_desc(irq); | ||
574 | |||
575 | if (WARN_ON(!desc)) | ||
576 | return; | ||
577 | |||
578 | if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == | ||
579 | (IRQ_DISABLED|IRQ_PENDING)) { | ||
580 | shutdown_pirq(irq); | ||
581 | } else if (VALID_EVTCHN(evtchn)) { | ||
582 | unmask_evtchn(evtchn); | ||
583 | pirq_unmask_notify(irq); | ||
584 | } | ||
585 | } | ||
586 | |||
587 | static int find_irq_by_gsi(unsigned gsi) | 564 | static int find_irq_by_gsi(unsigned gsi) |
588 | { | 565 | { |
589 | int irq; | 566 | int irq; |
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
638 | goto out; /* XXX need refcount? */ | 615 | goto out; /* XXX need refcount? */ |
639 | } | 616 | } |
640 | 617 | ||
641 | /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore | 618 | irq = xen_allocate_irq_gsi(gsi); |
642 | * we are using the !xen_initial_domain() to drop in the function.*/ | ||
643 | if (identity_mapped_irq(gsi) || (!xen_initial_domain() && | ||
644 | xen_pv_domain())) { | ||
645 | irq = gsi; | ||
646 | irq_alloc_desc_at(irq, -1); | ||
647 | } else | ||
648 | irq = find_unbound_irq(); | ||
649 | 619 | ||
650 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 620 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, |
651 | handle_level_irq, name); | 621 | handle_level_irq, name); |
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
658 | * this in the priv domain. */ | 628 | * this in the priv domain. */ |
659 | if (xen_initial_domain() && | 629 | if (xen_initial_domain() && |
660 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { | 630 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { |
661 | irq_free_desc(irq); | 631 | xen_free_irq(irq); |
662 | irq = -ENOSPC; | 632 | irq = -ENOSPC; |
663 | goto out; | 633 | goto out; |
664 | } | 634 | } |
@@ -674,87 +644,46 @@ out: | |||
674 | } | 644 | } |
675 | 645 | ||
676 | #ifdef CONFIG_PCI_MSI | 646 | #ifdef CONFIG_PCI_MSI |
677 | #include <linux/msi.h> | 647 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc) |
678 | #include "../pci/msi.h" | ||
679 | |||
680 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) | ||
681 | { | 648 | { |
682 | spin_lock(&irq_mapping_update_lock); | 649 | int rc; |
683 | 650 | struct physdev_get_free_pirq op_get_free_pirq; | |
684 | if (alloc & XEN_ALLOC_IRQ) { | ||
685 | *irq = find_unbound_irq(); | ||
686 | if (*irq == -1) | ||
687 | goto out; | ||
688 | } | ||
689 | |||
690 | if (alloc & XEN_ALLOC_PIRQ) { | ||
691 | *pirq = find_unbound_pirq(MAP_PIRQ_TYPE_MSI); | ||
692 | if (*pirq == -1) | ||
693 | goto out; | ||
694 | } | ||
695 | 651 | ||
696 | set_irq_chip_and_handler_name(*irq, &xen_pirq_chip, | 652 | op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI; |
697 | handle_level_irq, name); | 653 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); |
698 | 654 | ||
699 | irq_info[*irq] = mk_pirq_info(0, *pirq, 0, 0); | 655 | WARN_ONCE(rc == -ENOSYS, |
700 | pirq_to_irq[*pirq] = *irq; | 656 | "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n"); |
701 | 657 | ||
702 | out: | 658 | return rc ? -1 : op_get_free_pirq.pirq; |
703 | spin_unlock(&irq_mapping_update_lock); | ||
704 | } | 659 | } |
705 | 660 | ||
706 | int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) | 661 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
662 | int pirq, int vector, const char *name) | ||
707 | { | 663 | { |
708 | int irq = -1; | 664 | int irq, ret; |
709 | struct physdev_map_pirq map_irq; | ||
710 | int rc; | ||
711 | int pos; | ||
712 | u32 table_offset, bir; | ||
713 | |||
714 | memset(&map_irq, 0, sizeof(map_irq)); | ||
715 | map_irq.domid = DOMID_SELF; | ||
716 | map_irq.type = MAP_PIRQ_TYPE_MSI; | ||
717 | map_irq.index = -1; | ||
718 | map_irq.pirq = -1; | ||
719 | map_irq.bus = dev->bus->number; | ||
720 | map_irq.devfn = dev->devfn; | ||
721 | |||
722 | if (type == PCI_CAP_ID_MSIX) { | ||
723 | pos = pci_find_capability(dev, PCI_CAP_ID_MSIX); | ||
724 | |||
725 | pci_read_config_dword(dev, msix_table_offset_reg(pos), | ||
726 | &table_offset); | ||
727 | bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK); | ||
728 | |||
729 | map_irq.table_base = pci_resource_start(dev, bir); | ||
730 | map_irq.entry_nr = msidesc->msi_attrib.entry_nr; | ||
731 | } | ||
732 | 665 | ||
733 | spin_lock(&irq_mapping_update_lock); | 666 | spin_lock(&irq_mapping_update_lock); |
734 | 667 | ||
735 | irq = find_unbound_irq(); | 668 | irq = xen_allocate_irq_dynamic(); |
736 | |||
737 | if (irq == -1) | 669 | if (irq == -1) |
738 | goto out; | 670 | goto out; |
739 | 671 | ||
740 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq); | ||
741 | if (rc) { | ||
742 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | ||
743 | |||
744 | irq_free_desc(irq); | ||
745 | |||
746 | irq = -1; | ||
747 | goto out; | ||
748 | } | ||
749 | irq_info[irq] = mk_pirq_info(0, map_irq.pirq, 0, map_irq.index); | ||
750 | |||
751 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 672 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, |
752 | handle_level_irq, | 673 | handle_level_irq, name); |
753 | (type == PCI_CAP_ID_MSIX) ? "msi-x":"msi"); | ||
754 | 674 | ||
675 | irq_info[irq] = mk_pirq_info(0, pirq, 0, vector); | ||
676 | pirq_to_irq[pirq] = irq; | ||
677 | ret = set_irq_msi(irq, msidesc); | ||
678 | if (ret < 0) | ||
679 | goto error_irq; | ||
755 | out: | 680 | out: |
756 | spin_unlock(&irq_mapping_update_lock); | 681 | spin_unlock(&irq_mapping_update_lock); |
757 | return irq; | 682 | return irq; |
683 | error_irq: | ||
684 | spin_unlock(&irq_mapping_update_lock); | ||
685 | xen_free_irq(irq); | ||
686 | return -1; | ||
758 | } | 687 | } |
759 | #endif | 688 | #endif |
760 | 689 | ||
@@ -779,11 +708,12 @@ int xen_destroy_irq(int irq) | |||
779 | printk(KERN_WARNING "unmap irq failed %d\n", rc); | 708 | printk(KERN_WARNING "unmap irq failed %d\n", rc); |
780 | goto out; | 709 | goto out; |
781 | } | 710 | } |
782 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
783 | } | 711 | } |
712 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
713 | |||
784 | irq_info[irq] = mk_unbound_info(); | 714 | irq_info[irq] = mk_unbound_info(); |
785 | 715 | ||
786 | irq_free_desc(irq); | 716 | xen_free_irq(irq); |
787 | 717 | ||
788 | out: | 718 | out: |
789 | spin_unlock(&irq_mapping_update_lock); | 719 | spin_unlock(&irq_mapping_update_lock); |
@@ -814,7 +744,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
814 | irq = evtchn_to_irq[evtchn]; | 744 | irq = evtchn_to_irq[evtchn]; |
815 | 745 | ||
816 | if (irq == -1) { | 746 | if (irq == -1) { |
817 | irq = find_unbound_irq(); | 747 | irq = xen_allocate_irq_dynamic(); |
818 | 748 | ||
819 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 749 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
820 | handle_fasteoi_irq, "event"); | 750 | handle_fasteoi_irq, "event"); |
@@ -839,7 +769,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
839 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 769 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
840 | 770 | ||
841 | if (irq == -1) { | 771 | if (irq == -1) { |
842 | irq = find_unbound_irq(); | 772 | irq = xen_allocate_irq_dynamic(); |
843 | if (irq < 0) | 773 | if (irq < 0) |
844 | goto out; | 774 | goto out; |
845 | 775 | ||
@@ -875,7 +805,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
875 | irq = per_cpu(virq_to_irq, cpu)[virq]; | 805 | irq = per_cpu(virq_to_irq, cpu)[virq]; |
876 | 806 | ||
877 | if (irq == -1) { | 807 | if (irq == -1) { |
878 | irq = find_unbound_irq(); | 808 | irq = xen_allocate_irq_dynamic(); |
879 | 809 | ||
880 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 810 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, |
881 | handle_percpu_irq, "virq"); | 811 | handle_percpu_irq, "virq"); |
@@ -934,7 +864,7 @@ static void unbind_from_irq(unsigned int irq) | |||
934 | if (irq_info[irq].type != IRQT_UNBOUND) { | 864 | if (irq_info[irq].type != IRQT_UNBOUND) { |
935 | irq_info[irq] = mk_unbound_info(); | 865 | irq_info[irq] = mk_unbound_info(); |
936 | 866 | ||
937 | irq_free_desc(irq); | 867 | xen_free_irq(irq); |
938 | } | 868 | } |
939 | 869 | ||
940 | spin_unlock(&irq_mapping_update_lock); | 870 | spin_unlock(&irq_mapping_update_lock); |
@@ -990,7 +920,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, | |||
990 | if (irq < 0) | 920 | if (irq < 0) |
991 | return irq; | 921 | return irq; |
992 | 922 | ||
993 | irqflags |= IRQF_NO_SUSPEND; | 923 | irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME; |
994 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | 924 | retval = request_irq(irq, handler, irqflags, devname, dev_id); |
995 | if (retval != 0) { | 925 | if (retval != 0) { |
996 | unbind_from_irq(irq); | 926 | unbind_from_irq(irq); |
@@ -1234,11 +1164,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1234 | return 0; | 1164 | return 0; |
1235 | } | 1165 | } |
1236 | 1166 | ||
1237 | static int set_affinity_irq(unsigned irq, const struct cpumask *dest) | 1167 | static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, |
1168 | bool force) | ||
1238 | { | 1169 | { |
1239 | unsigned tcpu = cpumask_first(dest); | 1170 | unsigned tcpu = cpumask_first(dest); |
1240 | 1171 | ||
1241 | return rebind_irq_to_cpu(irq, tcpu); | 1172 | return rebind_irq_to_cpu(data->irq, tcpu); |
1242 | } | 1173 | } |
1243 | 1174 | ||
1244 | int resend_irq_on_evtchn(unsigned int irq) | 1175 | int resend_irq_on_evtchn(unsigned int irq) |
@@ -1257,35 +1188,35 @@ int resend_irq_on_evtchn(unsigned int irq) | |||
1257 | return 1; | 1188 | return 1; |
1258 | } | 1189 | } |
1259 | 1190 | ||
1260 | static void enable_dynirq(unsigned int irq) | 1191 | static void enable_dynirq(struct irq_data *data) |
1261 | { | 1192 | { |
1262 | int evtchn = evtchn_from_irq(irq); | 1193 | int evtchn = evtchn_from_irq(data->irq); |
1263 | 1194 | ||
1264 | if (VALID_EVTCHN(evtchn)) | 1195 | if (VALID_EVTCHN(evtchn)) |
1265 | unmask_evtchn(evtchn); | 1196 | unmask_evtchn(evtchn); |
1266 | } | 1197 | } |
1267 | 1198 | ||
1268 | static void disable_dynirq(unsigned int irq) | 1199 | static void disable_dynirq(struct irq_data *data) |
1269 | { | 1200 | { |
1270 | int evtchn = evtchn_from_irq(irq); | 1201 | int evtchn = evtchn_from_irq(data->irq); |
1271 | 1202 | ||
1272 | if (VALID_EVTCHN(evtchn)) | 1203 | if (VALID_EVTCHN(evtchn)) |
1273 | mask_evtchn(evtchn); | 1204 | mask_evtchn(evtchn); |
1274 | } | 1205 | } |
1275 | 1206 | ||
1276 | static void ack_dynirq(unsigned int irq) | 1207 | static void ack_dynirq(struct irq_data *data) |
1277 | { | 1208 | { |
1278 | int evtchn = evtchn_from_irq(irq); | 1209 | int evtchn = evtchn_from_irq(data->irq); |
1279 | 1210 | ||
1280 | move_masked_irq(irq); | 1211 | move_masked_irq(data->irq); |
1281 | 1212 | ||
1282 | if (VALID_EVTCHN(evtchn)) | 1213 | if (VALID_EVTCHN(evtchn)) |
1283 | unmask_evtchn(evtchn); | 1214 | unmask_evtchn(evtchn); |
1284 | } | 1215 | } |
1285 | 1216 | ||
1286 | static int retrigger_dynirq(unsigned int irq) | 1217 | static int retrigger_dynirq(struct irq_data *data) |
1287 | { | 1218 | { |
1288 | int evtchn = evtchn_from_irq(irq); | 1219 | int evtchn = evtchn_from_irq(data->irq); |
1289 | struct shared_info *sh = HYPERVISOR_shared_info; | 1220 | struct shared_info *sh = HYPERVISOR_shared_info; |
1290 | int ret = 0; | 1221 | int ret = 0; |
1291 | 1222 | ||
@@ -1334,7 +1265,7 @@ static void restore_cpu_pirqs(void) | |||
1334 | 1265 | ||
1335 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); | 1266 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); |
1336 | 1267 | ||
1337 | startup_pirq(irq); | 1268 | __startup_pirq(irq); |
1338 | } | 1269 | } |
1339 | } | 1270 | } |
1340 | 1271 | ||
@@ -1445,7 +1376,6 @@ void xen_poll_irq(int irq) | |||
1445 | void xen_irq_resume(void) | 1376 | void xen_irq_resume(void) |
1446 | { | 1377 | { |
1447 | unsigned int cpu, irq, evtchn; | 1378 | unsigned int cpu, irq, evtchn; |
1448 | struct irq_desc *desc; | ||
1449 | 1379 | ||
1450 | init_evtchn_cpu_bindings(); | 1380 | init_evtchn_cpu_bindings(); |
1451 | 1381 | ||
@@ -1465,66 +1395,48 @@ void xen_irq_resume(void) | |||
1465 | restore_cpu_ipis(cpu); | 1395 | restore_cpu_ipis(cpu); |
1466 | } | 1396 | } |
1467 | 1397 | ||
1468 | /* | ||
1469 | * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These | ||
1470 | * are not handled by the IRQ core. | ||
1471 | */ | ||
1472 | for_each_irq_desc(irq, desc) { | ||
1473 | if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) | ||
1474 | continue; | ||
1475 | if (desc->status & IRQ_DISABLED) | ||
1476 | continue; | ||
1477 | |||
1478 | evtchn = evtchn_from_irq(irq); | ||
1479 | if (evtchn == -1) | ||
1480 | continue; | ||
1481 | |||
1482 | unmask_evtchn(evtchn); | ||
1483 | } | ||
1484 | |||
1485 | restore_cpu_pirqs(); | 1398 | restore_cpu_pirqs(); |
1486 | } | 1399 | } |
1487 | 1400 | ||
1488 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 1401 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
1489 | .name = "xen-dyn", | 1402 | .name = "xen-dyn", |
1490 | 1403 | ||
1491 | .disable = disable_dynirq, | 1404 | .irq_disable = disable_dynirq, |
1492 | .mask = disable_dynirq, | 1405 | .irq_mask = disable_dynirq, |
1493 | .unmask = enable_dynirq, | 1406 | .irq_unmask = enable_dynirq, |
1494 | 1407 | ||
1495 | .eoi = ack_dynirq, | 1408 | .irq_eoi = ack_dynirq, |
1496 | .set_affinity = set_affinity_irq, | 1409 | .irq_set_affinity = set_affinity_irq, |
1497 | .retrigger = retrigger_dynirq, | 1410 | .irq_retrigger = retrigger_dynirq, |
1498 | }; | 1411 | }; |
1499 | 1412 | ||
1500 | static struct irq_chip xen_pirq_chip __read_mostly = { | 1413 | static struct irq_chip xen_pirq_chip __read_mostly = { |
1501 | .name = "xen-pirq", | 1414 | .name = "xen-pirq", |
1502 | 1415 | ||
1503 | .startup = startup_pirq, | 1416 | .irq_startup = startup_pirq, |
1504 | .shutdown = shutdown_pirq, | 1417 | .irq_shutdown = shutdown_pirq, |
1505 | 1418 | ||
1506 | .enable = enable_pirq, | 1419 | .irq_enable = enable_pirq, |
1507 | .unmask = enable_pirq, | 1420 | .irq_unmask = enable_pirq, |
1508 | 1421 | ||
1509 | .disable = disable_pirq, | 1422 | .irq_disable = disable_pirq, |
1510 | .mask = disable_pirq, | 1423 | .irq_mask = disable_pirq, |
1511 | 1424 | ||
1512 | .ack = ack_pirq, | 1425 | .irq_ack = ack_pirq, |
1513 | .end = end_pirq, | ||
1514 | 1426 | ||
1515 | .set_affinity = set_affinity_irq, | 1427 | .irq_set_affinity = set_affinity_irq, |
1516 | 1428 | ||
1517 | .retrigger = retrigger_dynirq, | 1429 | .irq_retrigger = retrigger_dynirq, |
1518 | }; | 1430 | }; |
1519 | 1431 | ||
1520 | static struct irq_chip xen_percpu_chip __read_mostly = { | 1432 | static struct irq_chip xen_percpu_chip __read_mostly = { |
1521 | .name = "xen-percpu", | 1433 | .name = "xen-percpu", |
1522 | 1434 | ||
1523 | .disable = disable_dynirq, | 1435 | .irq_disable = disable_dynirq, |
1524 | .mask = disable_dynirq, | 1436 | .irq_mask = disable_dynirq, |
1525 | .unmask = enable_dynirq, | 1437 | .irq_unmask = enable_dynirq, |
1526 | 1438 | ||
1527 | .ack = ack_dynirq, | 1439 | .irq_ack = ack_dynirq, |
1528 | }; | 1440 | }; |
1529 | 1441 | ||
1530 | int xen_set_callback_via(uint64_t via) | 1442 | int xen_set_callback_via(uint64_t via) |
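The events.c changes above convert every callback to the struct irq_data based irq_chip operations (.mask becomes .irq_mask and so on), so each handler reads data->irq instead of receiving a bare irq number. Below is a compilable userspace analogue of that callback shape; the demo_* types and names are invented for illustration and are not the kernel API.

#include <stdio.h>

/* Analogue of struct irq_data: the irq number plus per-irq state. */
struct demo_irq_data {
	unsigned int irq;
	int evtchn;		/* stands in for the Xen event channel */
};

/* Analogue of struct irq_chip using the irq_data-based callbacks. */
struct demo_irq_chip {
	const char *name;
	void (*irq_mask)(struct demo_irq_data *data);
	void (*irq_unmask)(struct demo_irq_data *data);
};

static void demo_mask(struct demo_irq_data *data)
{
	/* events.c would do mask_evtchn(evtchn_from_irq(data->irq)) here. */
	printf("%u: mask evtchn %d\n", data->irq, data->evtchn);
}

static void demo_unmask(struct demo_irq_data *data)
{
	printf("%u: unmask evtchn %d\n", data->irq, data->evtchn);
}

static const struct demo_irq_chip demo_chip = {
	.name       = "demo-dyn",
	.irq_mask   = demo_mask,
	.irq_unmask = demo_unmask,
};

int main(void)
{
	struct demo_irq_data d = { .irq = 17, .evtchn = 5 };

	demo_chip.irq_mask(&d);		/* callback gets its state directly */
	demo_chip.irq_unmask(&d);
	return 0;
}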
diff --git a/fs/afs/write.c b/fs/afs/write.c index 15690bb1d3b5..789b3afb3423 100644 --- a/fs/afs/write.c +++ b/fs/afs/write.c | |||
@@ -140,6 +140,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping, | |||
140 | candidate->first = candidate->last = index; | 140 | candidate->first = candidate->last = index; |
141 | candidate->offset_first = from; | 141 | candidate->offset_first = from; |
142 | candidate->to_last = to; | 142 | candidate->to_last = to; |
143 | INIT_LIST_HEAD(&candidate->link); | ||
143 | candidate->usage = 1; | 144 | candidate->usage = 1; |
144 | candidate->state = AFS_WBACK_PENDING; | 145 | candidate->state = AFS_WBACK_PENDING; |
145 | init_waitqueue_head(&candidate->waitq); | 146 | init_waitqueue_head(&candidate->waitq); |
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -239,15 +239,23 @@ static void __put_ioctx(struct kioctx *ctx) | |||
239 | call_rcu(&ctx->rcu_head, ctx_rcu_free); | 239 | call_rcu(&ctx->rcu_head, ctx_rcu_free); |
240 | } | 240 | } |
241 | 241 | ||
242 | #define get_ioctx(kioctx) do { \ | 242 | static inline void get_ioctx(struct kioctx *kioctx) |
243 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | 243 | { |
244 | atomic_inc(&(kioctx)->users); \ | 244 | BUG_ON(atomic_read(&kioctx->users) <= 0); |
245 | } while (0) | 245 | atomic_inc(&kioctx->users); |
246 | #define put_ioctx(kioctx) do { \ | 246 | } |
247 | BUG_ON(atomic_read(&(kioctx)->users) <= 0); \ | 247 | |
248 | if (unlikely(atomic_dec_and_test(&(kioctx)->users))) \ | 248 | static inline int try_get_ioctx(struct kioctx *kioctx) |
249 | __put_ioctx(kioctx); \ | 249 | { |
250 | } while (0) | 250 | return atomic_inc_not_zero(&kioctx->users); |
251 | } | ||
252 | |||
253 | static inline void put_ioctx(struct kioctx *kioctx) | ||
254 | { | ||
255 | BUG_ON(atomic_read(&kioctx->users) <= 0); | ||
256 | if (unlikely(atomic_dec_and_test(&kioctx->users))) | ||
257 | __put_ioctx(kioctx); | ||
258 | } | ||
251 | 259 | ||
252 | /* ioctx_alloc | 260 | /* ioctx_alloc |
253 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. | 261 | * Allocates and initializes an ioctx. Returns an ERR_PTR if it failed. |
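try_get_ioctx() is built on atomic_inc_not_zero(): the reference is taken only if the count has not already reached zero, which is what lets lookup_ioctx() (next hunk) safely race against the final put. A minimal userspace sketch of the same take-only-if-live idiom using C11 atomics; the obj_* names are illustrative, not the kernel helpers.

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int users;
};

/* Take a reference only if the object is still live (count > 0). */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->users);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&o->users, &old, old + 1))
			return true;	/* reference taken */
		/* the failed CAS reloaded 'old'; retry unless it hit 0 */
	}
	return false;	/* already on its way to destruction */
}

/* Drop a reference; returns true when the caller held the last one. */
static bool obj_put(struct obj *o)
{
	return atomic_fetch_sub(&o->users, 1) == 1;
}

int main(void)
{
	struct obj o = { .users = 1 };

	if (obj_tryget(&o))	/* count was 1, now 2 */
		obj_put(&o);
	obj_put(&o);		/* drops the initial reference */
	return 0;
}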
@@ -601,8 +609,13 @@ static struct kioctx *lookup_ioctx(unsigned long ctx_id) | |||
601 | rcu_read_lock(); | 609 | rcu_read_lock(); |
602 | 610 | ||
603 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { | 611 | hlist_for_each_entry_rcu(ctx, n, &mm->ioctx_list, list) { |
604 | if (ctx->user_id == ctx_id && !ctx->dead) { | 612 | /* |
605 | get_ioctx(ctx); | 613 | * RCU protects us against accessing freed memory but |
614 | * we have to be careful not to get a reference when the | ||
615 | * reference count already dropped to 0 (ctx->dead test | ||
616 | * is unreliable because of races). | ||
617 | */ | ||
618 | if (ctx->user_id == ctx_id && !ctx->dead && try_get_ioctx(ctx)){ | ||
606 | ret = ctx; | 619 | ret = ctx; |
607 | break; | 620 | break; |
608 | } | 621 | } |
@@ -1629,6 +1642,23 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1629 | goto out_put_req; | 1642 | goto out_put_req; |
1630 | 1643 | ||
1631 | spin_lock_irq(&ctx->ctx_lock); | 1644 | spin_lock_irq(&ctx->ctx_lock); |
1645 | /* | ||
1646 | * We could have raced with io_destroy() and are currently holding a | ||
1647 | * reference to ctx which should be destroyed. We cannot submit IO | ||
1648 | * since ctx gets freed as soon as io_submit() puts its reference. The | ||
1649 | * check here is reliable: io_destroy() sets ctx->dead before waiting | ||
1650 | * for outstanding IO and the barrier between these two is realized by | ||
1651 | * unlock of mm->ioctx_lock and lock of ctx->ctx_lock. Analogously we | ||
1652 | * increment ctx->reqs_active before checking for ctx->dead and the | ||
1653 | * barrier is realized by unlock and lock of ctx->ctx_lock. Thus if we | ||
1654 | * don't see ctx->dead set here, io_destroy() waits for our IO to | ||
1655 | * finish. | ||
1656 | */ | ||
1657 | if (ctx->dead) { | ||
1658 | spin_unlock_irq(&ctx->ctx_lock); | ||
1659 | ret = -EINVAL; | ||
1660 | goto out_put_req; | ||
1661 | } | ||
1632 | aio_run_iocb(req); | 1662 | aio_run_iocb(req); |
1633 | if (!list_empty(&ctx->run_list)) { | 1663 | if (!list_empty(&ctx->run_list)) { |
1634 | /* drain the run list */ | 1664 | /* drain the run list */ |
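The long comment added to io_submit_one() describes a lock-based ordering: io_destroy() sets ctx->dead before it starts waiting, and the submitter checks dead under ctx_lock after taking its request slot, so a submitter that does not observe dead is guaranteed to have its I/O waited for. The sketch below shows only the submitter-side check with pthreads; it is a simplified analogue with invented demo_* names, not the aio code.

#include <pthread.h>
#include <errno.h>

struct demo_ctx {
	pthread_mutex_t lock;
	int dead;		/* set by the destroy path before it waits */
	int reqs_active;	/* in-flight requests the destroy path waits on */
};

/* Submitter side: take a request slot only if the context is still live. */
static int demo_submit_one(struct demo_ctx *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->dead) {
		/* destroy has already committed to tearing the context down */
		pthread_mutex_unlock(&ctx->lock);
		return -EINVAL;
	}
	ctx->reqs_active++;	/* destroy will now wait for this request */
	pthread_mutex_unlock(&ctx->lock);

	/* ... issue the I/O, then drop reqs_active on completion ... */
	return 0;
}

int main(void)
{
	struct demo_ctx ctx = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.dead = 0,
		.reqs_active = 0,
	};

	return demo_submit_one(&ctx) ? 1 : 0;
}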
diff --git a/fs/block_dev.c b/fs/block_dev.c index 4fb8a3431531..889287019599 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -873,6 +873,11 @@ int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) | |||
873 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); | 873 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); |
874 | if (ret) | 874 | if (ret) |
875 | goto out_del; | 875 | goto out_del; |
876 | /* | ||
877 | * bdev could be deleted beneath us which would implicitly destroy | ||
878 | * the holder directory. Hold on to it. | ||
879 | */ | ||
880 | kobject_get(bdev->bd_part->holder_dir); | ||
876 | 881 | ||
877 | list_add(&holder->list, &bdev->bd_holder_disks); | 882 | list_add(&holder->list, &bdev->bd_holder_disks); |
878 | goto out_unlock; | 883 | goto out_unlock; |
@@ -909,6 +914,7 @@ void bd_unlink_disk_holder(struct block_device *bdev, struct gendisk *disk) | |||
909 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); | 914 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
910 | del_symlink(bdev->bd_part->holder_dir, | 915 | del_symlink(bdev->bd_part->holder_dir, |
911 | &disk_to_dev(disk)->kobj); | 916 | &disk_to_dev(disk)->kobj); |
917 | kobject_put(bdev->bd_part->holder_dir); | ||
912 | list_del_init(&holder->list); | 918 | list_del_init(&holder->list); |
913 | kfree(holder); | 919 | kfree(holder); |
914 | } | 920 | } |
@@ -922,14 +928,15 @@ EXPORT_SYMBOL_GPL(bd_unlink_disk_holder); | |||
922 | * flush_disk - invalidates all buffer-cache entries on a disk | 928 | * flush_disk - invalidates all buffer-cache entries on a disk |
923 | * | 929 | * |
924 | * @bdev: struct block device to be flushed | 930 | * @bdev: struct block device to be flushed |
931 | * @kill_dirty: flag to guide handling of dirty inodes | ||
925 | * | 932 | * |
926 | * Invalidates all buffer-cache entries on a disk. It should be called | 933 | * Invalidates all buffer-cache entries on a disk. It should be called |
927 | * when a disk has been changed -- either by a media change or online | 934 | * when a disk has been changed -- either by a media change or online |
928 | * resize. | 935 | * resize. |
929 | */ | 936 | */ |
930 | static void flush_disk(struct block_device *bdev) | 937 | static void flush_disk(struct block_device *bdev, bool kill_dirty) |
931 | { | 938 | { |
932 | if (__invalidate_device(bdev)) { | 939 | if (__invalidate_device(bdev, kill_dirty)) { |
933 | char name[BDEVNAME_SIZE] = ""; | 940 | char name[BDEVNAME_SIZE] = ""; |
934 | 941 | ||
935 | if (bdev->bd_disk) | 942 | if (bdev->bd_disk) |
@@ -966,7 +973,7 @@ void check_disk_size_change(struct gendisk *disk, struct block_device *bdev) | |||
966 | "%s: detected capacity change from %lld to %lld\n", | 973 | "%s: detected capacity change from %lld to %lld\n", |
967 | name, bdev_size, disk_size); | 974 | name, bdev_size, disk_size); |
968 | i_size_write(bdev->bd_inode, disk_size); | 975 | i_size_write(bdev->bd_inode, disk_size); |
969 | flush_disk(bdev); | 976 | flush_disk(bdev, false); |
970 | } | 977 | } |
971 | } | 978 | } |
972 | EXPORT_SYMBOL(check_disk_size_change); | 979 | EXPORT_SYMBOL(check_disk_size_change); |
@@ -1019,7 +1026,7 @@ int check_disk_change(struct block_device *bdev) | |||
1019 | if (!(events & DISK_EVENT_MEDIA_CHANGE)) | 1026 | if (!(events & DISK_EVENT_MEDIA_CHANGE)) |
1020 | return 0; | 1027 | return 0; |
1021 | 1028 | ||
1022 | flush_disk(bdev); | 1029 | flush_disk(bdev, true); |
1023 | if (bdops->revalidate_disk) | 1030 | if (bdops->revalidate_disk) |
1024 | bdops->revalidate_disk(bdev->bd_disk); | 1031 | bdops->revalidate_disk(bdev->bd_disk); |
1025 | return 1; | 1032 | return 1; |
@@ -1600,7 +1607,7 @@ fail: | |||
1600 | } | 1607 | } |
1601 | EXPORT_SYMBOL(lookup_bdev); | 1608 | EXPORT_SYMBOL(lookup_bdev); |
1602 | 1609 | ||
1603 | int __invalidate_device(struct block_device *bdev) | 1610 | int __invalidate_device(struct block_device *bdev, bool kill_dirty) |
1604 | { | 1611 | { |
1605 | struct super_block *sb = get_super(bdev); | 1612 | struct super_block *sb = get_super(bdev); |
1606 | int res = 0; | 1613 | int res = 0; |
@@ -1613,7 +1620,7 @@ int __invalidate_device(struct block_device *bdev) | |||
1613 | * hold). | 1620 | * hold). |
1614 | */ | 1621 | */ |
1615 | shrink_dcache_sb(sb); | 1622 | shrink_dcache_sb(sb); |
1616 | res = invalidate_inodes(sb); | 1623 | res = invalidate_inodes(sb, kill_dirty); |
1617 | drop_super(sb); | 1624 | drop_super(sb); |
1618 | } | 1625 | } |
1619 | invalidate_bdev(bdev); | 1626 | invalidate_bdev(bdev); |
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 2c98b3af6052..7f78cc78fdd0 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -729,6 +729,15 @@ struct btrfs_space_info { | |||
729 | u64 disk_total; /* total bytes on disk, takes mirrors into | 729 | u64 disk_total; /* total bytes on disk, takes mirrors into |
730 | account */ | 730 | account */ |
731 | 731 | ||
732 | /* | ||
733 | * we bump reservation progress every time we decrement | ||
734 | * bytes_reserved. This way people waiting for reservations | ||
735 | * know something good has happened and they can check | ||
736 | * for progress. The number here isn't to be trusted, it | ||
737 | * just shows reclaim activity | ||
738 | */ | ||
739 | unsigned long reservation_progress; | ||
740 | |||
732 | int full; /* indicates that we cannot allocate any more | 741 | int full; /* indicates that we cannot allocate any more |
733 | chunks for this space */ | 742 | chunks for this space */ |
734 | int force_alloc; /* set if we need to force a chunk alloc for | 743 | int force_alloc; /* set if we need to force a chunk alloc for |
@@ -1254,6 +1263,7 @@ struct btrfs_root { | |||
1254 | #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) | 1263 | #define BTRFS_MOUNT_SPACE_CACHE (1 << 12) |
1255 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) | 1264 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) |
1256 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) | 1265 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) |
1266 | #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) | ||
1257 | 1267 | ||
1258 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) | 1268 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) |
1259 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) | 1269 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) |
@@ -2218,6 +2228,8 @@ int btrfs_error_unpin_extent_range(struct btrfs_root *root, | |||
2218 | u64 start, u64 end); | 2228 | u64 start, u64 end); |
2219 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, | 2229 | int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr, |
2220 | u64 num_bytes); | 2230 | u64 num_bytes); |
2231 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | ||
2232 | struct btrfs_root *root, u64 type); | ||
2221 | 2233 | ||
2222 | /* ctree.c */ | 2234 | /* ctree.c */ |
2223 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, | 2235 | int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, |
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index f3c96fc01439..7b3089b5c2df 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -3342,15 +3342,16 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3342 | u64 max_reclaim; | 3342 | u64 max_reclaim; |
3343 | u64 reclaimed = 0; | 3343 | u64 reclaimed = 0; |
3344 | long time_left; | 3344 | long time_left; |
3345 | int pause = 1; | ||
3346 | int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; | 3345 | int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT; |
3347 | int loops = 0; | 3346 | int loops = 0; |
3347 | unsigned long progress; | ||
3348 | 3348 | ||
3349 | block_rsv = &root->fs_info->delalloc_block_rsv; | 3349 | block_rsv = &root->fs_info->delalloc_block_rsv; |
3350 | space_info = block_rsv->space_info; | 3350 | space_info = block_rsv->space_info; |
3351 | 3351 | ||
3352 | smp_mb(); | 3352 | smp_mb(); |
3353 | reserved = space_info->bytes_reserved; | 3353 | reserved = space_info->bytes_reserved; |
3354 | progress = space_info->reservation_progress; | ||
3354 | 3355 | ||
3355 | if (reserved == 0) | 3356 | if (reserved == 0) |
3356 | return 0; | 3357 | return 0; |
@@ -3365,31 +3366,36 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3365 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); | 3366 | writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages); |
3366 | 3367 | ||
3367 | spin_lock(&space_info->lock); | 3368 | spin_lock(&space_info->lock); |
3368 | if (reserved > space_info->bytes_reserved) { | 3369 | if (reserved > space_info->bytes_reserved) |
3369 | loops = 0; | ||
3370 | reclaimed += reserved - space_info->bytes_reserved; | 3370 | reclaimed += reserved - space_info->bytes_reserved; |
3371 | } else { | ||
3372 | loops++; | ||
3373 | } | ||
3374 | reserved = space_info->bytes_reserved; | 3371 | reserved = space_info->bytes_reserved; |
3375 | spin_unlock(&space_info->lock); | 3372 | spin_unlock(&space_info->lock); |
3376 | 3373 | ||
3374 | loops++; | ||
3375 | |||
3377 | if (reserved == 0 || reclaimed >= max_reclaim) | 3376 | if (reserved == 0 || reclaimed >= max_reclaim) |
3378 | break; | 3377 | break; |
3379 | 3378 | ||
3380 | if (trans && trans->transaction->blocked) | 3379 | if (trans && trans->transaction->blocked) |
3381 | return -EAGAIN; | 3380 | return -EAGAIN; |
3382 | 3381 | ||
3383 | __set_current_state(TASK_INTERRUPTIBLE); | 3382 | time_left = schedule_timeout_interruptible(1); |
3384 | time_left = schedule_timeout(pause); | ||
3385 | 3383 | ||
3386 | /* We were interrupted, exit */ | 3384 | /* We were interrupted, exit */ |
3387 | if (time_left) | 3385 | if (time_left) |
3388 | break; | 3386 | break; |
3389 | 3387 | ||
3390 | pause <<= 1; | 3388 | /* we've kicked the IO a few times, if anything has been freed, |
3391 | if (pause > HZ / 10) | 3389 | * exit. There is no sense in looping here for a long time |
3392 | pause = HZ / 10; | 3390 | * when we really need to commit the transaction, or there are |
3391 | * just too many writers without enough free space | ||
3392 | */ | ||
3393 | |||
3394 | if (loops > 3) { | ||
3395 | smp_mb(); | ||
3396 | if (progress != space_info->reservation_progress) | ||
3397 | break; | ||
3398 | } | ||
3393 | 3399 | ||
3394 | } | 3400 | } |
3395 | return reclaimed >= to_reclaim; | 3401 | return reclaimed >= to_reclaim; |
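The loop above snapshots space_info->reservation_progress and, after a few passes, breaks out as soon as the counter has moved: any change means some reservation was released since the snapshot, so the caller can re-evaluate instead of spinning here. The counter's absolute value is meaningless; only "did it change" matters. A userspace sketch of that snapshot-and-compare idiom, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

/* Bumped every time a reservation is released; only changes matter. */
static atomic_ulong reservation_progress;

static void release_reservation(void)
{
	/* ... hand the reserved bytes back ... */
	atomic_fetch_add(&reservation_progress, 1);
}

/* Waiter side: after a few passes, stop looping as soon as the counter
 * has moved, since a change means a reservation was released since the
 * snapshot and the caller can re-check instead of spinning here. */
static bool should_stop(int loops, unsigned long snapshot)
{
	if (loops <= 3)
		return false;
	return atomic_load(&reservation_progress) != snapshot;
}

int main(void)
{
	unsigned long snap = atomic_load(&reservation_progress);

	release_reservation();			/* another task frees space */
	return should_stop(4, snap) ? 0 : 1;	/* change seen: stop spinning */
}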
@@ -3612,6 +3618,7 @@ void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, | |||
3612 | if (num_bytes) { | 3618 | if (num_bytes) { |
3613 | spin_lock(&space_info->lock); | 3619 | spin_lock(&space_info->lock); |
3614 | space_info->bytes_reserved -= num_bytes; | 3620 | space_info->bytes_reserved -= num_bytes; |
3621 | space_info->reservation_progress++; | ||
3615 | spin_unlock(&space_info->lock); | 3622 | spin_unlock(&space_info->lock); |
3616 | } | 3623 | } |
3617 | } | 3624 | } |
@@ -3844,6 +3851,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
3844 | if (block_rsv->reserved >= block_rsv->size) { | 3851 | if (block_rsv->reserved >= block_rsv->size) { |
3845 | num_bytes = block_rsv->reserved - block_rsv->size; | 3852 | num_bytes = block_rsv->reserved - block_rsv->size; |
3846 | sinfo->bytes_reserved -= num_bytes; | 3853 | sinfo->bytes_reserved -= num_bytes; |
3854 | sinfo->reservation_progress++; | ||
3847 | block_rsv->reserved = block_rsv->size; | 3855 | block_rsv->reserved = block_rsv->size; |
3848 | block_rsv->full = 1; | 3856 | block_rsv->full = 1; |
3849 | } | 3857 | } |
@@ -4005,7 +4013,6 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
4005 | to_reserve = 0; | 4013 | to_reserve = 0; |
4006 | } | 4014 | } |
4007 | spin_unlock(&BTRFS_I(inode)->accounting_lock); | 4015 | spin_unlock(&BTRFS_I(inode)->accounting_lock); |
4008 | |||
4009 | to_reserve += calc_csum_metadata_size(inode, num_bytes); | 4016 | to_reserve += calc_csum_metadata_size(inode, num_bytes); |
4010 | ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); | 4017 | ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1); |
4011 | if (ret) | 4018 | if (ret) |
@@ -4133,6 +4140,7 @@ static int update_block_group(struct btrfs_trans_handle *trans, | |||
4133 | btrfs_set_block_group_used(&cache->item, old_val); | 4140 | btrfs_set_block_group_used(&cache->item, old_val); |
4134 | cache->reserved -= num_bytes; | 4141 | cache->reserved -= num_bytes; |
4135 | cache->space_info->bytes_reserved -= num_bytes; | 4142 | cache->space_info->bytes_reserved -= num_bytes; |
4143 | cache->space_info->reservation_progress++; | ||
4136 | cache->space_info->bytes_used += num_bytes; | 4144 | cache->space_info->bytes_used += num_bytes; |
4137 | cache->space_info->disk_used += num_bytes * factor; | 4145 | cache->space_info->disk_used += num_bytes * factor; |
4138 | spin_unlock(&cache->lock); | 4146 | spin_unlock(&cache->lock); |
@@ -4184,6 +4192,7 @@ static int pin_down_extent(struct btrfs_root *root, | |||
4184 | if (reserved) { | 4192 | if (reserved) { |
4185 | cache->reserved -= num_bytes; | 4193 | cache->reserved -= num_bytes; |
4186 | cache->space_info->bytes_reserved -= num_bytes; | 4194 | cache->space_info->bytes_reserved -= num_bytes; |
4195 | cache->space_info->reservation_progress++; | ||
4187 | } | 4196 | } |
4188 | spin_unlock(&cache->lock); | 4197 | spin_unlock(&cache->lock); |
4189 | spin_unlock(&cache->space_info->lock); | 4198 | spin_unlock(&cache->space_info->lock); |
@@ -4234,6 +4243,7 @@ static int update_reserved_bytes(struct btrfs_block_group_cache *cache, | |||
4234 | space_info->bytes_readonly += num_bytes; | 4243 | space_info->bytes_readonly += num_bytes; |
4235 | cache->reserved -= num_bytes; | 4244 | cache->reserved -= num_bytes; |
4236 | space_info->bytes_reserved -= num_bytes; | 4245 | space_info->bytes_reserved -= num_bytes; |
4246 | space_info->reservation_progress++; | ||
4237 | } | 4247 | } |
4238 | spin_unlock(&cache->lock); | 4248 | spin_unlock(&cache->lock); |
4239 | spin_unlock(&space_info->lock); | 4249 | spin_unlock(&space_info->lock); |
@@ -4712,6 +4722,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans, | |||
4712 | if (ret) { | 4722 | if (ret) { |
4713 | spin_lock(&cache->space_info->lock); | 4723 | spin_lock(&cache->space_info->lock); |
4714 | cache->space_info->bytes_reserved -= buf->len; | 4724 | cache->space_info->bytes_reserved -= buf->len; |
4725 | cache->space_info->reservation_progress++; | ||
4715 | spin_unlock(&cache->space_info->lock); | 4726 | spin_unlock(&cache->space_info->lock); |
4716 | } | 4727 | } |
4717 | goto out; | 4728 | goto out; |
@@ -5376,7 +5387,7 @@ again: | |||
5376 | num_bytes, data, 1); | 5387 | num_bytes, data, 1); |
5377 | goto again; | 5388 | goto again; |
5378 | } | 5389 | } |
5379 | if (ret == -ENOSPC) { | 5390 | if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) { |
5380 | struct btrfs_space_info *sinfo; | 5391 | struct btrfs_space_info *sinfo; |
5381 | 5392 | ||
5382 | sinfo = __find_space_info(root->fs_info, data); | 5393 | sinfo = __find_space_info(root->fs_info, data); |
@@ -8065,6 +8076,13 @@ out: | |||
8065 | return ret; | 8076 | return ret; |
8066 | } | 8077 | } |
8067 | 8078 | ||
8079 | int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans, | ||
8080 | struct btrfs_root *root, u64 type) | ||
8081 | { | ||
8082 | u64 alloc_flags = get_alloc_profile(root, type); | ||
8083 | return do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1); | ||
8084 | } | ||
8085 | |||
8068 | /* | 8086 | /* |
8069 | * helper to account the unused space of all the readonly block group in the | 8087 | * helper to account the unused space of all the readonly block group in the |
8070 | * list. takes mirrors into account. | 8088 | * list. takes mirrors into account. |
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 92ac5192c518..714adc4ac4c2 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -1433,12 +1433,13 @@ int extent_clear_unlock_delalloc(struct inode *inode, | |||
1433 | */ | 1433 | */ |
1434 | u64 count_range_bits(struct extent_io_tree *tree, | 1434 | u64 count_range_bits(struct extent_io_tree *tree, |
1435 | u64 *start, u64 search_end, u64 max_bytes, | 1435 | u64 *start, u64 search_end, u64 max_bytes, |
1436 | unsigned long bits) | 1436 | unsigned long bits, int contig) |
1437 | { | 1437 | { |
1438 | struct rb_node *node; | 1438 | struct rb_node *node; |
1439 | struct extent_state *state; | 1439 | struct extent_state *state; |
1440 | u64 cur_start = *start; | 1440 | u64 cur_start = *start; |
1441 | u64 total_bytes = 0; | 1441 | u64 total_bytes = 0; |
1442 | u64 last = 0; | ||
1442 | int found = 0; | 1443 | int found = 0; |
1443 | 1444 | ||
1444 | if (search_end <= cur_start) { | 1445 | if (search_end <= cur_start) { |
@@ -1463,7 +1464,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1463 | state = rb_entry(node, struct extent_state, rb_node); | 1464 | state = rb_entry(node, struct extent_state, rb_node); |
1464 | if (state->start > search_end) | 1465 | if (state->start > search_end) |
1465 | break; | 1466 | break; |
1466 | if (state->end >= cur_start && (state->state & bits)) { | 1467 | if (contig && found && state->start > last + 1) |
1468 | break; | ||
1469 | if (state->end >= cur_start && (state->state & bits) == bits) { | ||
1467 | total_bytes += min(search_end, state->end) + 1 - | 1470 | total_bytes += min(search_end, state->end) + 1 - |
1468 | max(cur_start, state->start); | 1471 | max(cur_start, state->start); |
1469 | if (total_bytes >= max_bytes) | 1472 | if (total_bytes >= max_bytes) |
@@ -1472,6 +1475,9 @@ u64 count_range_bits(struct extent_io_tree *tree, | |||
1472 | *start = state->start; | 1475 | *start = state->start; |
1473 | found = 1; | 1476 | found = 1; |
1474 | } | 1477 | } |
1478 | last = state->end; | ||
1479 | } else if (contig && found) { | ||
1480 | break; | ||
1475 | } | 1481 | } |
1476 | node = rb_next(node); | 1482 | node = rb_next(node); |
1477 | if (!node) | 1483 | if (!node) |
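With the new contig argument, count_range_bits() stops accumulating as soon as it hits a gap or a range that does not carry all of the requested bits, so it returns the length of the first contiguous matching run rather than a grand total. The same accumulation rule over a sorted array, as a self-contained sketch with invented types:

#include <stdint.h>
#include <stdbool.h>

struct range {
	uint64_t start, end;	/* inclusive, sorted, non-overlapping */
	unsigned long state;	/* bit flags, e.g. "delalloc" */
};

static uint64_t count_bits(const struct range *r, int nr, uint64_t from,
			   uint64_t to, unsigned long bits, bool contig)
{
	uint64_t total = 0, last = 0;
	bool found = false;

	for (int i = 0; i < nr; i++) {
		if (r[i].start > to)
			break;
		if (contig && found && r[i].start > last + 1)
			break;			/* gap: contiguous run ended */
		if (r[i].end >= from && (r[i].state & bits) == bits) {
			uint64_t lo = r[i].start > from ? r[i].start : from;
			uint64_t hi = r[i].end < to ? r[i].end : to;

			total += hi + 1 - lo;
			found = true;
			last = r[i].end;
		} else if (contig && found) {
			break;			/* bits mismatch: run ended */
		}
	}
	return total;
}

int main(void)
{
	struct range r[] = { {0, 9, 1}, {10, 19, 1}, {30, 39, 1} };

	/* contiguous run covers [0,19]; the [30,39] range is past the gap */
	return count_bits(r, 3, 0, 100, 1, true) == 20 ? 0 : 1;
}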
@@ -2912,6 +2918,46 @@ out: | |||
2912 | return sector; | 2918 | return sector; |
2913 | } | 2919 | } |
2914 | 2920 | ||
2921 | /* | ||
2922 | * helper function for fiemap, which doesn't want to see any holes. | ||
2923 | * This maps until we find something past 'last' | ||
2924 | */ | ||
2925 | static struct extent_map *get_extent_skip_holes(struct inode *inode, | ||
2926 | u64 offset, | ||
2927 | u64 last, | ||
2928 | get_extent_t *get_extent) | ||
2929 | { | ||
2930 | u64 sectorsize = BTRFS_I(inode)->root->sectorsize; | ||
2931 | struct extent_map *em; | ||
2932 | u64 len; | ||
2933 | |||
2934 | if (offset >= last) | ||
2935 | return NULL; | ||
2936 | |||
2937 | while(1) { | ||
2938 | len = last - offset; | ||
2939 | if (len == 0) | ||
2940 | break; | ||
2941 | len = (len + sectorsize - 1) & ~(sectorsize - 1); | ||
2942 | em = get_extent(inode, NULL, 0, offset, len, 0); | ||
2943 | if (!em || IS_ERR(em)) | ||
2944 | return em; | ||
2945 | |||
2946 | /* if this isn't a hole return it */ | ||
2947 | if (!test_bit(EXTENT_FLAG_VACANCY, &em->flags) && | ||
2948 | em->block_start != EXTENT_MAP_HOLE) { | ||
2949 | return em; | ||
2950 | } | ||
2951 | |||
2952 | /* this is a hole, advance to the next extent */ | ||
2953 | offset = extent_map_end(em); | ||
2954 | free_extent_map(em); | ||
2955 | if (offset >= last) | ||
2956 | break; | ||
2957 | } | ||
2958 | return NULL; | ||
2959 | } | ||
2960 | |||
2915 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 2961 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
2916 | __u64 start, __u64 len, get_extent_t *get_extent) | 2962 | __u64 start, __u64 len, get_extent_t *get_extent) |
2917 | { | 2963 | { |
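get_extent_skip_holes() keeps asking for the next mapping and discards holes, so the rewritten fiemap loop only ever sees real extents, or NULL once it has passed 'last'. A userspace sketch of that skip-the-holes iteration over an array of made-up demo_extent records:

#include <stddef.h>
#include <stdint.h>
#include <stdbool.h>

struct demo_extent {
	uint64_t start, len;
	bool hole;
};

/* Return the first non-hole extent at or after 'offset', or NULL if none
 * exists before 'last'; mirrors the shape of get_extent_skip_holes(). */
static const struct demo_extent *skip_holes(const struct demo_extent *map,
					    int nr, uint64_t offset,
					    uint64_t last)
{
	for (int i = 0; i < nr && offset < last; i++) {
		const struct demo_extent *em = &map[i];

		if (em->start + em->len <= offset)
			continue;		/* entirely before our offset */
		if (!em->hole)
			return em;		/* found a real extent */
		offset = em->start + em->len;	/* hole: advance past it */
	}
	return NULL;
}

int main(void)
{
	struct demo_extent map[] = {
		{ 0, 4096, true },		/* hole */
		{ 4096, 8192, false },		/* real extent */
	};
	const struct demo_extent *em = skip_holes(map, 2, 0, 1ULL << 20);

	return (em && em->start == 4096) ? 0 : 1;
}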
@@ -2921,16 +2967,19 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2921 | u32 flags = 0; | 2967 | u32 flags = 0; |
2922 | u32 found_type; | 2968 | u32 found_type; |
2923 | u64 last; | 2969 | u64 last; |
2970 | u64 last_for_get_extent = 0; | ||
2924 | u64 disko = 0; | 2971 | u64 disko = 0; |
2972 | u64 isize = i_size_read(inode); | ||
2925 | struct btrfs_key found_key; | 2973 | struct btrfs_key found_key; |
2926 | struct extent_map *em = NULL; | 2974 | struct extent_map *em = NULL; |
2927 | struct extent_state *cached_state = NULL; | 2975 | struct extent_state *cached_state = NULL; |
2928 | struct btrfs_path *path; | 2976 | struct btrfs_path *path; |
2929 | struct btrfs_file_extent_item *item; | 2977 | struct btrfs_file_extent_item *item; |
2930 | int end = 0; | 2978 | int end = 0; |
2931 | u64 em_start = 0, em_len = 0; | 2979 | u64 em_start = 0; |
2980 | u64 em_len = 0; | ||
2981 | u64 em_end = 0; | ||
2932 | unsigned long emflags; | 2982 | unsigned long emflags; |
2933 | int hole = 0; | ||
2934 | 2983 | ||
2935 | if (len == 0) | 2984 | if (len == 0) |
2936 | return -EINVAL; | 2985 | return -EINVAL; |
@@ -2940,6 +2989,10 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2940 | return -ENOMEM; | 2989 | return -ENOMEM; |
2941 | path->leave_spinning = 1; | 2990 | path->leave_spinning = 1; |
2942 | 2991 | ||
2992 | /* | ||
2993 | * lookup the last file extent. We're not using i_size here | ||
2994 | * because there might be preallocation past i_size | ||
2995 | */ | ||
2943 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, | 2996 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, |
2944 | path, inode->i_ino, -1, 0); | 2997 | path, inode->i_ino, -1, 0); |
2945 | if (ret < 0) { | 2998 | if (ret < 0) { |
@@ -2953,18 +3006,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2953 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); | 3006 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, path->slots[0]); |
2954 | found_type = btrfs_key_type(&found_key); | 3007 | found_type = btrfs_key_type(&found_key); |
2955 | 3008 | ||
2956 | /* No extents, just return */ | 3009 | /* No extents, but there might be delalloc bits */ |
2957 | if (found_key.objectid != inode->i_ino || | 3010 | if (found_key.objectid != inode->i_ino || |
2958 | found_type != BTRFS_EXTENT_DATA_KEY) { | 3011 | found_type != BTRFS_EXTENT_DATA_KEY) { |
2959 | btrfs_free_path(path); | 3012 | /* have to trust i_size as the end */ |
2960 | return 0; | 3013 | last = (u64)-1; |
3014 | last_for_get_extent = isize; | ||
3015 | } else { | ||
3016 | /* | ||
3017 | * remember the start of the last extent. There are a | ||
3018 | * bunch of different factors that go into the length of the | ||
3019 | * extent, so its much less complex to remember where it started | ||
3020 | */ | ||
3021 | last = found_key.offset; | ||
3022 | last_for_get_extent = last + 1; | ||
2961 | } | 3023 | } |
2962 | last = found_key.offset; | ||
2963 | btrfs_free_path(path); | 3024 | btrfs_free_path(path); |
2964 | 3025 | ||
3026 | /* | ||
3027 | * we might have some extents allocated but more delalloc past those | ||
3028 | * extents. so, we trust isize unless the start of the last extent is | ||
3029 | * beyond isize | ||
3030 | */ | ||
3031 | if (last < isize) { | ||
3032 | last = (u64)-1; | ||
3033 | last_for_get_extent = isize; | ||
3034 | } | ||
3035 | |||
2965 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, | 3036 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + len, 0, |
2966 | &cached_state, GFP_NOFS); | 3037 | &cached_state, GFP_NOFS); |
2967 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3038 | |
3039 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3040 | get_extent); | ||
2968 | if (!em) | 3041 | if (!em) |
2969 | goto out; | 3042 | goto out; |
2970 | if (IS_ERR(em)) { | 3043 | if (IS_ERR(em)) { |
@@ -2973,22 +3046,38 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2973 | } | 3046 | } |
2974 | 3047 | ||
2975 | while (!end) { | 3048 | while (!end) { |
2976 | hole = 0; | 3049 | u64 offset_in_extent; |
2977 | off = em->start + em->len; | ||
2978 | if (off >= max) | ||
2979 | end = 1; | ||
2980 | 3050 | ||
2981 | if (em->block_start == EXTENT_MAP_HOLE) { | 3051 | /* break if the extent we found is outside the range */ |
2982 | hole = 1; | 3052 | if (em->start >= max || extent_map_end(em) < off) |
2983 | goto next; | 3053 | break; |
2984 | } | ||
2985 | 3054 | ||
2986 | em_start = em->start; | 3055 | /* |
2987 | em_len = em->len; | 3056 | * get_extent may return an extent that starts before our |
3057 | * requested range. We have to make sure the ranges | ||
3058 | * we return to fiemap always move forward and don't | ||
3059 | * overlap, so adjust the offsets here | ||
3060 | */ | ||
3061 | em_start = max(em->start, off); | ||
2988 | 3062 | ||
3063 | /* | ||
3064 | * record the offset from the start of the extent | ||
3065 | * for adjusting the disk offset below | ||
3066 | */ | ||
3067 | offset_in_extent = em_start - em->start; | ||
3068 | em_end = extent_map_end(em); | ||
3069 | em_len = em_end - em_start; | ||
3070 | emflags = em->flags; | ||
2989 | disko = 0; | 3071 | disko = 0; |
2990 | flags = 0; | 3072 | flags = 0; |
2991 | 3073 | ||
3074 | /* | ||
3075 | * bump off for our next call to get_extent | ||
3076 | */ | ||
3077 | off = extent_map_end(em); | ||
3078 | if (off >= max) | ||
3079 | end = 1; | ||
3080 | |||
2992 | if (em->block_start == EXTENT_MAP_LAST_BYTE) { | 3081 | if (em->block_start == EXTENT_MAP_LAST_BYTE) { |
2993 | end = 1; | 3082 | end = 1; |
2994 | flags |= FIEMAP_EXTENT_LAST; | 3083 | flags |= FIEMAP_EXTENT_LAST; |
@@ -2999,42 +3088,34 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
2999 | flags |= (FIEMAP_EXTENT_DELALLOC | | 3088 | flags |= (FIEMAP_EXTENT_DELALLOC | |
3000 | FIEMAP_EXTENT_UNKNOWN); | 3089 | FIEMAP_EXTENT_UNKNOWN); |
3001 | } else { | 3090 | } else { |
3002 | disko = em->block_start; | 3091 | disko = em->block_start + offset_in_extent; |
3003 | } | 3092 | } |
3004 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) | 3093 | if (test_bit(EXTENT_FLAG_COMPRESSED, &em->flags)) |
3005 | flags |= FIEMAP_EXTENT_ENCODED; | 3094 | flags |= FIEMAP_EXTENT_ENCODED; |
3006 | 3095 | ||
3007 | next: | ||
3008 | emflags = em->flags; | ||
3009 | free_extent_map(em); | 3096 | free_extent_map(em); |
3010 | em = NULL; | 3097 | em = NULL; |
3011 | if (!end) { | 3098 | if ((em_start >= last) || em_len == (u64)-1 || |
3012 | em = get_extent(inode, NULL, 0, off, max - off, 0); | 3099 | (last == (u64)-1 && isize <= em_end)) { |
3013 | if (!em) | ||
3014 | goto out; | ||
3015 | if (IS_ERR(em)) { | ||
3016 | ret = PTR_ERR(em); | ||
3017 | goto out; | ||
3018 | } | ||
3019 | emflags = em->flags; | ||
3020 | } | ||
3021 | |||
3022 | if (test_bit(EXTENT_FLAG_VACANCY, &emflags)) { | ||
3023 | flags |= FIEMAP_EXTENT_LAST; | 3100 | flags |= FIEMAP_EXTENT_LAST; |
3024 | end = 1; | 3101 | end = 1; |
3025 | } | 3102 | } |
3026 | 3103 | ||
3027 | if (em_start == last) { | 3104 | /* now scan forward to see if this is really the last extent. */ |
3105 | em = get_extent_skip_holes(inode, off, last_for_get_extent, | ||
3106 | get_extent); | ||
3107 | if (IS_ERR(em)) { | ||
3108 | ret = PTR_ERR(em); | ||
3109 | goto out; | ||
3110 | } | ||
3111 | if (!em) { | ||
3028 | flags |= FIEMAP_EXTENT_LAST; | 3112 | flags |= FIEMAP_EXTENT_LAST; |
3029 | end = 1; | 3113 | end = 1; |
3030 | } | 3114 | } |
3031 | 3115 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | |
3032 | if (!hole) { | 3116 | em_len, flags); |
3033 | ret = fiemap_fill_next_extent(fieinfo, em_start, disko, | 3117 | if (ret) |
3034 | em_len, flags); | 3118 | goto out_free; |
3035 | if (ret) | ||
3036 | goto out_free; | ||
3037 | } | ||
3038 | } | 3119 | } |
3039 | out_free: | 3120 | out_free: |
3040 | free_extent_map(em); | 3121 | free_extent_map(em); |
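Since get_extent may return an extent that begins before the requested offset, the loop now clamps em_start to the request and shifts the reported disk offset by the same offset_in_extent, which keeps successive fiemap records moving forward without overlap. The clamping arithmetic in isolation, with illustrative names:

#include <stdint.h>
#include <stdio.h>

struct demo_map {
	uint64_t start;		/* file offset where the extent begins */
	uint64_t len;		/* length of the extent */
	uint64_t block_start;	/* disk offset of the extent's first byte */
};

int main(void)
{
	struct demo_map em = { .start = 0, .len = 1 << 20, .block_start = 4096 };
	uint64_t off = 8192;			/* caller asked from 8192 onward */

	/* clamp the start to the request and carry the shift to the disk side */
	uint64_t em_start = em.start > off ? em.start : off;
	uint64_t offset_in_extent = em_start - em.start;
	uint64_t em_end = em.start + em.len;
	uint64_t em_len = em_end - em_start;
	uint64_t disko = em.block_start + offset_in_extent;

	printf("logical %llu..%llu -> disk %llu (len %llu)\n",
	       (unsigned long long)em_start,
	       (unsigned long long)(em_start + em_len),
	       (unsigned long long)disko,
	       (unsigned long long)em_len);
	return 0;
}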
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 7083cfafd061..9318dfefd59c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -191,7 +191,7 @@ void extent_io_exit(void); | |||
191 | 191 | ||
192 | u64 count_range_bits(struct extent_io_tree *tree, | 192 | u64 count_range_bits(struct extent_io_tree *tree, |
193 | u64 *start, u64 search_end, | 193 | u64 *start, u64 search_end, |
194 | u64 max_bytes, unsigned long bits); | 194 | u64 max_bytes, unsigned long bits, int contig); |
195 | 195 | ||
196 | void free_extent_state(struct extent_state *state); | 196 | void free_extent_state(struct extent_state *state); |
197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, | 197 | int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 7084140d5940..f447b783bb84 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -70,6 +70,19 @@ static noinline int btrfs_copy_from_user(loff_t pos, int num_pages, | |||
70 | 70 | ||
71 | /* Flush processor's dcache for this page */ | 71 | /* Flush processor's dcache for this page */ |
72 | flush_dcache_page(page); | 72 | flush_dcache_page(page); |
73 | |||
74 | /* | ||
75 | * if we get a partial write, we can end up with | ||
76 | * partially up to date pages. These add | ||
77 | * a lot of complexity, so make sure they don't | ||
78 | * happen by forcing this copy to be retried. | ||
79 | * | ||
80 | * The rest of the btrfs_file_write code will fall | ||
81 | * back to page at a time copies after we return 0. | ||
82 | */ | ||
83 | if (!PageUptodate(page) && copied < count) | ||
84 | copied = 0; | ||
85 | |||
73 | iov_iter_advance(i, copied); | 86 | iov_iter_advance(i, copied); |
74 | write_bytes -= copied; | 87 | write_bytes -= copied; |
75 | total_copied += copied; | 88 | total_copied += copied; |
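btrfs_copy_from_user() now treats a short copy into a page that was not already up to date as if nothing were copied, forcing the caller to retry; btrfs_file_aio_write then drops to one page per iteration (nrptrs = 1) so the retry can make progress. The decision on its own, as a sketch rather than the kernel helpers:

#include <stddef.h>
#include <stdbool.h>

/* Decide what to report for one page after copying 'copied' of 'count'
 * requested bytes into it. */
static size_t report_copied(size_t copied, size_t count, bool page_uptodate)
{
	/* A short copy into a page that was never fully read in would leave
	 * a partially up-to-date page; report 0 so the caller retries. */
	if (!page_uptodate && copied < count)
		return 0;
	return copied;
}

int main(void)
{
	/* short fault into a fresh page: the caller must retry the page */
	return report_copied(100, 4096, false) == 0 ? 0 : 1;
}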
@@ -763,6 +776,27 @@ out: | |||
763 | } | 776 | } |
764 | 777 | ||
765 | /* | 778 | /* |
779 | * on error we return an unlocked page and the error value | ||
780 | * on success we return a locked page and 0 | ||
781 | */ | ||
782 | static int prepare_uptodate_page(struct page *page, u64 pos) | ||
783 | { | ||
784 | int ret = 0; | ||
785 | |||
786 | if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { | ||
787 | ret = btrfs_readpage(NULL, page); | ||
788 | if (ret) | ||
789 | return ret; | ||
790 | lock_page(page); | ||
791 | if (!PageUptodate(page)) { | ||
792 | unlock_page(page); | ||
793 | return -EIO; | ||
794 | } | ||
795 | } | ||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | /* | ||
766 | * this gets pages into the page cache and locks them down, it also properly | 800 | * this gets pages into the page cache and locks them down, it also properly |
767 | * waits for data=ordered extents to finish before allowing the pages to be | 801 | * waits for data=ordered extents to finish before allowing the pages to be |
768 | * modified. | 802 | * modified. |
@@ -777,6 +811,7 @@ static noinline int prepare_pages(struct btrfs_root *root, struct file *file, | |||
777 | unsigned long index = pos >> PAGE_CACHE_SHIFT; | 811 | unsigned long index = pos >> PAGE_CACHE_SHIFT; |
778 | struct inode *inode = fdentry(file)->d_inode; | 812 | struct inode *inode = fdentry(file)->d_inode; |
779 | int err = 0; | 813 | int err = 0; |
814 | int faili = 0; | ||
780 | u64 start_pos; | 815 | u64 start_pos; |
781 | u64 last_pos; | 816 | u64 last_pos; |
782 | 817 | ||
@@ -794,15 +829,24 @@ again: | |||
794 | for (i = 0; i < num_pages; i++) { | 829 | for (i = 0; i < num_pages; i++) { |
795 | pages[i] = grab_cache_page(inode->i_mapping, index + i); | 830 | pages[i] = grab_cache_page(inode->i_mapping, index + i); |
796 | if (!pages[i]) { | 831 | if (!pages[i]) { |
797 | int c; | 832 | faili = i - 1; |
798 | for (c = i - 1; c >= 0; c--) { | 833 | err = -ENOMEM; |
799 | unlock_page(pages[c]); | 834 | goto fail; |
800 | page_cache_release(pages[c]); | 835 | } |
801 | } | 836 | |
802 | return -ENOMEM; | 837 | if (i == 0) |
838 | err = prepare_uptodate_page(pages[i], pos); | ||
839 | if (i == num_pages - 1) | ||
840 | err = prepare_uptodate_page(pages[i], | ||
841 | pos + write_bytes); | ||
842 | if (err) { | ||
843 | page_cache_release(pages[i]); | ||
844 | faili = i - 1; | ||
845 | goto fail; | ||
803 | } | 846 | } |
804 | wait_on_page_writeback(pages[i]); | 847 | wait_on_page_writeback(pages[i]); |
805 | } | 848 | } |
849 | err = 0; | ||
806 | if (start_pos < inode->i_size) { | 850 | if (start_pos < inode->i_size) { |
807 | struct btrfs_ordered_extent *ordered; | 851 | struct btrfs_ordered_extent *ordered; |
808 | lock_extent_bits(&BTRFS_I(inode)->io_tree, | 852 | lock_extent_bits(&BTRFS_I(inode)->io_tree, |
@@ -842,6 +886,14 @@ again: | |||
842 | WARN_ON(!PageLocked(pages[i])); | 886 | WARN_ON(!PageLocked(pages[i])); |
843 | } | 887 | } |
844 | return 0; | 888 | return 0; |
889 | fail: | ||
890 | while (faili >= 0) { | ||
891 | unlock_page(pages[faili]); | ||
892 | page_cache_release(pages[faili]); | ||
893 | faili--; | ||
894 | } | ||
895 | return err; | ||
896 | |||
845 | } | 897 | } |
846 | 898 | ||
847 | static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | 899 | static ssize_t btrfs_file_aio_write(struct kiocb *iocb, |
@@ -851,7 +903,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
851 | struct file *file = iocb->ki_filp; | 903 | struct file *file = iocb->ki_filp; |
852 | struct inode *inode = fdentry(file)->d_inode; | 904 | struct inode *inode = fdentry(file)->d_inode; |
853 | struct btrfs_root *root = BTRFS_I(inode)->root; | 905 | struct btrfs_root *root = BTRFS_I(inode)->root; |
854 | struct page *pinned[2]; | ||
855 | struct page **pages = NULL; | 906 | struct page **pages = NULL; |
856 | struct iov_iter i; | 907 | struct iov_iter i; |
857 | loff_t *ppos = &iocb->ki_pos; | 908 | loff_t *ppos = &iocb->ki_pos; |
@@ -872,9 +923,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
872 | will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || | 923 | will_write = ((file->f_flags & O_DSYNC) || IS_SYNC(inode) || |
873 | (file->f_flags & O_DIRECT)); | 924 | (file->f_flags & O_DIRECT)); |
874 | 925 | ||
875 | pinned[0] = NULL; | ||
876 | pinned[1] = NULL; | ||
877 | |||
878 | start_pos = pos; | 926 | start_pos = pos; |
879 | 927 | ||
880 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); | 928 | vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE); |
@@ -962,32 +1010,6 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
962 | first_index = pos >> PAGE_CACHE_SHIFT; | 1010 | first_index = pos >> PAGE_CACHE_SHIFT; |
963 | last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; | 1011 | last_index = (pos + iov_iter_count(&i)) >> PAGE_CACHE_SHIFT; |
964 | 1012 | ||
965 | /* | ||
966 | * there are lots of better ways to do this, but this code | ||
967 | * makes sure the first and last page in the file range are | ||
968 | * up to date and ready for cow | ||
969 | */ | ||
970 | if ((pos & (PAGE_CACHE_SIZE - 1))) { | ||
971 | pinned[0] = grab_cache_page(inode->i_mapping, first_index); | ||
972 | if (!PageUptodate(pinned[0])) { | ||
973 | ret = btrfs_readpage(NULL, pinned[0]); | ||
974 | BUG_ON(ret); | ||
975 | wait_on_page_locked(pinned[0]); | ||
976 | } else { | ||
977 | unlock_page(pinned[0]); | ||
978 | } | ||
979 | } | ||
980 | if ((pos + iov_iter_count(&i)) & (PAGE_CACHE_SIZE - 1)) { | ||
981 | pinned[1] = grab_cache_page(inode->i_mapping, last_index); | ||
982 | if (!PageUptodate(pinned[1])) { | ||
983 | ret = btrfs_readpage(NULL, pinned[1]); | ||
984 | BUG_ON(ret); | ||
985 | wait_on_page_locked(pinned[1]); | ||
986 | } else { | ||
987 | unlock_page(pinned[1]); | ||
988 | } | ||
989 | } | ||
990 | |||
991 | while (iov_iter_count(&i) > 0) { | 1013 | while (iov_iter_count(&i) > 0) { |
992 | size_t offset = pos & (PAGE_CACHE_SIZE - 1); | 1014 | size_t offset = pos & (PAGE_CACHE_SIZE - 1); |
993 | size_t write_bytes = min(iov_iter_count(&i), | 1015 | size_t write_bytes = min(iov_iter_count(&i), |
@@ -1024,8 +1046,20 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb, | |||
1024 | 1046 | ||
1025 | copied = btrfs_copy_from_user(pos, num_pages, | 1047 | copied = btrfs_copy_from_user(pos, num_pages, |
1026 | write_bytes, pages, &i); | 1048 | write_bytes, pages, &i); |
1027 | dirty_pages = (copied + offset + PAGE_CACHE_SIZE - 1) >> | 1049 | |
1028 | PAGE_CACHE_SHIFT; | 1050 | /* |
1051 | * if we have trouble faulting in the pages, fall | ||
1052 | * back to one page at a time | ||
1053 | */ | ||
1054 | if (copied < write_bytes) | ||
1055 | nrptrs = 1; | ||
1056 | |||
1057 | if (copied == 0) | ||
1058 | dirty_pages = 0; | ||
1059 | else | ||
1060 | dirty_pages = (copied + offset + | ||
1061 | PAGE_CACHE_SIZE - 1) >> | ||
1062 | PAGE_CACHE_SHIFT; | ||
1029 | 1063 | ||
1030 | if (num_pages > dirty_pages) { | 1064 | if (num_pages > dirty_pages) { |
1031 | if (copied > 0) | 1065 | if (copied > 0) |
@@ -1069,10 +1103,6 @@ out: | |||
1069 | err = ret; | 1103 | err = ret; |
1070 | 1104 | ||
1071 | kfree(pages); | 1105 | kfree(pages); |
1072 | if (pinned[0]) | ||
1073 | page_cache_release(pinned[0]); | ||
1074 | if (pinned[1]) | ||
1075 | page_cache_release(pinned[1]); | ||
1076 | *ppos = pos; | 1106 | *ppos = pos; |
1077 | 1107 | ||
1078 | /* | 1108 | /* |
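
A note on the write-path hunks above: btrfs_copy_from_user() may copy less than requested when the user buffer faults, so the loop now drops to one page per iteration and only rounds up the dirty-page count when something was actually copied. The rounding itself is easy to check in isolation; the sketch below is plain userspace C (not btrfs code) and assumes 4096-byte pages.

#include <stdio.h>

#define PAGE_SIZE 4096UL

/*
 * Mirror of the rounding in the hunk above: how many page-cache pages are
 * dirtied when `copied` bytes land at byte `offset` of the first page.
 * Returns 0 when the copy faulted before moving any data.
 */
static unsigned long dirty_pages(unsigned long offset, unsigned long copied)
{
        if (copied == 0)
                return 0;
        return (copied + offset + PAGE_SIZE - 1) / PAGE_SIZE;
}

int main(void)
{
        printf("%lu\n", dirty_pages(100, 10));   /* stays in one page -> 1 */
        printf("%lu\n", dirty_pages(4000, 200)); /* crosses a boundary -> 2 */
        printf("%lu\n", dirty_pages(100, 0));    /* nothing copied -> 0, not 1 */
        return 0;
}

Without the copied == 0 guard, a fully faulted copy at a non-zero page offset would still be counted as one dirty page and have to be cleaned up as if data had been written.
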
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index fb9bd7832b6d..9007bbd01dbf 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -1913,7 +1913,7 @@ static int btrfs_clean_io_failures(struct inode *inode, u64 start) | |||
1913 | 1913 | ||
1914 | private = 0; | 1914 | private = 0; |
1915 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, | 1915 | if (count_range_bits(&BTRFS_I(inode)->io_failure_tree, &private, |
1916 | (u64)-1, 1, EXTENT_DIRTY)) { | 1916 | (u64)-1, 1, EXTENT_DIRTY, 0)) { |
1917 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, | 1917 | ret = get_state_private(&BTRFS_I(inode)->io_failure_tree, |
1918 | start, &private_failure); | 1918 | start, &private_failure); |
1919 | if (ret == 0) { | 1919 | if (ret == 0) { |
@@ -4821,10 +4821,11 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir, | |||
4821 | goto fail; | 4821 | goto fail; |
4822 | 4822 | ||
4823 | /* | 4823 | /* |
4824 | * 1 item for inode ref | 4824 | * 2 items for inode and inode ref |
4825 | * 2 items for dir items | 4825 | * 2 items for dir items |
4826 | * 1 item for parent inode | ||
4826 | */ | 4827 | */ |
4827 | trans = btrfs_start_transaction(root, 3); | 4828 | trans = btrfs_start_transaction(root, 5); |
4828 | if (IS_ERR(trans)) { | 4829 | if (IS_ERR(trans)) { |
4829 | err = PTR_ERR(trans); | 4830 | err = PTR_ERR(trans); |
4830 | goto fail; | 4831 | goto fail; |
@@ -5280,6 +5281,128 @@ out: | |||
5280 | return em; | 5281 | return em; |
5281 | } | 5282 | } |
5282 | 5283 | ||
5284 | struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *page, | ||
5285 | size_t pg_offset, u64 start, u64 len, | ||
5286 | int create) | ||
5287 | { | ||
5288 | struct extent_map *em; | ||
5289 | struct extent_map *hole_em = NULL; | ||
5290 | u64 range_start = start; | ||
5291 | u64 end; | ||
5292 | u64 found; | ||
5293 | u64 found_end; | ||
5294 | int err = 0; | ||
5295 | |||
5296 | em = btrfs_get_extent(inode, page, pg_offset, start, len, create); | ||
5297 | if (IS_ERR(em)) | ||
5298 | return em; | ||
5299 | if (em) { | ||
5300 | /* | ||
5301 | * if our em maps to a hole, there might | ||
5302 | * actually be delalloc bytes behind it | ||
5303 | */ | ||
5304 | if (em->block_start != EXTENT_MAP_HOLE) | ||
5305 | return em; | ||
5306 | else | ||
5307 | hole_em = em; | ||
5308 | } | ||
5309 | |||
5310 | /* check to see if we've wrapped (len == -1 or similar) */ | ||
5311 | end = start + len; | ||
5312 | if (end < start) | ||
5313 | end = (u64)-1; | ||
5314 | else | ||
5315 | end -= 1; | ||
5316 | |||
5317 | em = NULL; | ||
5318 | |||
5319 | /* ok, we didn't find anything, let's look for delalloc */ | ||
5320 | found = count_range_bits(&BTRFS_I(inode)->io_tree, &range_start, | ||
5321 | end, len, EXTENT_DELALLOC, 1); | ||
5322 | found_end = range_start + found; | ||
5323 | if (found_end < range_start) | ||
5324 | found_end = (u64)-1; | ||
5325 | |||
5326 | /* | ||
5327 | * we didn't find anything useful, return | ||
5328 | * the original results from get_extent() | ||
5329 | */ | ||
5330 | if (range_start > end || found_end <= start) { | ||
5331 | em = hole_em; | ||
5332 | hole_em = NULL; | ||
5333 | goto out; | ||
5334 | } | ||
5335 | |||
5336 | /* adjust the range_start to make sure it doesn't | ||
5337 | * go backwards from the start they passed in | ||
5338 | */ | ||
5339 | range_start = max(start, range_start); | ||
5340 | found = found_end - range_start; | ||
5341 | |||
5342 | if (found > 0) { | ||
5343 | u64 hole_start = start; | ||
5344 | u64 hole_len = len; | ||
5345 | |||
5346 | em = alloc_extent_map(GFP_NOFS); | ||
5347 | if (!em) { | ||
5348 | err = -ENOMEM; | ||
5349 | goto out; | ||
5350 | } | ||
5351 | /* | ||
5352 | * when btrfs_get_extent can't find anything it | ||
5353 | * returns one huge hole | ||
5354 | * | ||
5355 | * make sure what it found really fits our range, and | ||
5356 | * adjust to make sure it is based on the start from | ||
5357 | * the caller | ||
5358 | */ | ||
5359 | if (hole_em) { | ||
5360 | u64 calc_end = extent_map_end(hole_em); | ||
5361 | |||
5362 | if (calc_end <= start || (hole_em->start > end)) { | ||
5363 | free_extent_map(hole_em); | ||
5364 | hole_em = NULL; | ||
5365 | } else { | ||
5366 | hole_start = max(hole_em->start, start); | ||
5367 | hole_len = calc_end - hole_start; | ||
5368 | } | ||
5369 | } | ||
5370 | em->bdev = NULL; | ||
5371 | if (hole_em && range_start > hole_start) { | ||
5372 | /* our hole starts before our delalloc, so we | ||
5373 | * have to return just the parts of the hole | ||
5374 | * that go until the delalloc starts | ||
5375 | */ | ||
5376 | em->len = min(hole_len, | ||
5377 | range_start - hole_start); | ||
5378 | em->start = hole_start; | ||
5379 | em->orig_start = hole_start; | ||
5380 | /* | ||
5381 | * don't adjust block start at all, | ||
5382 | * it is fixed at EXTENT_MAP_HOLE | ||
5383 | */ | ||
5384 | em->block_start = hole_em->block_start; | ||
5385 | em->block_len = hole_len; | ||
5386 | } else { | ||
5387 | em->start = range_start; | ||
5388 | em->len = found; | ||
5389 | em->orig_start = range_start; | ||
5390 | em->block_start = EXTENT_MAP_DELALLOC; | ||
5391 | em->block_len = found; | ||
5392 | } | ||
5393 | } else if (hole_em) { | ||
5394 | return hole_em; | ||
5395 | } | ||
5396 | out: | ||
5397 | |||
5398 | free_extent_map(hole_em); | ||
5399 | if (err) { | ||
5400 | free_extent_map(em); | ||
5401 | return ERR_PTR(err); | ||
5402 | } | ||
5403 | return em; | ||
5404 | } | ||
5405 | |||
5283 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | 5406 | static struct extent_map *btrfs_new_extent_direct(struct inode *inode, |
5284 | u64 start, u64 len) | 5407 | u64 start, u64 len) |
5285 | { | 5408 | { |
@@ -5934,6 +6057,7 @@ static void btrfs_submit_direct(int rw, struct bio *bio, struct inode *inode, | |||
5934 | if (!skip_sum) { | 6057 | if (!skip_sum) { |
5935 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); | 6058 | dip->csums = kmalloc(sizeof(u32) * bio->bi_vcnt, GFP_NOFS); |
5936 | if (!dip->csums) { | 6059 | if (!dip->csums) { |
6060 | kfree(dip); | ||
5937 | ret = -ENOMEM; | 6061 | ret = -ENOMEM; |
5938 | goto free_ordered; | 6062 | goto free_ordered; |
5939 | } | 6063 | } |
@@ -6102,7 +6226,7 @@ out: | |||
6102 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 6226 | static int btrfs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
6103 | __u64 start, __u64 len) | 6227 | __u64 start, __u64 len) |
6104 | { | 6228 | { |
6105 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent); | 6229 | return extent_fiemap(inode, fieinfo, start, len, btrfs_get_extent_fiemap); |
6106 | } | 6230 | } |
6107 | 6231 | ||
6108 | int btrfs_readpage(struct file *file, struct page *page) | 6232 | int btrfs_readpage(struct file *file, struct page *page) |
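
The btrfs_get_extent_fiemap() helper added above makes FIEMAP report ranges still sitting in delayed allocation instead of mapping them as holes. The effect can be observed through the generic FIEMAP ioctl; the following userspace dumper uses only the standard linux/fiemap.h interface (the 32-extent buffer is an arbitrary size chosen for the example).

#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
        struct fiemap *fm;
        unsigned int i;
        int fd;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }
        fd = open(argv[1], O_RDONLY);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* room for up to 32 extents in a single call */
        fm = calloc(1, sizeof(*fm) + 32 * sizeof(struct fiemap_extent));
        if (!fm)
                return 1;
        fm->fm_start = 0;
        fm->fm_length = ~0ULL;          /* map the whole file */
        fm->fm_extent_count = 32;
        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                perror("FS_IOC_FIEMAP");
                return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++) {
                struct fiemap_extent *fe = &fm->fm_extents[i];

                printf("logical %llu len %llu flags 0x%x%s\n",
                       (unsigned long long)fe->fe_logical,
                       (unsigned long long)fe->fe_length,
                       fe->fe_flags,
                       (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ?
                                " (delalloc)" : "");
        }
        free(fm);
        close(fd);
        return 0;
}

On a file with dirty, not-yet-allocated data, extents carrying FIEMAP_EXTENT_DELALLOC should now appear where such ranges previously went unreported.
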
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index be2d4f6aaa5e..5fdb2abc4fa7 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1071,12 +1071,15 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1071 | if (copy_from_user(&flags, arg, sizeof(flags))) | 1071 | if (copy_from_user(&flags, arg, sizeof(flags))) |
1072 | return -EFAULT; | 1072 | return -EFAULT; |
1073 | 1073 | ||
1074 | if (flags & ~BTRFS_SUBVOL_CREATE_ASYNC) | 1074 | if (flags & BTRFS_SUBVOL_CREATE_ASYNC) |
1075 | return -EINVAL; | 1075 | return -EINVAL; |
1076 | 1076 | ||
1077 | if (flags & ~BTRFS_SUBVOL_RDONLY) | 1077 | if (flags & ~BTRFS_SUBVOL_RDONLY) |
1078 | return -EOPNOTSUPP; | 1078 | return -EOPNOTSUPP; |
1079 | 1079 | ||
1080 | if (!is_owner_or_cap(inode)) | ||
1081 | return -EACCES; | ||
1082 | |||
1080 | down_write(&root->fs_info->subvol_sem); | 1083 | down_write(&root->fs_info->subvol_sem); |
1081 | 1084 | ||
1082 | /* nothing to do */ | 1085 | /* nothing to do */ |
@@ -1097,7 +1100,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1097 | goto out_reset; | 1100 | goto out_reset; |
1098 | } | 1101 | } |
1099 | 1102 | ||
1100 | ret = btrfs_update_root(trans, root, | 1103 | ret = btrfs_update_root(trans, root->fs_info->tree_root, |
1101 | &root->root_key, &root->root_item); | 1104 | &root->root_key, &root->root_item); |
1102 | 1105 | ||
1103 | btrfs_commit_transaction(trans, root); | 1106 | btrfs_commit_transaction(trans, root); |
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c index cc9b450399df..a178f5ebea78 100644 --- a/fs/btrfs/lzo.c +++ b/fs/btrfs/lzo.c | |||
@@ -280,6 +280,7 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
280 | unsigned long tot_out; | 280 | unsigned long tot_out; |
281 | unsigned long tot_len; | 281 | unsigned long tot_len; |
282 | char *buf; | 282 | char *buf; |
283 | bool may_late_unmap, need_unmap; | ||
283 | 284 | ||
284 | data_in = kmap(pages_in[0]); | 285 | data_in = kmap(pages_in[0]); |
285 | tot_len = read_compress_length(data_in); | 286 | tot_len = read_compress_length(data_in); |
@@ -300,11 +301,13 @@ static int lzo_decompress_biovec(struct list_head *ws, | |||
300 | 301 | ||
301 | tot_in += in_len; | 302 | tot_in += in_len; |
302 | working_bytes = in_len; | 303 | working_bytes = in_len; |
304 | may_late_unmap = need_unmap = false; | ||
303 | 305 | ||
304 | /* fast path: avoid using the working buffer */ | 306 | /* fast path: avoid using the working buffer */ |
305 | if (in_page_bytes_left >= in_len) { | 307 | if (in_page_bytes_left >= in_len) { |
306 | buf = data_in + in_offset; | 308 | buf = data_in + in_offset; |
307 | bytes = in_len; | 309 | bytes = in_len; |
310 | may_late_unmap = true; | ||
308 | goto cont; | 311 | goto cont; |
309 | } | 312 | } |
310 | 313 | ||
@@ -329,14 +332,17 @@ cont: | |||
329 | if (working_bytes == 0 && tot_in >= tot_len) | 332 | if (working_bytes == 0 && tot_in >= tot_len) |
330 | break; | 333 | break; |
331 | 334 | ||
332 | kunmap(pages_in[page_in_index]); | 335 | if (page_in_index + 1 >= total_pages_in) { |
333 | page_in_index++; | ||
334 | if (page_in_index >= total_pages_in) { | ||
335 | ret = -1; | 336 | ret = -1; |
336 | data_in = NULL; | ||
337 | goto done; | 337 | goto done; |
338 | } | 338 | } |
339 | data_in = kmap(pages_in[page_in_index]); | 339 | |
340 | if (may_late_unmap) | ||
341 | need_unmap = true; | ||
342 | else | ||
343 | kunmap(pages_in[page_in_index]); | ||
344 | |||
345 | data_in = kmap(pages_in[++page_in_index]); | ||
340 | 346 | ||
341 | in_page_bytes_left = PAGE_CACHE_SIZE; | 347 | in_page_bytes_left = PAGE_CACHE_SIZE; |
342 | in_offset = 0; | 348 | in_offset = 0; |
@@ -346,6 +352,8 @@ cont: | |||
346 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); | 352 | out_len = lzo1x_worst_compress(PAGE_CACHE_SIZE); |
347 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, | 353 | ret = lzo1x_decompress_safe(buf, in_len, workspace->buf, |
348 | &out_len); | 354 | &out_len); |
355 | if (need_unmap) | ||
356 | kunmap(pages_in[page_in_index - 1]); | ||
349 | if (ret != LZO_E_OK) { | 357 | if (ret != LZO_E_OK) { |
350 | printk(KERN_WARNING "btrfs decompress failed\n"); | 358 | printk(KERN_WARNING "btrfs decompress failed\n"); |
351 | ret = -1; | 359 | ret = -1; |
@@ -363,8 +371,7 @@ cont: | |||
363 | break; | 371 | break; |
364 | } | 372 | } |
365 | done: | 373 | done: |
366 | if (data_in) | 374 | kunmap(pages_in[page_in_index]); |
367 | kunmap(pages_in[page_in_index]); | ||
368 | return ret; | 375 | return ret; |
369 | } | 376 | } |
370 | 377 | ||
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 0825e4ed9447..31ade5802ae8 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -3654,6 +3654,7 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3654 | u32 item_size; | 3654 | u32 item_size; |
3655 | int ret; | 3655 | int ret; |
3656 | int err = 0; | 3656 | int err = 0; |
3657 | int progress = 0; | ||
3657 | 3658 | ||
3658 | path = btrfs_alloc_path(); | 3659 | path = btrfs_alloc_path(); |
3659 | if (!path) | 3660 | if (!path) |
@@ -3666,9 +3667,10 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3666 | } | 3667 | } |
3667 | 3668 | ||
3668 | while (1) { | 3669 | while (1) { |
3670 | progress++; | ||
3669 | trans = btrfs_start_transaction(rc->extent_root, 0); | 3671 | trans = btrfs_start_transaction(rc->extent_root, 0); |
3670 | BUG_ON(IS_ERR(trans)); | 3672 | BUG_ON(IS_ERR(trans)); |
3671 | 3673 | restart: | |
3672 | if (update_backref_cache(trans, &rc->backref_cache)) { | 3674 | if (update_backref_cache(trans, &rc->backref_cache)) { |
3673 | btrfs_end_transaction(trans, rc->extent_root); | 3675 | btrfs_end_transaction(trans, rc->extent_root); |
3674 | continue; | 3676 | continue; |
@@ -3781,6 +3783,15 @@ static noinline_for_stack int relocate_block_group(struct reloc_control *rc) | |||
3781 | } | 3783 | } |
3782 | } | 3784 | } |
3783 | } | 3785 | } |
3786 | if (trans && progress && err == -ENOSPC) { | ||
3787 | ret = btrfs_force_chunk_alloc(trans, rc->extent_root, | ||
3788 | rc->block_group->flags); | ||
3789 | if (ret == 0) { | ||
3790 | err = 0; | ||
3791 | progress = 0; | ||
3792 | goto restart; | ||
3793 | } | ||
3794 | } | ||
3784 | 3795 | ||
3785 | btrfs_release_path(rc->extent_root, path); | 3796 | btrfs_release_path(rc->extent_root, path); |
3786 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, | 3797 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, |
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index a004008f7d28..d39a9895d932 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -155,7 +155,8 @@ enum { | |||
155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, | 155 | Opt_nossd, Opt_ssd_spread, Opt_thread_pool, Opt_noacl, Opt_compress, |
156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, | 156 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, | 157 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, Opt_err, | 158 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, |
159 | Opt_enospc_debug, Opt_err, | ||
159 | }; | 160 | }; |
160 | 161 | ||
161 | static match_table_t tokens = { | 162 | static match_table_t tokens = { |
@@ -184,6 +185,7 @@ static match_table_t tokens = { | |||
184 | {Opt_space_cache, "space_cache"}, | 185 | {Opt_space_cache, "space_cache"}, |
185 | {Opt_clear_cache, "clear_cache"}, | 186 | {Opt_clear_cache, "clear_cache"}, |
186 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, | 187 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, |
188 | {Opt_enospc_debug, "enospc_debug"}, | ||
187 | {Opt_err, NULL}, | 189 | {Opt_err, NULL}, |
188 | }; | 190 | }; |
189 | 191 | ||
@@ -358,6 +360,9 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
358 | case Opt_user_subvol_rm_allowed: | 360 | case Opt_user_subvol_rm_allowed: |
359 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); | 361 | btrfs_set_opt(info->mount_opt, USER_SUBVOL_RM_ALLOWED); |
360 | break; | 362 | break; |
363 | case Opt_enospc_debug: | ||
364 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); | ||
365 | break; | ||
361 | case Opt_err: | 366 | case Opt_err: |
362 | printk(KERN_INFO "btrfs: unrecognized mount option " | 367 | printk(KERN_INFO "btrfs: unrecognized mount option " |
363 | "'%s'\n", p); | 368 | "'%s'\n", p); |
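
The new enospc_debug token is parsed like any other btrfs mount option, so it can be supplied in the data argument of mount(2) or via -o enospc_debug with mount(8). A minimal sketch, with placeholder device and mount-point paths:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* device and mount point are placeholders for the example */
        if (mount("/dev/sdb1", "/mnt/btrfs", "btrfs", 0, "enospc_debug") < 0) {
                perror("mount");
                return 1;
        }
        printf("mounted with enospc_debug\n");
        return 0;
}
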
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index af7dbca15276..dd13eb81ee40 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -1338,11 +1338,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1338 | 1338 | ||
1339 | ret = btrfs_shrink_device(device, 0); | 1339 | ret = btrfs_shrink_device(device, 0); |
1340 | if (ret) | 1340 | if (ret) |
1341 | goto error_brelse; | 1341 | goto error_undo; |
1342 | 1342 | ||
1343 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); | 1343 | ret = btrfs_rm_dev_item(root->fs_info->chunk_root, device); |
1344 | if (ret) | 1344 | if (ret) |
1345 | goto error_brelse; | 1345 | goto error_undo; |
1346 | 1346 | ||
1347 | device->in_fs_metadata = 0; | 1347 | device->in_fs_metadata = 0; |
1348 | 1348 | ||
@@ -1416,6 +1416,13 @@ out: | |||
1416 | mutex_unlock(&root->fs_info->volume_mutex); | 1416 | mutex_unlock(&root->fs_info->volume_mutex); |
1417 | mutex_unlock(&uuid_mutex); | 1417 | mutex_unlock(&uuid_mutex); |
1418 | return ret; | 1418 | return ret; |
1419 | error_undo: | ||
1420 | if (device->writeable) { | ||
1421 | list_add(&device->dev_alloc_list, | ||
1422 | &root->fs_info->fs_devices->alloc_list); | ||
1423 | root->fs_info->fs_devices->rw_devices++; | ||
1424 | } | ||
1425 | goto error_brelse; | ||
1419 | } | 1426 | } |
1420 | 1427 | ||
1421 | /* | 1428 | /* |
@@ -1633,7 +1640,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1633 | device->dev_root = root->fs_info->dev_root; | 1640 | device->dev_root = root->fs_info->dev_root; |
1634 | device->bdev = bdev; | 1641 | device->bdev = bdev; |
1635 | device->in_fs_metadata = 1; | 1642 | device->in_fs_metadata = 1; |
1636 | device->mode = 0; | 1643 | device->mode = FMODE_EXCL; |
1637 | set_blocksize(device->bdev, 4096); | 1644 | set_blocksize(device->bdev, 4096); |
1638 | 1645 | ||
1639 | if (seeding_dev) { | 1646 | if (seeding_dev) { |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f0aef787a102..ebafa65a29b6 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -60,7 +60,6 @@ int ceph_init_dentry(struct dentry *dentry) | |||
60 | } | 60 | } |
61 | di->dentry = dentry; | 61 | di->dentry = dentry; |
62 | di->lease_session = NULL; | 62 | di->lease_session = NULL; |
63 | di->parent_inode = igrab(dentry->d_parent->d_inode); | ||
64 | dentry->d_fsdata = di; | 63 | dentry->d_fsdata = di; |
65 | dentry->d_time = jiffies; | 64 | dentry->d_time = jiffies; |
66 | ceph_dentry_lru_add(dentry); | 65 | ceph_dentry_lru_add(dentry); |
@@ -410,7 +409,7 @@ more: | |||
410 | spin_lock(&inode->i_lock); | 409 | spin_lock(&inode->i_lock); |
411 | if (ci->i_release_count == fi->dir_release_count) { | 410 | if (ci->i_release_count == fi->dir_release_count) { |
412 | dout(" marking %p complete\n", inode); | 411 | dout(" marking %p complete\n", inode); |
413 | ci->i_ceph_flags |= CEPH_I_COMPLETE; | 412 | /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ |
414 | ci->i_max_offset = filp->f_pos; | 413 | ci->i_max_offset = filp->f_pos; |
415 | } | 414 | } |
416 | spin_unlock(&inode->i_lock); | 415 | spin_unlock(&inode->i_lock); |
@@ -497,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, | |||
497 | 496 | ||
498 | /* .snap dir? */ | 497 | /* .snap dir? */ |
499 | if (err == -ENOENT && | 498 | if (err == -ENOENT && |
499 | ceph_snap(parent) == CEPH_NOSNAP && | ||
500 | strcmp(dentry->d_name.name, | 500 | strcmp(dentry->d_name.name, |
501 | fsc->mount_options->snapdir_name) == 0) { | 501 | fsc->mount_options->snapdir_name) == 0) { |
502 | struct inode *inode = ceph_get_snapdir(parent); | 502 | struct inode *inode = ceph_get_snapdir(parent); |
@@ -993,7 +993,7 @@ static int ceph_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
993 | { | 993 | { |
994 | struct inode *dir; | 994 | struct inode *dir; |
995 | 995 | ||
996 | if (nd->flags & LOOKUP_RCU) | 996 | if (nd && nd->flags & LOOKUP_RCU) |
997 | return -ECHILD; | 997 | return -ECHILD; |
998 | 998 | ||
999 | dir = dentry->d_parent->d_inode; | 999 | dir = dentry->d_parent->d_inode; |
@@ -1030,28 +1030,8 @@ out_touch: | |||
1030 | static void ceph_dentry_release(struct dentry *dentry) | 1030 | static void ceph_dentry_release(struct dentry *dentry) |
1031 | { | 1031 | { |
1032 | struct ceph_dentry_info *di = ceph_dentry(dentry); | 1032 | struct ceph_dentry_info *di = ceph_dentry(dentry); |
1033 | struct inode *parent_inode = NULL; | ||
1034 | u64 snapid = CEPH_NOSNAP; | ||
1035 | 1033 | ||
1036 | if (!IS_ROOT(dentry)) { | 1034 | dout("dentry_release %p\n", dentry); |
1037 | parent_inode = di->parent_inode; | ||
1038 | if (parent_inode) | ||
1039 | snapid = ceph_snap(parent_inode); | ||
1040 | } | ||
1041 | dout("dentry_release %p parent %p\n", dentry, parent_inode); | ||
1042 | if (parent_inode && snapid != CEPH_SNAPDIR) { | ||
1043 | struct ceph_inode_info *ci = ceph_inode(parent_inode); | ||
1044 | |||
1045 | spin_lock(&parent_inode->i_lock); | ||
1046 | if (ci->i_shared_gen == di->lease_shared_gen || | ||
1047 | snapid <= CEPH_MAXSNAP) { | ||
1048 | dout(" clearing %p complete (d_release)\n", | ||
1049 | parent_inode); | ||
1050 | ci->i_ceph_flags &= ~CEPH_I_COMPLETE; | ||
1051 | ci->i_release_count++; | ||
1052 | } | ||
1053 | spin_unlock(&parent_inode->i_lock); | ||
1054 | } | ||
1055 | if (di) { | 1035 | if (di) { |
1056 | ceph_dentry_lru_del(dentry); | 1036 | ceph_dentry_lru_del(dentry); |
1057 | if (di->lease_session) | 1037 | if (di->lease_session) |
@@ -1059,8 +1039,6 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
1059 | kmem_cache_free(ceph_dentry_cachep, di); | 1039 | kmem_cache_free(ceph_dentry_cachep, di); |
1060 | dentry->d_fsdata = NULL; | 1040 | dentry->d_fsdata = NULL; |
1061 | } | 1041 | } |
1062 | if (parent_inode) | ||
1063 | iput(parent_inode); | ||
1064 | } | 1042 | } |
1065 | 1043 | ||
1066 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, | 1044 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 5625463aa479..193bfa5e9cbd 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
@@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode, | |||
707 | (issued & CEPH_CAP_FILE_EXCL) == 0 && | 707 | (issued & CEPH_CAP_FILE_EXCL) == 0 && |
708 | (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { | 708 | (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { |
709 | dout(" marking %p complete (empty)\n", inode); | 709 | dout(" marking %p complete (empty)\n", inode); |
710 | ci->i_ceph_flags |= CEPH_I_COMPLETE; | 710 | /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ |
711 | ci->i_max_offset = 2; | 711 | ci->i_max_offset = 2; |
712 | } | 712 | } |
713 | break; | 713 | break; |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 88fcaa21b801..20b907d76ae2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -207,7 +207,6 @@ struct ceph_dentry_info { | |||
207 | struct dentry *dentry; | 207 | struct dentry *dentry; |
208 | u64 time; | 208 | u64 time; |
209 | u64 offset; | 209 | u64 offset; |
210 | struct inode *parent_inode; | ||
211 | }; | 210 | }; |
212 | 211 | ||
213 | struct ceph_inode_xattrs_info { | 212 | struct ceph_inode_xattrs_info { |
diff --git a/fs/compat.c b/fs/compat.c index f6fd0a00e6cc..691c3fd8ce1d 100644 --- a/fs/compat.c +++ b/fs/compat.c | |||
@@ -1228,7 +1228,9 @@ compat_sys_preadv(unsigned long fd, const struct compat_iovec __user *vec, | |||
1228 | file = fget_light(fd, &fput_needed); | 1228 | file = fget_light(fd, &fput_needed); |
1229 | if (!file) | 1229 | if (!file) |
1230 | return -EBADF; | 1230 | return -EBADF; |
1231 | ret = compat_readv(file, vec, vlen, &pos); | 1231 | ret = -ESPIPE; |
1232 | if (file->f_mode & FMODE_PREAD) | ||
1233 | ret = compat_readv(file, vec, vlen, &pos); | ||
1232 | fput_light(file, fput_needed); | 1234 | fput_light(file, fput_needed); |
1233 | return ret; | 1235 | return ret; |
1234 | } | 1236 | } |
@@ -1285,7 +1287,9 @@ compat_sys_pwritev(unsigned long fd, const struct compat_iovec __user *vec, | |||
1285 | file = fget_light(fd, &fput_needed); | 1287 | file = fget_light(fd, &fput_needed); |
1286 | if (!file) | 1288 | if (!file) |
1287 | return -EBADF; | 1289 | return -EBADF; |
1288 | ret = compat_writev(file, vec, vlen, &pos); | 1290 | ret = -ESPIPE; |
1291 | if (file->f_mode & FMODE_PWRITE) | ||
1292 | ret = compat_writev(file, vec, vlen, &pos); | ||
1289 | fput_light(file, fput_needed); | 1293 | fput_light(file, fput_needed); |
1290 | return ret; | 1294 | return ret; |
1291 | } | 1295 | } |
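
With the compat changes above, preadv/pwritev issued from 32-bit tasks now honour FMODE_PREAD/FMODE_PWRITE like the native syscalls, so positioned I/O on a non-seekable file fails with -ESPIPE rather than being performed at an ignored offset. A quick userspace check (the glibc preadv wrapper is assumed to be available):

#define _GNU_SOURCE
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
        int pipefd[2];
        char buf[16];
        struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
        ssize_t n;

        if (pipe(pipefd) < 0) {
                perror("pipe");
                return 1;
        }
        /* pipes have no file position, so positioned I/O must fail */
        n = preadv(pipefd[0], &iov, 1, 0);
        if (n < 0 && errno == ESPIPE)
                printf("preadv on a pipe fails with ESPIPE, as expected\n");
        else
                printf("preadv returned %zd, errno %s\n", n, strerror(errno));
        return 0;
}
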
diff --git a/fs/dcache.c b/fs/dcache.c index 2a6bd9a4ae97..611ffe928c03 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -1523,6 +1523,28 @@ struct dentry * d_alloc_root(struct inode * root_inode) | |||
1523 | } | 1523 | } |
1524 | EXPORT_SYMBOL(d_alloc_root); | 1524 | EXPORT_SYMBOL(d_alloc_root); |
1525 | 1525 | ||
1526 | static struct dentry * __d_find_any_alias(struct inode *inode) | ||
1527 | { | ||
1528 | struct dentry *alias; | ||
1529 | |||
1530 | if (list_empty(&inode->i_dentry)) | ||
1531 | return NULL; | ||
1532 | alias = list_first_entry(&inode->i_dentry, struct dentry, d_alias); | ||
1533 | __dget(alias); | ||
1534 | return alias; | ||
1535 | } | ||
1536 | |||
1537 | static struct dentry * d_find_any_alias(struct inode *inode) | ||
1538 | { | ||
1539 | struct dentry *de; | ||
1540 | |||
1541 | spin_lock(&inode->i_lock); | ||
1542 | de = __d_find_any_alias(inode); | ||
1543 | spin_unlock(&inode->i_lock); | ||
1544 | return de; | ||
1545 | } | ||
1546 | |||
1547 | |||
1526 | /** | 1548 | /** |
1527 | * d_obtain_alias - find or allocate a dentry for a given inode | 1549 | * d_obtain_alias - find or allocate a dentry for a given inode |
1528 | * @inode: inode to allocate the dentry for | 1550 | * @inode: inode to allocate the dentry for |
@@ -1552,7 +1574,7 @@ struct dentry *d_obtain_alias(struct inode *inode) | |||
1552 | if (IS_ERR(inode)) | 1574 | if (IS_ERR(inode)) |
1553 | return ERR_CAST(inode); | 1575 | return ERR_CAST(inode); |
1554 | 1576 | ||
1555 | res = d_find_alias(inode); | 1577 | res = d_find_any_alias(inode); |
1556 | if (res) | 1578 | if (res) |
1557 | goto out_iput; | 1579 | goto out_iput; |
1558 | 1580 | ||
@@ -1565,7 +1587,7 @@ struct dentry *d_obtain_alias(struct inode *inode) | |||
1565 | 1587 | ||
1566 | 1588 | ||
1567 | spin_lock(&inode->i_lock); | 1589 | spin_lock(&inode->i_lock); |
1568 | res = __d_find_alias(inode, 0); | 1590 | res = __d_find_any_alias(inode); |
1569 | if (res) { | 1591 | if (res) { |
1570 | spin_unlock(&inode->i_lock); | 1592 | spin_unlock(&inode->i_lock); |
1571 | dput(tmp); | 1593 | dput(tmp); |
diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 267d0ada4541..4a09af9e9a63 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c | |||
@@ -63,6 +63,13 @@ | |||
63 | * cleanup path and it is also acquired by eventpoll_release_file() | 63 | * cleanup path and it is also acquired by eventpoll_release_file() |
64 | * if a file has been pushed inside an epoll set and it is then | 64 | * if a file has been pushed inside an epoll set and it is then |
65 | * close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). | 65 | close()d without a previous call to epoll_ctl(EPOLL_CTL_DEL). |
66 | * It is also acquired when inserting an epoll fd onto another epoll | ||
67 | * fd. We do this so that we walk the epoll tree and ensure that this | ||
68 | * insertion does not create a cycle of epoll file descriptors, which | ||
69 | * could lead to deadlock. We need a global mutex to prevent two | ||
70 | * simultaneous inserts (A into B and B into A) from racing and | ||
71 | * constructing a cycle without either insert observing that it is | ||
72 | * going to. | ||
66 | * It is possible to drop the "ep->mtx" and to use the global | 73 | * It is possible to drop the "ep->mtx" and to use the global |
67 | * mutex "epmutex" (together with "ep->lock") to have it working, | 74 | * mutex "epmutex" (together with "ep->lock") to have it working, |
68 | * but having "ep->mtx" will make the interface more scalable. | 75 | * but having "ep->mtx" will make the interface more scalable. |
@@ -224,6 +231,9 @@ static long max_user_watches __read_mostly; | |||
224 | */ | 231 | */ |
225 | static DEFINE_MUTEX(epmutex); | 232 | static DEFINE_MUTEX(epmutex); |
226 | 233 | ||
234 | /* Used to check for epoll file descriptor inclusion loops */ | ||
235 | static struct nested_calls poll_loop_ncalls; | ||
236 | |||
227 | /* Used for safe wake up implementation */ | 237 | /* Used for safe wake up implementation */ |
228 | static struct nested_calls poll_safewake_ncalls; | 238 | static struct nested_calls poll_safewake_ncalls; |
229 | 239 | ||
@@ -1198,6 +1208,62 @@ retry: | |||
1198 | return res; | 1208 | return res; |
1199 | } | 1209 | } |
1200 | 1210 | ||
1211 | /** | ||
1212 | * ep_loop_check_proc - Callback function to be passed to the @ep_call_nested() | ||
1213 | * API, to verify that adding an epoll file inside another | ||
1214 | * epoll structure does not violate the constraints, in | ||
1215 | * terms of closed loops or too deep chains (which can | ||
1216 | * result in excessive stack usage). | ||
1217 | * | ||
1218 | * @priv: Pointer to the epoll file to be currently checked. | ||
1219 | * @cookie: Original cookie for this call. This is the top-of-the-chain epoll | ||
1220 | * data structure pointer. | ||
1221 | * @call_nests: Current depth of the @ep_call_nested() call stack. | ||
1222 | * | ||
1223 | * Returns: Returns zero if adding the epoll @file inside current epoll | ||
1224 | * structure @ep does not violate the constraints, or -1 otherwise. | ||
1225 | */ | ||
1226 | static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) | ||
1227 | { | ||
1228 | int error = 0; | ||
1229 | struct file *file = priv; | ||
1230 | struct eventpoll *ep = file->private_data; | ||
1231 | struct rb_node *rbp; | ||
1232 | struct epitem *epi; | ||
1233 | |||
1234 | mutex_lock(&ep->mtx); | ||
1235 | for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { | ||
1236 | epi = rb_entry(rbp, struct epitem, rbn); | ||
1237 | if (unlikely(is_file_epoll(epi->ffd.file))) { | ||
1238 | error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | ||
1239 | ep_loop_check_proc, epi->ffd.file, | ||
1240 | epi->ffd.file->private_data, current); | ||
1241 | if (error != 0) | ||
1242 | break; | ||
1243 | } | ||
1244 | } | ||
1245 | mutex_unlock(&ep->mtx); | ||
1246 | |||
1247 | return error; | ||
1248 | } | ||
1249 | |||
1250 | /** | ||
1251 | * ep_loop_check - Performs a check to verify that adding an epoll file (@file) | ||
1252 | * inside another epoll file (represented by @ep) does not create | ||
1253 | * closed loops or too deep chains. | ||
1254 | * | ||
1255 | * @ep: Pointer to the epoll private data structure. | ||
1256 | * @file: Pointer to the epoll file to be checked. | ||
1257 | * | ||
1258 | * Returns: Returns zero if adding the epoll @file inside current epoll | ||
1259 | * structure @ep does not violate the constraints, or -1 otherwise. | ||
1260 | */ | ||
1261 | static int ep_loop_check(struct eventpoll *ep, struct file *file) | ||
1262 | { | ||
1263 | return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, | ||
1264 | ep_loop_check_proc, file, ep, current); | ||
1265 | } | ||
1266 | |||
1201 | /* | 1267 | /* |
1202 | * Open an eventpoll file descriptor. | 1268 | * Open an eventpoll file descriptor. |
1203 | */ | 1269 | */ |
@@ -1246,6 +1312,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1246 | struct epoll_event __user *, event) | 1312 | struct epoll_event __user *, event) |
1247 | { | 1313 | { |
1248 | int error; | 1314 | int error; |
1315 | int did_lock_epmutex = 0; | ||
1249 | struct file *file, *tfile; | 1316 | struct file *file, *tfile; |
1250 | struct eventpoll *ep; | 1317 | struct eventpoll *ep; |
1251 | struct epitem *epi; | 1318 | struct epitem *epi; |
@@ -1287,6 +1354,25 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1287 | */ | 1354 | */ |
1288 | ep = file->private_data; | 1355 | ep = file->private_data; |
1289 | 1356 | ||
1357 | /* | ||
1358 | * When we insert an epoll file descriptor, inside another epoll file | ||
1359 | * descriptor, there is the change of creating closed loops, which are | ||
1360 | * better be handled here, than in more critical paths. | ||
1361 | * | ||
1362 | * We hold epmutex across the loop check and the insert in this case, in | ||
1363 | * order to prevent two separate inserts from racing and each doing the | ||
1364 | * insert "at the same time" such that ep_loop_check passes on both | ||
1365 | * before either one does the insert, thereby creating a cycle. | ||
1366 | */ | ||
1367 | if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { | ||
1368 | mutex_lock(&epmutex); | ||
1369 | did_lock_epmutex = 1; | ||
1370 | error = -ELOOP; | ||
1371 | if (ep_loop_check(ep, tfile) != 0) | ||
1372 | goto error_tgt_fput; | ||
1373 | } | ||
1374 | |||
1375 | |||
1290 | mutex_lock(&ep->mtx); | 1376 | mutex_lock(&ep->mtx); |
1291 | 1377 | ||
1292 | /* | 1378 | /* |
@@ -1322,6 +1408,9 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, | |||
1322 | mutex_unlock(&ep->mtx); | 1408 | mutex_unlock(&ep->mtx); |
1323 | 1409 | ||
1324 | error_tgt_fput: | 1410 | error_tgt_fput: |
1411 | if (unlikely(did_lock_epmutex)) | ||
1412 | mutex_unlock(&epmutex); | ||
1413 | |||
1325 | fput(tfile); | 1414 | fput(tfile); |
1326 | error_fput: | 1415 | error_fput: |
1327 | fput(file); | 1416 | fput(file); |
@@ -1441,6 +1530,12 @@ static int __init eventpoll_init(void) | |||
1441 | EP_ITEM_COST; | 1530 | EP_ITEM_COST; |
1442 | BUG_ON(max_user_watches < 0); | 1531 | BUG_ON(max_user_watches < 0); |
1443 | 1532 | ||
1533 | /* | ||
1534 | * Initialize the structure used to perform epoll file descriptor | ||
1535 | * inclusion loops checks. | ||
1536 | */ | ||
1537 | ep_nested_calls_init(&poll_loop_ncalls); | ||
1538 | |||
1444 | /* Initialize the structure used to perform safe poll wait head wake ups */ | 1539 | /* Initialize the structure used to perform safe poll wait head wake ups */ |
1445 | ep_nested_calls_init(&poll_safewake_ncalls); | 1540 | ep_nested_calls_init(&poll_safewake_ncalls); |
1446 | 1541 | ||
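
After the eventpoll changes above, EPOLL_CTL_ADD refuses an insertion that would nest epoll descriptors into a cycle, returning -ELOOP. This is observable with nothing but the public epoll API:

#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <sys/epoll.h>

int main(void)
{
        struct epoll_event ev = { .events = EPOLLIN };
        int a, b;

        a = epoll_create1(0);
        b = epoll_create1(0);
        if (a < 0 || b < 0) {
                perror("epoll_create1");
                return 1;
        }
        /* nesting one epoll fd inside another is still allowed ... */
        ev.data.fd = b;
        if (epoll_ctl(a, EPOLL_CTL_ADD, b, &ev) < 0) {
                perror("EPOLL_CTL_ADD b into a");
                return 1;
        }
        /* ... but closing the loop (a back into b) is not */
        ev.data.fd = a;
        if (epoll_ctl(b, EPOLL_CTL_ADD, a, &ev) == 0)
                printf("cycle was accepted (kernel without the loop check)\n");
        else if (errno == ELOOP)
                printf("cycle rejected with ELOOP\n");
        else
                perror("EPOLL_CTL_ADD a into b");
        close(a);
        close(b);
        return 0;
}

Adding b into a still succeeds; it is the second insertion, which would complete the a -> b -> a loop, that the new ep_loop_check() rejects.
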
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 264e95d02830..4d70db110cfc 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c | |||
@@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
272 | new_de = exofs_find_entry(new_dir, new_dentry, &new_page); | 272 | new_de = exofs_find_entry(new_dir, new_dentry, &new_page); |
273 | if (!new_de) | 273 | if (!new_de) |
274 | goto out_dir; | 274 | goto out_dir; |
275 | inode_inc_link_count(old_inode); | ||
276 | err = exofs_set_link(new_dir, new_de, new_page, old_inode); | 275 | err = exofs_set_link(new_dir, new_de, new_page, old_inode); |
277 | new_inode->i_ctime = CURRENT_TIME; | 276 | new_inode->i_ctime = CURRENT_TIME; |
278 | if (dir_de) | 277 | if (dir_de) |
@@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
286 | if (new_dir->i_nlink >= EXOFS_LINK_MAX) | 285 | if (new_dir->i_nlink >= EXOFS_LINK_MAX) |
287 | goto out_dir; | 286 | goto out_dir; |
288 | } | 287 | } |
289 | inode_inc_link_count(old_inode); | ||
290 | err = exofs_add_link(new_dentry, old_inode); | 288 | err = exofs_add_link(new_dentry, old_inode); |
291 | if (err) { | 289 | if (err) |
292 | inode_dec_link_count(old_inode); | ||
293 | goto out_dir; | 290 | goto out_dir; |
294 | } | ||
295 | if (dir_de) | 291 | if (dir_de) |
296 | inode_inc_link_count(new_dir); | 292 | inode_inc_link_count(new_dir); |
297 | } | 293 | } |
@@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
299 | old_inode->i_ctime = CURRENT_TIME; | 295 | old_inode->i_ctime = CURRENT_TIME; |
300 | 296 | ||
301 | exofs_delete_entry(old_de, old_page); | 297 | exofs_delete_entry(old_de, old_page); |
302 | inode_dec_link_count(old_inode); | 298 | mark_inode_dirty(old_inode); |
303 | 299 | ||
304 | if (dir_de) { | 300 | if (dir_de) { |
305 | err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); | 301 | err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); |
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2e1d8341d827..adb91855ccd0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
@@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
344 | new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); | 344 | new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); |
345 | if (!new_de) | 345 | if (!new_de) |
346 | goto out_dir; | 346 | goto out_dir; |
347 | inode_inc_link_count(old_inode); | ||
348 | ext2_set_link(new_dir, new_de, new_page, old_inode, 1); | 347 | ext2_set_link(new_dir, new_de, new_page, old_inode, 1); |
349 | new_inode->i_ctime = CURRENT_TIME_SEC; | 348 | new_inode->i_ctime = CURRENT_TIME_SEC; |
350 | if (dir_de) | 349 | if (dir_de) |
@@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
356 | if (new_dir->i_nlink >= EXT2_LINK_MAX) | 355 | if (new_dir->i_nlink >= EXT2_LINK_MAX) |
357 | goto out_dir; | 356 | goto out_dir; |
358 | } | 357 | } |
359 | inode_inc_link_count(old_inode); | ||
360 | err = ext2_add_link(new_dentry, old_inode); | 358 | err = ext2_add_link(new_dentry, old_inode); |
361 | if (err) { | 359 | if (err) |
362 | inode_dec_link_count(old_inode); | ||
363 | goto out_dir; | 360 | goto out_dir; |
364 | } | ||
365 | if (dir_de) | 361 | if (dir_de) |
366 | inode_inc_link_count(new_dir); | 362 | inode_inc_link_count(new_dir); |
367 | } | 363 | } |
@@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
369 | /* | 365 | /* |
370 | * Like most other Unix systems, set the ctime for inodes on a | 366 | * Like most other Unix systems, set the ctime for inodes on a |
371 | * rename. | 367 | * rename. |
372 | * inode_dec_link_count() will mark the inode dirty. | ||
373 | */ | 368 | */ |
374 | old_inode->i_ctime = CURRENT_TIME_SEC; | 369 | old_inode->i_ctime = CURRENT_TIME_SEC; |
370 | mark_inode_dirty(old_inode); | ||
375 | 371 | ||
376 | ext2_delete_entry (old_de, old_page); | 372 | ext2_delete_entry (old_de, old_page); |
377 | inode_dec_link_count(old_inode); | ||
378 | 373 | ||
379 | if (dir_de) { | 374 | if (dir_de) { |
380 | if (old_dir != new_dir) | 375 | if (old_dir != new_dir) |
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index f88f752babd9..adae3fb7451a 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c | |||
@@ -43,7 +43,7 @@ static int vfat_revalidate_shortname(struct dentry *dentry) | |||
43 | 43 | ||
44 | static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) | 44 | static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) |
45 | { | 45 | { |
46 | if (nd->flags & LOOKUP_RCU) | 46 | if (nd && nd->flags & LOOKUP_RCU) |
47 | return -ECHILD; | 47 | return -ECHILD; |
48 | 48 | ||
49 | /* This is not negative dentry. Always valid. */ | 49 | /* This is not negative dentry. Always valid. */ |
@@ -54,7 +54,7 @@ static int vfat_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
54 | 54 | ||
55 | static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) | 55 | static int vfat_revalidate_ci(struct dentry *dentry, struct nameidata *nd) |
56 | { | 56 | { |
57 | if (nd->flags & LOOKUP_RCU) | 57 | if (nd && nd->flags & LOOKUP_RCU) |
58 | return -ECHILD; | 58 | return -ECHILD; |
59 | 59 | ||
60 | /* | 60 | /* |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index bfed8447ed80..8bd0ef9286c3 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -158,7 +158,7 @@ static int fuse_dentry_revalidate(struct dentry *entry, struct nameidata *nd) | |||
158 | { | 158 | { |
159 | struct inode *inode; | 159 | struct inode *inode; |
160 | 160 | ||
161 | if (nd->flags & LOOKUP_RCU) | 161 | if (nd && nd->flags & LOOKUP_RCU) |
162 | return -ECHILD; | 162 | return -ECHILD; |
163 | 163 | ||
164 | inode = entry->d_inode; | 164 | inode = entry->d_inode; |
@@ -1283,8 +1283,11 @@ static int fuse_do_setattr(struct dentry *entry, struct iattr *attr, | |||
1283 | if (err) | 1283 | if (err) |
1284 | return err; | 1284 | return err; |
1285 | 1285 | ||
1286 | if ((attr->ia_valid & ATTR_OPEN) && fc->atomic_o_trunc) | 1286 | if (attr->ia_valid & ATTR_OPEN) { |
1287 | return 0; | 1287 | if (fc->atomic_o_trunc) |
1288 | return 0; | ||
1289 | file = NULL; | ||
1290 | } | ||
1288 | 1291 | ||
1289 | if (attr->ia_valid & ATTR_SIZE) | 1292 | if (attr->ia_valid & ATTR_SIZE) |
1290 | is_truncate = true; | 1293 | is_truncate = true; |
diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 95da1bc1c826..9e0832dbb1e3 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c | |||
@@ -86,18 +86,52 @@ struct fuse_file *fuse_file_get(struct fuse_file *ff) | |||
86 | return ff; | 86 | return ff; |
87 | } | 87 | } |
88 | 88 | ||
89 | static void fuse_release_async(struct work_struct *work) | ||
90 | { | ||
91 | struct fuse_req *req; | ||
92 | struct fuse_conn *fc; | ||
93 | struct path path; | ||
94 | |||
95 | req = container_of(work, struct fuse_req, misc.release.work); | ||
96 | path = req->misc.release.path; | ||
97 | fc = get_fuse_conn(path.dentry->d_inode); | ||
98 | |||
99 | fuse_put_request(fc, req); | ||
100 | path_put(&path); | ||
101 | } | ||
102 | |||
89 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) | 103 | static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) |
90 | { | 104 | { |
91 | path_put(&req->misc.release.path); | 105 | if (fc->destroy_req) { |
106 | /* | ||
107 | * If this is a fuseblk mount, then it's possible that | ||
108 | * releasing the path will result in releasing the | ||
109 | * super block and sending the DESTROY request. If | ||
110 | * the server is single threaded, this would hang. | ||
111 | * For this reason do the path_put() in a separate | ||
112 | * thread. | ||
113 | */ | ||
114 | atomic_inc(&req->count); | ||
115 | INIT_WORK(&req->misc.release.work, fuse_release_async); | ||
116 | schedule_work(&req->misc.release.work); | ||
117 | } else { | ||
118 | path_put(&req->misc.release.path); | ||
119 | } | ||
92 | } | 120 | } |
93 | 121 | ||
94 | static void fuse_file_put(struct fuse_file *ff) | 122 | static void fuse_file_put(struct fuse_file *ff, bool sync) |
95 | { | 123 | { |
96 | if (atomic_dec_and_test(&ff->count)) { | 124 | if (atomic_dec_and_test(&ff->count)) { |
97 | struct fuse_req *req = ff->reserved_req; | 125 | struct fuse_req *req = ff->reserved_req; |
98 | 126 | ||
99 | req->end = fuse_release_end; | 127 | if (sync) { |
100 | fuse_request_send_background(ff->fc, req); | 128 | fuse_request_send(ff->fc, req); |
129 | path_put(&req->misc.release.path); | ||
130 | fuse_put_request(ff->fc, req); | ||
131 | } else { | ||
132 | req->end = fuse_release_end; | ||
133 | fuse_request_send_background(ff->fc, req); | ||
134 | } | ||
101 | kfree(ff); | 135 | kfree(ff); |
102 | } | 136 | } |
103 | } | 137 | } |
@@ -219,8 +253,12 @@ void fuse_release_common(struct file *file, int opcode) | |||
219 | * Normally this will send the RELEASE request, however if | 253 | * Normally this will send the RELEASE request, however if |
220 | * some asynchronous READ or WRITE requests are outstanding, | 254 | * some asynchronous READ or WRITE requests are outstanding, |
221 | * the sending will be delayed. | 255 | * the sending will be delayed. |
256 | * | ||
257 | * Make the release synchronous if this is a fuseblk mount; | ||
258 | * synchronous RELEASE is allowed (and desirable) in this case | ||
259 | * because the server can be trusted not to screw up. | ||
222 | */ | 260 | */ |
223 | fuse_file_put(ff); | 261 | fuse_file_put(ff, ff->fc->destroy_req != NULL); |
224 | } | 262 | } |
225 | 263 | ||
226 | static int fuse_open(struct inode *inode, struct file *file) | 264 | static int fuse_open(struct inode *inode, struct file *file) |
@@ -558,7 +596,7 @@ static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) | |||
558 | page_cache_release(page); | 596 | page_cache_release(page); |
559 | } | 597 | } |
560 | if (req->ff) | 598 | if (req->ff) |
561 | fuse_file_put(req->ff); | 599 | fuse_file_put(req->ff, false); |
562 | } | 600 | } |
563 | 601 | ||
564 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) | 602 | static void fuse_send_readpages(struct fuse_req *req, struct file *file) |
@@ -1137,7 +1175,7 @@ static ssize_t fuse_direct_write(struct file *file, const char __user *buf, | |||
1137 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) | 1175 | static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) |
1138 | { | 1176 | { |
1139 | __free_page(req->pages[0]); | 1177 | __free_page(req->pages[0]); |
1140 | fuse_file_put(req->ff); | 1178 | fuse_file_put(req->ff, false); |
1141 | } | 1179 | } |
1142 | 1180 | ||
1143 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) | 1181 | static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) |
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index ae5744a2f9e9..d4286947bc2c 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/rwsem.h> | 21 | #include <linux/rwsem.h> |
22 | #include <linux/rbtree.h> | 22 | #include <linux/rbtree.h> |
23 | #include <linux/poll.h> | 23 | #include <linux/poll.h> |
24 | #include <linux/workqueue.h> | ||
24 | 25 | ||
25 | /** Max number of pages that can be used in a single read request */ | 26 | /** Max number of pages that can be used in a single read request */ |
26 | #define FUSE_MAX_PAGES_PER_REQ 32 | 27 | #define FUSE_MAX_PAGES_PER_REQ 32 |
@@ -262,7 +263,10 @@ struct fuse_req { | |||
262 | /** Data for asynchronous requests */ | 263 | /** Data for asynchronous requests */ |
263 | union { | 264 | union { |
264 | struct { | 265 | struct { |
265 | struct fuse_release_in in; | 266 | union { |
267 | struct fuse_release_in in; | ||
268 | struct work_struct work; | ||
269 | }; | ||
266 | struct path path; | 270 | struct path path; |
267 | } release; | 271 | } release; |
268 | struct fuse_init_in init_in; | 272 | struct fuse_init_in init_in; |
diff --git a/fs/gfs2/dentry.c b/fs/gfs2/dentry.c index 4a456338b873..0da8da2c991d 100644 --- a/fs/gfs2/dentry.c +++ b/fs/gfs2/dentry.c | |||
@@ -44,7 +44,7 @@ static int gfs2_drevalidate(struct dentry *dentry, struct nameidata *nd) | |||
44 | int error; | 44 | int error; |
45 | int had_lock = 0; | 45 | int had_lock = 0; |
46 | 46 | ||
47 | if (nd->flags & LOOKUP_RCU) | 47 | if (nd && nd->flags & LOOKUP_RCU) |
48 | return -ECHILD; | 48 | return -ECHILD; |
49 | 49 | ||
50 | parent = dget_parent(dentry); | 50 | parent = dget_parent(dentry); |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index 85ba027d1c4d..72c31a315d96 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -59,14 +59,7 @@ static void gfs2_init_gl_aspace_once(void *foo) | |||
59 | struct address_space *mapping = (struct address_space *)(gl + 1); | 59 | struct address_space *mapping = (struct address_space *)(gl + 1); |
60 | 60 | ||
61 | gfs2_init_glock_once(gl); | 61 | gfs2_init_glock_once(gl); |
62 | memset(mapping, 0, sizeof(*mapping)); | 62 | address_space_init_once(mapping); |
63 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
64 | spin_lock_init(&mapping->tree_lock); | ||
65 | spin_lock_init(&mapping->i_mmap_lock); | ||
66 | INIT_LIST_HEAD(&mapping->private_list); | ||
67 | spin_lock_init(&mapping->private_lock); | ||
68 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
69 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
70 | } | 63 | } |
71 | 64 | ||
72 | /** | 65 | /** |
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index afa66aaa2237..b4d70b13be92 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c | |||
@@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
238 | } | 238 | } |
239 | 239 | ||
240 | /* | 240 | /* |
241 | * hfs_unlink() | 241 | * hfs_remove() |
242 | * | 242 | * |
243 | * This is the unlink() entry in the inode_operations structure for | 243 | * This serves as both unlink() and rmdir() in the inode_operations |
244 | * regular HFS directories. The purpose is to delete an existing | 244 | * structure for regular HFS directories. The purpose is to delete |
245 | * file, given the inode for the parent directory and the name | 245 | * an existing child, given the inode for the parent directory and |
246 | * (and its length) of the existing file. | 246 | * the name (and its length) of the existing directory. |
247 | */ | ||
248 | static int hfs_unlink(struct inode *dir, struct dentry *dentry) | ||
249 | { | ||
250 | struct inode *inode; | ||
251 | int res; | ||
252 | |||
253 | inode = dentry->d_inode; | ||
254 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); | ||
255 | if (res) | ||
256 | return res; | ||
257 | |||
258 | drop_nlink(inode); | ||
259 | hfs_delete_inode(inode); | ||
260 | inode->i_ctime = CURRENT_TIME_SEC; | ||
261 | mark_inode_dirty(inode); | ||
262 | |||
263 | return res; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * hfs_rmdir() | ||
268 | * | 247 | * |
269 | * This is the rmdir() entry in the inode_operations structure for | 248 | * HFS does not have hardlinks, so both rmdir and unlink set the |
270 | * regular HFS directories. The purpose is to delete an existing | 249 | * link count to 0. The only difference is the emptiness check. |
271 | * directory, given the inode for the parent directory and the name | ||
272 | * (and its length) of the existing directory. | ||
273 | */ | 250 | */ |
274 | static int hfs_rmdir(struct inode *dir, struct dentry *dentry) | 251 | static int hfs_remove(struct inode *dir, struct dentry *dentry) |
275 | { | 252 | { |
276 | struct inode *inode; | 253 | struct inode *inode = dentry->d_inode; |
277 | int res; | 254 | int res; |
278 | 255 | ||
279 | inode = dentry->d_inode; | 256 | if (S_ISDIR(inode->i_mode) && inode->i_size != 2) |
280 | if (inode->i_size != 2) | ||
281 | return -ENOTEMPTY; | 257 | return -ENOTEMPTY; |
282 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); | 258 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); |
283 | if (res) | 259 | if (res) |
@@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
307 | 283 | ||
308 | /* Unlink destination if it already exists */ | 284 | /* Unlink destination if it already exists */ |
309 | if (new_dentry->d_inode) { | 285 | if (new_dentry->d_inode) { |
310 | res = hfs_unlink(new_dir, new_dentry); | 286 | res = hfs_remove(new_dir, new_dentry); |
311 | if (res) | 287 | if (res) |
312 | return res; | 288 | return res; |
313 | } | 289 | } |
@@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = { | |||
332 | const struct inode_operations hfs_dir_inode_operations = { | 308 | const struct inode_operations hfs_dir_inode_operations = { |
333 | .create = hfs_create, | 309 | .create = hfs_create, |
334 | .lookup = hfs_lookup, | 310 | .lookup = hfs_lookup, |
335 | .unlink = hfs_unlink, | 311 | .unlink = hfs_remove, |
336 | .mkdir = hfs_mkdir, | 312 | .mkdir = hfs_mkdir, |
337 | .rmdir = hfs_rmdir, | 313 | .rmdir = hfs_remove, |
338 | .rename = hfs_rename, | 314 | .rename = hfs_rename, |
339 | .setattr = hfs_inode_setattr, | 315 | .setattr = hfs_inode_setattr, |
340 | }; | 316 | }; |
diff --git a/fs/inode.c b/fs/inode.c index da85e56378f3..0647d80accf6 100644 --- a/fs/inode.c +++ b/fs/inode.c | |||
@@ -295,6 +295,20 @@ static void destroy_inode(struct inode *inode) | |||
295 | call_rcu(&inode->i_rcu, i_callback); | 295 | call_rcu(&inode->i_rcu, i_callback); |
296 | } | 296 | } |
297 | 297 | ||
298 | void address_space_init_once(struct address_space *mapping) | ||
299 | { | ||
300 | memset(mapping, 0, sizeof(*mapping)); | ||
301 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
302 | spin_lock_init(&mapping->tree_lock); | ||
303 | spin_lock_init(&mapping->i_mmap_lock); | ||
304 | INIT_LIST_HEAD(&mapping->private_list); | ||
305 | spin_lock_init(&mapping->private_lock); | ||
306 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
307 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
308 | mutex_init(&mapping->unmap_mutex); | ||
309 | } | ||
310 | EXPORT_SYMBOL(address_space_init_once); | ||
311 | |||
298 | /* | 312 | /* |
299 | * These are initializations that only need to be done | 313 | * These are initializations that only need to be done |
300 | * once, because the fields are idempotent across use | 314 | * once, because the fields are idempotent across use |
@@ -308,13 +322,7 @@ void inode_init_once(struct inode *inode) | |||
308 | INIT_LIST_HEAD(&inode->i_devices); | 322 | INIT_LIST_HEAD(&inode->i_devices); |
309 | INIT_LIST_HEAD(&inode->i_wb_list); | 323 | INIT_LIST_HEAD(&inode->i_wb_list); |
310 | INIT_LIST_HEAD(&inode->i_lru); | 324 | INIT_LIST_HEAD(&inode->i_lru); |
311 | INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); | 325 | address_space_init_once(&inode->i_data); |
312 | spin_lock_init(&inode->i_data.tree_lock); | ||
313 | spin_lock_init(&inode->i_data.i_mmap_lock); | ||
314 | INIT_LIST_HEAD(&inode->i_data.private_list); | ||
315 | spin_lock_init(&inode->i_data.private_lock); | ||
316 | INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); | ||
317 | INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); | ||
318 | i_size_ordered_init(inode); | 326 | i_size_ordered_init(inode); |
319 | #ifdef CONFIG_FSNOTIFY | 327 | #ifdef CONFIG_FSNOTIFY |
320 | INIT_HLIST_HEAD(&inode->i_fsnotify_marks); | 328 | INIT_HLIST_HEAD(&inode->i_fsnotify_marks); |
@@ -540,11 +548,14 @@ void evict_inodes(struct super_block *sb) | |||
540 | /** | 548 | /** |
541 | * invalidate_inodes - attempt to free all inodes on a superblock | 549 | * invalidate_inodes - attempt to free all inodes on a superblock |
542 | * @sb: superblock to operate on | 550 | * @sb: superblock to operate on |
551 | * @kill_dirty: flag to guide handling of dirty inodes | ||
543 | * | 552 | * |
544 | * Attempts to free all inodes for a given superblock. If there were any | 553 | * Attempts to free all inodes for a given superblock. If there were any |
545 | * busy inodes return a non-zero value, else zero. | 554 | * busy inodes return a non-zero value, else zero. |
555 | * If @kill_dirty is set, discard dirty inodes too, otherwise treat | ||
556 | * them as busy. | ||
546 | */ | 557 | */ |
547 | int invalidate_inodes(struct super_block *sb) | 558 | int invalidate_inodes(struct super_block *sb, bool kill_dirty) |
548 | { | 559 | { |
549 | int busy = 0; | 560 | int busy = 0; |
550 | struct inode *inode, *next; | 561 | struct inode *inode, *next; |
@@ -556,6 +567,10 @@ int invalidate_inodes(struct super_block *sb) | |||
556 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { | 567 | list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { |
557 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) | 568 | if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) |
558 | continue; | 569 | continue; |
570 | if (inode->i_state & I_DIRTY && !kill_dirty) { | ||
571 | busy = 1; | ||
572 | continue; | ||
573 | } | ||
559 | if (atomic_read(&inode->i_count)) { | 574 | if (atomic_read(&inode->i_count)) { |
560 | busy = 1; | 575 | busy = 1; |
561 | continue; | 576 | continue; |
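
For context, a hedged sketch of how a filesystem can use the newly exported address_space_init_once() for an address_space it embeds in a private object, mirroring the nilfs2 conversion further down in this same series. The struct and function names here are illustrative, and it is assumed the helper is declared in <linux/fs.h> as part of this patch set.

#include <linux/fs.h>
#include <linux/slab.h>

/* Hypothetical per-filesystem object carrying its own page cache. */
struct example_shadow {
	struct address_space frozen_data;	/* private mapping */
};

/* Slab constructor: one-time, idempotent setup of the embedded mapping.
 * This replaces the open-coded radix-tree/lock/list initialisation that
 * inode_init_once() used to do inline for i_data. */
static void example_shadow_init_once(void *obj)
{
	struct example_shadow *shadow = obj;

	address_space_init_once(&shadow->frozen_data);
}
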
diff --git a/fs/internal.h b/fs/internal.h index 0663568b1247..9b976b57d7fe 100644 --- a/fs/internal.h +++ b/fs/internal.h | |||
@@ -112,4 +112,4 @@ extern void release_open_intent(struct nameidata *); | |||
112 | */ | 112 | */ |
113 | extern int get_nr_dirty_inodes(void); | 113 | extern int get_nr_dirty_inodes(void); |
114 | extern void evict_inodes(struct super_block *); | 114 | extern void evict_inodes(struct super_block *); |
115 | extern int invalidate_inodes(struct super_block *); | 115 | extern int invalidate_inodes(struct super_block *, bool); |
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index 81ead850ddb6..5a2b269428a6 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -1600,7 +1600,7 @@ out: | |||
1600 | 1600 | ||
1601 | static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) | 1601 | static int jfs_ci_revalidate(struct dentry *dentry, struct nameidata *nd) |
1602 | { | 1602 | { |
1603 | if (nd->flags & LOOKUP_RCU) | 1603 | if (nd && nd->flags & LOOKUP_RCU) |
1604 | return -ECHILD; | 1604 | return -ECHILD; |
1605 | /* | 1605 | /* |
1606 | * This is not negative dentry. Always valid. | 1606 | * This is not negative dentry. Always valid. |
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index ce7337ddfdbf..6e6777f1b4b2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
@@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
213 | new_de = minix_find_entry(new_dentry, &new_page); | 213 | new_de = minix_find_entry(new_dentry, &new_page); |
214 | if (!new_de) | 214 | if (!new_de) |
215 | goto out_dir; | 215 | goto out_dir; |
216 | inode_inc_link_count(old_inode); | ||
217 | minix_set_link(new_de, new_page, old_inode); | 216 | minix_set_link(new_de, new_page, old_inode); |
218 | new_inode->i_ctime = CURRENT_TIME_SEC; | 217 | new_inode->i_ctime = CURRENT_TIME_SEC; |
219 | if (dir_de) | 218 | if (dir_de) |
@@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
225 | if (new_dir->i_nlink >= info->s_link_max) | 224 | if (new_dir->i_nlink >= info->s_link_max) |
226 | goto out_dir; | 225 | goto out_dir; |
227 | } | 226 | } |
228 | inode_inc_link_count(old_inode); | ||
229 | err = minix_add_link(new_dentry, old_inode); | 227 | err = minix_add_link(new_dentry, old_inode); |
230 | if (err) { | 228 | if (err) |
231 | inode_dec_link_count(old_inode); | ||
232 | goto out_dir; | 229 | goto out_dir; |
233 | } | ||
234 | if (dir_de) | 230 | if (dir_de) |
235 | inode_inc_link_count(new_dir); | 231 | inode_inc_link_count(new_dir); |
236 | } | 232 | } |
237 | 233 | ||
238 | minix_delete_entry(old_de, old_page); | 234 | minix_delete_entry(old_de, old_page); |
239 | inode_dec_link_count(old_inode); | 235 | mark_inode_dirty(old_inode); |
240 | 236 | ||
241 | if (dir_de) { | 237 | if (dir_de) { |
242 | minix_set_link(dir_de, dir_page, new_dir); | 238 | minix_set_link(dir_de, dir_page, new_dir); |
diff --git a/fs/namei.c b/fs/namei.c index 0087cf9c2c6b..a4689eb2df28 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -1546,6 +1546,7 @@ static int path_walk(const char *name, struct nameidata *nd) | |||
1546 | /* nd->path had been dropped */ | 1546 | /* nd->path had been dropped */ |
1547 | current->total_link_count = 0; | 1547 | current->total_link_count = 0; |
1548 | nd->path = save; | 1548 | nd->path = save; |
1549 | nd->inode = save.dentry->d_inode; | ||
1549 | path_get(&nd->path); | 1550 | path_get(&nd->path); |
1550 | nd->flags |= LOOKUP_REVAL; | 1551 | nd->flags |= LOOKUP_REVAL; |
1551 | result = link_path_walk(name, nd); | 1552 | result = link_path_walk(name, nd); |
@@ -2455,22 +2456,29 @@ struct file *do_filp_open(int dfd, const char *pathname, | |||
2455 | /* !O_CREAT, simple open */ | 2456 | /* !O_CREAT, simple open */ |
2456 | error = do_path_lookup(dfd, pathname, flags, &nd); | 2457 | error = do_path_lookup(dfd, pathname, flags, &nd); |
2457 | if (unlikely(error)) | 2458 | if (unlikely(error)) |
2458 | goto out_filp; | 2459 | goto out_filp2; |
2459 | error = -ELOOP; | 2460 | error = -ELOOP; |
2460 | if (!(nd.flags & LOOKUP_FOLLOW)) { | 2461 | if (!(nd.flags & LOOKUP_FOLLOW)) { |
2461 | if (nd.inode->i_op->follow_link) | 2462 | if (nd.inode->i_op->follow_link) |
2462 | goto out_path; | 2463 | goto out_path2; |
2463 | } | 2464 | } |
2464 | error = -ENOTDIR; | 2465 | error = -ENOTDIR; |
2465 | if (nd.flags & LOOKUP_DIRECTORY) { | 2466 | if (nd.flags & LOOKUP_DIRECTORY) { |
2466 | if (!nd.inode->i_op->lookup) | 2467 | if (!nd.inode->i_op->lookup) |
2467 | goto out_path; | 2468 | goto out_path2; |
2468 | } | 2469 | } |
2469 | audit_inode(pathname, nd.path.dentry); | 2470 | audit_inode(pathname, nd.path.dentry); |
2470 | filp = finish_open(&nd, open_flag, acc_mode); | 2471 | filp = finish_open(&nd, open_flag, acc_mode); |
2472 | out2: | ||
2471 | release_open_intent(&nd); | 2473 | release_open_intent(&nd); |
2472 | return filp; | 2474 | return filp; |
2473 | 2475 | ||
2476 | out_path2: | ||
2477 | path_put(&nd.path); | ||
2478 | out_filp2: | ||
2479 | filp = ERR_PTR(error); | ||
2480 | goto out2; | ||
2481 | |||
2474 | creat: | 2482 | creat: |
2475 | /* OK, have to create the file. Find the parent. */ | 2483 | /* OK, have to create the file. Find the parent. */ |
2476 | error = path_init_rcu(dfd, pathname, | 2484 | error = path_init_rcu(dfd, pathname, |
diff --git a/fs/namespace.c b/fs/namespace.c index 7b0b95371696..d1edf26025dc 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1244,7 +1244,7 @@ static int do_umount(struct vfsmount *mnt, int flags) | |||
1244 | */ | 1244 | */ |
1245 | br_write_lock(vfsmount_lock); | 1245 | br_write_lock(vfsmount_lock); |
1246 | if (mnt_get_count(mnt) != 2) { | 1246 | if (mnt_get_count(mnt) != 2) { |
1247 | br_write_lock(vfsmount_lock); | 1247 | br_write_unlock(vfsmount_lock); |
1248 | return -EBUSY; | 1248 | return -EBUSY; |
1249 | } | 1249 | } |
1250 | br_write_unlock(vfsmount_lock); | 1250 | br_write_unlock(vfsmount_lock); |
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index 1cc600e77bb4..2f8e61816d75 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/inet.h> | 37 | #include <linux/inet.h> |
38 | #include <linux/nfs_xdr.h> | 38 | #include <linux/nfs_xdr.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/compat.h> | ||
40 | 41 | ||
41 | #include <asm/system.h> | 42 | #include <asm/system.h> |
42 | #include <asm/uaccess.h> | 43 | #include <asm/uaccess.h> |
@@ -89,7 +90,11 @@ int nfs_wait_bit_killable(void *word) | |||
89 | */ | 90 | */ |
90 | u64 nfs_compat_user_ino64(u64 fileid) | 91 | u64 nfs_compat_user_ino64(u64 fileid) |
91 | { | 92 | { |
92 | int ino; | 93 | #ifdef CONFIG_COMPAT |
94 | compat_ulong_t ino; | ||
95 | #else | ||
96 | unsigned long ino; | ||
97 | #endif | ||
93 | 98 | ||
94 | if (enable_ino64) | 99 | if (enable_ino64) |
95 | return fileid; | 100 | return fileid; |
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h index 7a7474073148..1be36cf65bfc 100644 --- a/fs/nfs/nfs4_fs.h +++ b/fs/nfs/nfs4_fs.h | |||
@@ -298,6 +298,11 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp); | |||
298 | #if defined(CONFIG_NFS_V4_1) | 298 | #if defined(CONFIG_NFS_V4_1) |
299 | struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); | 299 | struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); |
300 | struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); | 300 | struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); |
301 | extern void nfs4_schedule_session_recovery(struct nfs4_session *); | ||
302 | #else | ||
303 | static inline void nfs4_schedule_session_recovery(struct nfs4_session *session) | ||
304 | { | ||
305 | } | ||
301 | #endif /* CONFIG_NFS_V4_1 */ | 306 | #endif /* CONFIG_NFS_V4_1 */ |
302 | 307 | ||
303 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); | 308 | extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *); |
@@ -307,10 +312,9 @@ extern void nfs4_put_open_state(struct nfs4_state *); | |||
307 | extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); | 312 | extern void nfs4_close_state(struct path *, struct nfs4_state *, fmode_t); |
308 | extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); | 313 | extern void nfs4_close_sync(struct path *, struct nfs4_state *, fmode_t); |
309 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); | 314 | extern void nfs4_state_set_mode_locked(struct nfs4_state *, fmode_t); |
310 | extern void nfs4_schedule_state_recovery(struct nfs_client *); | 315 | extern void nfs4_schedule_lease_recovery(struct nfs_client *); |
311 | extern void nfs4_schedule_state_manager(struct nfs_client *); | 316 | extern void nfs4_schedule_state_manager(struct nfs_client *); |
312 | extern int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state); | 317 | extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs4_state *); |
313 | extern int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state); | ||
314 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); | 318 | extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); |
315 | extern void nfs41_handle_recall_slot(struct nfs_client *clp); | 319 | extern void nfs41_handle_recall_slot(struct nfs_client *clp); |
316 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); | 320 | extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); |
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c index f5c9b125e8cc..b73c34375f60 100644 --- a/fs/nfs/nfs4filelayoutdev.c +++ b/fs/nfs/nfs4filelayoutdev.c | |||
@@ -219,6 +219,10 @@ decode_and_add_ds(__be32 **pp, struct inode *inode) | |||
219 | goto out_err; | 219 | goto out_err; |
220 | } | 220 | } |
221 | buf = kmalloc(rlen + 1, GFP_KERNEL); | 221 | buf = kmalloc(rlen + 1, GFP_KERNEL); |
222 | if (!buf) { | ||
223 | dprintk("%s: Not enough memory\n", __func__); | ||
224 | goto out_err; | ||
225 | } | ||
222 | buf[rlen] = '\0'; | 226 | buf[rlen] = '\0'; |
223 | memcpy(buf, r_addr, rlen); | 227 | memcpy(buf, r_addr, rlen); |
224 | 228 | ||
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 78936a8f40ab..0a07e353a961 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
@@ -256,12 +256,13 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
256 | case -NFS4ERR_OPENMODE: | 256 | case -NFS4ERR_OPENMODE: |
257 | if (state == NULL) | 257 | if (state == NULL) |
258 | break; | 258 | break; |
259 | nfs4_state_mark_reclaim_nograce(clp, state); | 259 | nfs4_schedule_stateid_recovery(server, state); |
260 | goto do_state_recovery; | 260 | goto wait_on_recovery; |
261 | case -NFS4ERR_STALE_STATEID: | 261 | case -NFS4ERR_STALE_STATEID: |
262 | case -NFS4ERR_STALE_CLIENTID: | 262 | case -NFS4ERR_STALE_CLIENTID: |
263 | case -NFS4ERR_EXPIRED: | 263 | case -NFS4ERR_EXPIRED: |
264 | goto do_state_recovery; | 264 | nfs4_schedule_lease_recovery(clp); |
265 | goto wait_on_recovery; | ||
265 | #if defined(CONFIG_NFS_V4_1) | 266 | #if defined(CONFIG_NFS_V4_1) |
266 | case -NFS4ERR_BADSESSION: | 267 | case -NFS4ERR_BADSESSION: |
267 | case -NFS4ERR_BADSLOT: | 268 | case -NFS4ERR_BADSLOT: |
@@ -272,7 +273,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
272 | case -NFS4ERR_SEQ_MISORDERED: | 273 | case -NFS4ERR_SEQ_MISORDERED: |
273 | dprintk("%s ERROR: %d Reset session\n", __func__, | 274 | dprintk("%s ERROR: %d Reset session\n", __func__, |
274 | errorcode); | 275 | errorcode); |
275 | nfs4_schedule_state_recovery(clp); | 276 | nfs4_schedule_session_recovery(clp->cl_session); |
276 | exception->retry = 1; | 277 | exception->retry = 1; |
277 | break; | 278 | break; |
278 | #endif /* defined(CONFIG_NFS_V4_1) */ | 279 | #endif /* defined(CONFIG_NFS_V4_1) */ |
@@ -295,8 +296,7 @@ static int nfs4_handle_exception(const struct nfs_server *server, int errorcode, | |||
295 | } | 296 | } |
296 | /* We failed to handle the error */ | 297 | /* We failed to handle the error */ |
297 | return nfs4_map_errors(ret); | 298 | return nfs4_map_errors(ret); |
298 | do_state_recovery: | 299 | wait_on_recovery: |
299 | nfs4_schedule_state_recovery(clp); | ||
300 | ret = nfs4_wait_clnt_recover(clp); | 300 | ret = nfs4_wait_clnt_recover(clp); |
301 | if (ret == 0) | 301 | if (ret == 0) |
302 | exception->retry = 1; | 302 | exception->retry = 1; |
@@ -435,8 +435,8 @@ static int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res * | |||
435 | clp = res->sr_session->clp; | 435 | clp = res->sr_session->clp; |
436 | do_renew_lease(clp, timestamp); | 436 | do_renew_lease(clp, timestamp); |
437 | /* Check sequence flags */ | 437 | /* Check sequence flags */ |
438 | if (atomic_read(&clp->cl_count) > 1) | 438 | if (res->sr_status_flags != 0) |
439 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | 439 | nfs4_schedule_lease_recovery(clp); |
440 | break; | 440 | break; |
441 | case -NFS4ERR_DELAY: | 441 | case -NFS4ERR_DELAY: |
442 | /* The server detected a resend of the RPC call and | 442 | /* The server detected a resend of the RPC call and |
@@ -1255,14 +1255,13 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state | |||
1255 | case -NFS4ERR_BAD_HIGH_SLOT: | 1255 | case -NFS4ERR_BAD_HIGH_SLOT: |
1256 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: | 1256 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
1257 | case -NFS4ERR_DEADSESSION: | 1257 | case -NFS4ERR_DEADSESSION: |
1258 | nfs4_schedule_state_recovery( | 1258 | nfs4_schedule_session_recovery(server->nfs_client->cl_session); |
1259 | server->nfs_client); | ||
1260 | goto out; | 1259 | goto out; |
1261 | case -NFS4ERR_STALE_CLIENTID: | 1260 | case -NFS4ERR_STALE_CLIENTID: |
1262 | case -NFS4ERR_STALE_STATEID: | 1261 | case -NFS4ERR_STALE_STATEID: |
1263 | case -NFS4ERR_EXPIRED: | 1262 | case -NFS4ERR_EXPIRED: |
1264 | /* Don't recall a delegation if it was lost */ | 1263 | /* Don't recall a delegation if it was lost */ |
1265 | nfs4_schedule_state_recovery(server->nfs_client); | 1264 | nfs4_schedule_lease_recovery(server->nfs_client); |
1266 | goto out; | 1265 | goto out; |
1267 | case -ERESTARTSYS: | 1266 | case -ERESTARTSYS: |
1268 | /* | 1267 | /* |
@@ -1271,7 +1270,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state | |||
1271 | */ | 1270 | */ |
1272 | case -NFS4ERR_ADMIN_REVOKED: | 1271 | case -NFS4ERR_ADMIN_REVOKED: |
1273 | case -NFS4ERR_BAD_STATEID: | 1272 | case -NFS4ERR_BAD_STATEID: |
1274 | nfs4_state_mark_reclaim_nograce(server->nfs_client, state); | 1273 | nfs4_schedule_stateid_recovery(server, state); |
1275 | case -EKEYEXPIRED: | 1274 | case -EKEYEXPIRED: |
1276 | /* | 1275 | /* |
1277 | * User RPCSEC_GSS context has expired. | 1276 | * User RPCSEC_GSS context has expired. |
@@ -1587,7 +1586,7 @@ static int nfs4_recover_expired_lease(struct nfs_server *server) | |||
1587 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && | 1586 | if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) && |
1588 | !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) | 1587 | !test_bit(NFS4CLNT_CHECK_LEASE,&clp->cl_state)) |
1589 | break; | 1588 | break; |
1590 | nfs4_schedule_state_recovery(clp); | 1589 | nfs4_schedule_state_manager(clp); |
1591 | ret = -EIO; | 1590 | ret = -EIO; |
1592 | } | 1591 | } |
1593 | return ret; | 1592 | return ret; |
@@ -3178,7 +3177,7 @@ static void nfs4_renew_done(struct rpc_task *task, void *calldata) | |||
3178 | if (task->tk_status < 0) { | 3177 | if (task->tk_status < 0) { |
3179 | /* Unless we're shutting down, schedule state recovery! */ | 3178 | /* Unless we're shutting down, schedule state recovery! */ |
3180 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) | 3179 | if (test_bit(NFS_CS_RENEWD, &clp->cl_res_state) != 0) |
3181 | nfs4_schedule_state_recovery(clp); | 3180 | nfs4_schedule_lease_recovery(clp); |
3182 | return; | 3181 | return; |
3183 | } | 3182 | } |
3184 | do_renew_lease(clp, timestamp); | 3183 | do_renew_lease(clp, timestamp); |
@@ -3252,6 +3251,35 @@ static void buf_to_pages(const void *buf, size_t buflen, | |||
3252 | } | 3251 | } |
3253 | } | 3252 | } |
3254 | 3253 | ||
3254 | static int buf_to_pages_noslab(const void *buf, size_t buflen, | ||
3255 | struct page **pages, unsigned int *pgbase) | ||
3256 | { | ||
3257 | struct page *newpage, **spages; | ||
3258 | int rc = 0; | ||
3259 | size_t len; | ||
3260 | spages = pages; | ||
3261 | |||
3262 | do { | ||
3263 | len = min_t(size_t, PAGE_CACHE_SIZE, buflen); | ||
3264 | newpage = alloc_page(GFP_KERNEL); | ||
3265 | |||
3266 | if (newpage == NULL) | ||
3267 | goto unwind; | ||
3268 | memcpy(page_address(newpage), buf, len); | ||
3269 | buf += len; | ||
3270 | buflen -= len; | ||
3271 | *pages++ = newpage; | ||
3272 | rc++; | ||
3273 | } while (buflen != 0); | ||
3274 | |||
3275 | return rc; | ||
3276 | |||
3277 | unwind: | ||
3278 | for(; rc > 0; rc--) | ||
3279 | __free_page(spages[rc-1]); | ||
3280 | return -ENOMEM; | ||
3281 | } | ||
3282 | |||
3255 | struct nfs4_cached_acl { | 3283 | struct nfs4_cached_acl { |
3256 | int cached; | 3284 | int cached; |
3257 | size_t len; | 3285 | size_t len; |
@@ -3420,13 +3448,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl | |||
3420 | .rpc_argp = &arg, | 3448 | .rpc_argp = &arg, |
3421 | .rpc_resp = &res, | 3449 | .rpc_resp = &res, |
3422 | }; | 3450 | }; |
3423 | int ret; | 3451 | int ret, i; |
3424 | 3452 | ||
3425 | if (!nfs4_server_supports_acls(server)) | 3453 | if (!nfs4_server_supports_acls(server)) |
3426 | return -EOPNOTSUPP; | 3454 | return -EOPNOTSUPP; |
3455 | i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); | ||
3456 | if (i < 0) | ||
3457 | return i; | ||
3427 | nfs_inode_return_delegation(inode); | 3458 | nfs_inode_return_delegation(inode); |
3428 | buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); | ||
3429 | ret = nfs4_call_sync(server, &msg, &arg, &res, 1); | 3459 | ret = nfs4_call_sync(server, &msg, &arg, &res, 1); |
3460 | |||
3461 | /* | ||
3462 | * Free each page after tx, so the only ref left is | ||
3463 | * held by the network stack | ||
3464 | */ | ||
3465 | for (; i > 0; i--) | ||
3466 | put_page(pages[i-1]); | ||
3467 | |||
3430 | /* | 3468 | /* |
3431 | * Acl update can result in inode attribute update. | 3469 | * Acl update can result in inode attribute update. |
3432 | * so mark the attribute cache invalid. | 3470 | * so mark the attribute cache invalid. |
@@ -3464,12 +3502,13 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3464 | case -NFS4ERR_OPENMODE: | 3502 | case -NFS4ERR_OPENMODE: |
3465 | if (state == NULL) | 3503 | if (state == NULL) |
3466 | break; | 3504 | break; |
3467 | nfs4_state_mark_reclaim_nograce(clp, state); | 3505 | nfs4_schedule_stateid_recovery(server, state); |
3468 | goto do_state_recovery; | 3506 | goto wait_on_recovery; |
3469 | case -NFS4ERR_STALE_STATEID: | 3507 | case -NFS4ERR_STALE_STATEID: |
3470 | case -NFS4ERR_STALE_CLIENTID: | 3508 | case -NFS4ERR_STALE_CLIENTID: |
3471 | case -NFS4ERR_EXPIRED: | 3509 | case -NFS4ERR_EXPIRED: |
3472 | goto do_state_recovery; | 3510 | nfs4_schedule_lease_recovery(clp); |
3511 | goto wait_on_recovery; | ||
3473 | #if defined(CONFIG_NFS_V4_1) | 3512 | #if defined(CONFIG_NFS_V4_1) |
3474 | case -NFS4ERR_BADSESSION: | 3513 | case -NFS4ERR_BADSESSION: |
3475 | case -NFS4ERR_BADSLOT: | 3514 | case -NFS4ERR_BADSLOT: |
@@ -3480,7 +3519,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3480 | case -NFS4ERR_SEQ_MISORDERED: | 3519 | case -NFS4ERR_SEQ_MISORDERED: |
3481 | dprintk("%s ERROR %d, Reset session\n", __func__, | 3520 | dprintk("%s ERROR %d, Reset session\n", __func__, |
3482 | task->tk_status); | 3521 | task->tk_status); |
3483 | nfs4_schedule_state_recovery(clp); | 3522 | nfs4_schedule_session_recovery(clp->cl_session); |
3484 | task->tk_status = 0; | 3523 | task->tk_status = 0; |
3485 | return -EAGAIN; | 3524 | return -EAGAIN; |
3486 | #endif /* CONFIG_NFS_V4_1 */ | 3525 | #endif /* CONFIG_NFS_V4_1 */ |
@@ -3497,9 +3536,8 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server, | |||
3497 | } | 3536 | } |
3498 | task->tk_status = nfs4_map_errors(task->tk_status); | 3537 | task->tk_status = nfs4_map_errors(task->tk_status); |
3499 | return 0; | 3538 | return 0; |
3500 | do_state_recovery: | 3539 | wait_on_recovery: |
3501 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); | 3540 | rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL); |
3502 | nfs4_schedule_state_recovery(clp); | ||
3503 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) | 3541 | if (test_bit(NFS4CLNT_MANAGER_RUNNING, &clp->cl_state) == 0) |
3504 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); | 3542 | rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task); |
3505 | task->tk_status = 0; | 3543 | task->tk_status = 0; |
@@ -4110,7 +4148,7 @@ static void nfs4_lock_release(void *calldata) | |||
4110 | task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, | 4148 | task = nfs4_do_unlck(&data->fl, data->ctx, data->lsp, |
4111 | data->arg.lock_seqid); | 4149 | data->arg.lock_seqid); |
4112 | if (!IS_ERR(task)) | 4150 | if (!IS_ERR(task)) |
4113 | rpc_put_task(task); | 4151 | rpc_put_task_async(task); |
4114 | dprintk("%s: cancelling lock!\n", __func__); | 4152 | dprintk("%s: cancelling lock!\n", __func__); |
4115 | } else | 4153 | } else |
4116 | nfs_free_seqid(data->arg.lock_seqid); | 4154 | nfs_free_seqid(data->arg.lock_seqid); |
@@ -4134,23 +4172,18 @@ static const struct rpc_call_ops nfs4_recover_lock_ops = { | |||
4134 | 4172 | ||
4135 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) | 4173 | static void nfs4_handle_setlk_error(struct nfs_server *server, struct nfs4_lock_state *lsp, int new_lock_owner, int error) |
4136 | { | 4174 | { |
4137 | struct nfs_client *clp = server->nfs_client; | ||
4138 | struct nfs4_state *state = lsp->ls_state; | ||
4139 | |||
4140 | switch (error) { | 4175 | switch (error) { |
4141 | case -NFS4ERR_ADMIN_REVOKED: | 4176 | case -NFS4ERR_ADMIN_REVOKED: |
4142 | case -NFS4ERR_BAD_STATEID: | 4177 | case -NFS4ERR_BAD_STATEID: |
4143 | case -NFS4ERR_EXPIRED: | 4178 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4144 | if (new_lock_owner != 0 || | 4179 | if (new_lock_owner != 0 || |
4145 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | 4180 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) |
4146 | nfs4_state_mark_reclaim_nograce(clp, state); | 4181 | nfs4_schedule_stateid_recovery(server, lsp->ls_state); |
4147 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | ||
4148 | break; | 4182 | break; |
4149 | case -NFS4ERR_STALE_STATEID: | 4183 | case -NFS4ERR_STALE_STATEID: |
4150 | if (new_lock_owner != 0 || | ||
4151 | (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0) | ||
4152 | nfs4_state_mark_reclaim_reboot(clp, state); | ||
4153 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; | 4184 | lsp->ls_seqid.flags &= ~NFS_SEQID_CONFIRMED; |
4185 | case -NFS4ERR_EXPIRED: | ||
4186 | nfs4_schedule_lease_recovery(server->nfs_client); | ||
4154 | }; | 4187 | }; |
4155 | } | 4188 | } |
4156 | 4189 | ||
@@ -4366,12 +4399,14 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) | |||
4366 | case -NFS4ERR_EXPIRED: | 4399 | case -NFS4ERR_EXPIRED: |
4367 | case -NFS4ERR_STALE_CLIENTID: | 4400 | case -NFS4ERR_STALE_CLIENTID: |
4368 | case -NFS4ERR_STALE_STATEID: | 4401 | case -NFS4ERR_STALE_STATEID: |
4402 | nfs4_schedule_lease_recovery(server->nfs_client); | ||
4403 | goto out; | ||
4369 | case -NFS4ERR_BADSESSION: | 4404 | case -NFS4ERR_BADSESSION: |
4370 | case -NFS4ERR_BADSLOT: | 4405 | case -NFS4ERR_BADSLOT: |
4371 | case -NFS4ERR_BAD_HIGH_SLOT: | 4406 | case -NFS4ERR_BAD_HIGH_SLOT: |
4372 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: | 4407 | case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: |
4373 | case -NFS4ERR_DEADSESSION: | 4408 | case -NFS4ERR_DEADSESSION: |
4374 | nfs4_schedule_state_recovery(server->nfs_client); | 4409 | nfs4_schedule_session_recovery(server->nfs_client->cl_session); |
4375 | goto out; | 4410 | goto out; |
4376 | case -ERESTARTSYS: | 4411 | case -ERESTARTSYS: |
4377 | /* | 4412 | /* |
@@ -4381,7 +4416,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) | |||
4381 | case -NFS4ERR_ADMIN_REVOKED: | 4416 | case -NFS4ERR_ADMIN_REVOKED: |
4382 | case -NFS4ERR_BAD_STATEID: | 4417 | case -NFS4ERR_BAD_STATEID: |
4383 | case -NFS4ERR_OPENMODE: | 4418 | case -NFS4ERR_OPENMODE: |
4384 | nfs4_state_mark_reclaim_nograce(server->nfs_client, state); | 4419 | nfs4_schedule_stateid_recovery(server, state); |
4385 | err = 0; | 4420 | err = 0; |
4386 | goto out; | 4421 | goto out; |
4387 | case -EKEYEXPIRED: | 4422 | case -EKEYEXPIRED: |
@@ -4988,10 +5023,20 @@ int nfs4_proc_create_session(struct nfs_client *clp) | |||
4988 | int status; | 5023 | int status; |
4989 | unsigned *ptr; | 5024 | unsigned *ptr; |
4990 | struct nfs4_session *session = clp->cl_session; | 5025 | struct nfs4_session *session = clp->cl_session; |
5026 | long timeout = 0; | ||
5027 | int err; | ||
4991 | 5028 | ||
4992 | dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); | 5029 | dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); |
4993 | 5030 | ||
4994 | status = _nfs4_proc_create_session(clp); | 5031 | do { |
5032 | status = _nfs4_proc_create_session(clp); | ||
5033 | if (status == -NFS4ERR_DELAY) { | ||
5034 | err = nfs4_delay(clp->cl_rpcclient, &timeout); | ||
5035 | if (err) | ||
5036 | status = err; | ||
5037 | } | ||
5038 | } while (status == -NFS4ERR_DELAY); | ||
5039 | |||
4995 | if (status) | 5040 | if (status) |
4996 | goto out; | 5041 | goto out; |
4997 | 5042 | ||
@@ -5100,7 +5145,7 @@ static int nfs41_sequence_handle_errors(struct rpc_task *task, struct nfs_client | |||
5100 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5145 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5101 | return -EAGAIN; | 5146 | return -EAGAIN; |
5102 | default: | 5147 | default: |
5103 | nfs4_schedule_state_recovery(clp); | 5148 | nfs4_schedule_lease_recovery(clp); |
5104 | } | 5149 | } |
5105 | return 0; | 5150 | return 0; |
5106 | } | 5151 | } |
@@ -5187,7 +5232,7 @@ static int nfs41_proc_async_sequence(struct nfs_client *clp, struct rpc_cred *cr | |||
5187 | if (IS_ERR(task)) | 5232 | if (IS_ERR(task)) |
5188 | ret = PTR_ERR(task); | 5233 | ret = PTR_ERR(task); |
5189 | else | 5234 | else |
5190 | rpc_put_task(task); | 5235 | rpc_put_task_async(task); |
5191 | dprintk("<-- %s status=%d\n", __func__, ret); | 5236 | dprintk("<-- %s status=%d\n", __func__, ret); |
5192 | return ret; | 5237 | return ret; |
5193 | } | 5238 | } |
@@ -5203,8 +5248,13 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred) | |||
5203 | goto out; | 5248 | goto out; |
5204 | } | 5249 | } |
5205 | ret = rpc_wait_for_completion_task(task); | 5250 | ret = rpc_wait_for_completion_task(task); |
5206 | if (!ret) | 5251 | if (!ret) { |
5252 | struct nfs4_sequence_res *res = task->tk_msg.rpc_resp; | ||
5253 | |||
5254 | if (task->tk_status == 0) | ||
5255 | nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags); | ||
5207 | ret = task->tk_status; | 5256 | ret = task->tk_status; |
5257 | } | ||
5208 | rpc_put_task(task); | 5258 | rpc_put_task(task); |
5209 | out: | 5259 | out: |
5210 | dprintk("<-- %s status=%d\n", __func__, ret); | 5260 | dprintk("<-- %s status=%d\n", __func__, ret); |
@@ -5241,7 +5291,7 @@ static int nfs41_reclaim_complete_handle_errors(struct rpc_task *task, struct nf | |||
5241 | rpc_delay(task, NFS4_POLL_RETRY_MAX); | 5291 | rpc_delay(task, NFS4_POLL_RETRY_MAX); |
5242 | return -EAGAIN; | 5292 | return -EAGAIN; |
5243 | default: | 5293 | default: |
5244 | nfs4_schedule_state_recovery(clp); | 5294 | nfs4_schedule_lease_recovery(clp); |
5245 | } | 5295 | } |
5246 | return 0; | 5296 | return 0; |
5247 | } | 5297 | } |
@@ -5309,6 +5359,9 @@ static int nfs41_proc_reclaim_complete(struct nfs_client *clp) | |||
5309 | status = PTR_ERR(task); | 5359 | status = PTR_ERR(task); |
5310 | goto out; | 5360 | goto out; |
5311 | } | 5361 | } |
5362 | status = nfs4_wait_for_completion_rpc_task(task); | ||
5363 | if (status == 0) | ||
5364 | status = task->tk_status; | ||
5312 | rpc_put_task(task); | 5365 | rpc_put_task(task); |
5313 | return 0; | 5366 | return 0; |
5314 | out: | 5367 | out: |
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e6742b57a04c..0592288f9f06 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c | |||
@@ -1007,9 +1007,9 @@ void nfs4_schedule_state_manager(struct nfs_client *clp) | |||
1007 | } | 1007 | } |
1008 | 1008 | ||
1009 | /* | 1009 | /* |
1010 | * Schedule a state recovery attempt | 1010 | * Schedule a lease recovery attempt |
1011 | */ | 1011 | */ |
1012 | void nfs4_schedule_state_recovery(struct nfs_client *clp) | 1012 | void nfs4_schedule_lease_recovery(struct nfs_client *clp) |
1013 | { | 1013 | { |
1014 | if (!clp) | 1014 | if (!clp) |
1015 | return; | 1015 | return; |
@@ -1018,7 +1018,7 @@ void nfs4_schedule_state_recovery(struct nfs_client *clp) | |||
1018 | nfs4_schedule_state_manager(clp); | 1018 | nfs4_schedule_state_manager(clp); |
1019 | } | 1019 | } |
1020 | 1020 | ||
1021 | int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) | 1021 | static int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *state) |
1022 | { | 1022 | { |
1023 | 1023 | ||
1024 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); | 1024 | set_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); |
@@ -1032,7 +1032,7 @@ int nfs4_state_mark_reclaim_reboot(struct nfs_client *clp, struct nfs4_state *st | |||
1032 | return 1; | 1032 | return 1; |
1033 | } | 1033 | } |
1034 | 1034 | ||
1035 | int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) | 1035 | static int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *state) |
1036 | { | 1036 | { |
1037 | set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); | 1037 | set_bit(NFS_STATE_RECLAIM_NOGRACE, &state->flags); |
1038 | clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); | 1038 | clear_bit(NFS_STATE_RECLAIM_REBOOT, &state->flags); |
@@ -1041,6 +1041,14 @@ int nfs4_state_mark_reclaim_nograce(struct nfs_client *clp, struct nfs4_state *s | |||
1041 | return 1; | 1041 | return 1; |
1042 | } | 1042 | } |
1043 | 1043 | ||
1044 | void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4_state *state) | ||
1045 | { | ||
1046 | struct nfs_client *clp = server->nfs_client; | ||
1047 | |||
1048 | nfs4_state_mark_reclaim_nograce(clp, state); | ||
1049 | nfs4_schedule_state_manager(clp); | ||
1050 | } | ||
1051 | |||
1044 | static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) | 1052 | static int nfs4_reclaim_locks(struct nfs4_state *state, const struct nfs4_state_recovery_ops *ops) |
1045 | { | 1053 | { |
1046 | struct inode *inode = state->inode; | 1054 | struct inode *inode = state->inode; |
@@ -1436,10 +1444,15 @@ static int nfs4_reclaim_lease(struct nfs_client *clp) | |||
1436 | } | 1444 | } |
1437 | 1445 | ||
1438 | #ifdef CONFIG_NFS_V4_1 | 1446 | #ifdef CONFIG_NFS_V4_1 |
1447 | void nfs4_schedule_session_recovery(struct nfs4_session *session) | ||
1448 | { | ||
1449 | nfs4_schedule_lease_recovery(session->clp); | ||
1450 | } | ||
1451 | |||
1439 | void nfs41_handle_recall_slot(struct nfs_client *clp) | 1452 | void nfs41_handle_recall_slot(struct nfs_client *clp) |
1440 | { | 1453 | { |
1441 | set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); | 1454 | set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); |
1442 | nfs4_schedule_state_recovery(clp); | 1455 | nfs4_schedule_state_manager(clp); |
1443 | } | 1456 | } |
1444 | 1457 | ||
1445 | static void nfs4_reset_all_state(struct nfs_client *clp) | 1458 | static void nfs4_reset_all_state(struct nfs_client *clp) |
@@ -1447,7 +1460,7 @@ static void nfs4_reset_all_state(struct nfs_client *clp) | |||
1447 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | 1460 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { |
1448 | clp->cl_boot_time = CURRENT_TIME; | 1461 | clp->cl_boot_time = CURRENT_TIME; |
1449 | nfs4_state_start_reclaim_nograce(clp); | 1462 | nfs4_state_start_reclaim_nograce(clp); |
1450 | nfs4_schedule_state_recovery(clp); | 1463 | nfs4_schedule_state_manager(clp); |
1451 | } | 1464 | } |
1452 | } | 1465 | } |
1453 | 1466 | ||
@@ -1455,7 +1468,7 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp) | |||
1455 | { | 1468 | { |
1456 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { | 1469 | if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { |
1457 | nfs4_state_start_reclaim_reboot(clp); | 1470 | nfs4_state_start_reclaim_reboot(clp); |
1458 | nfs4_schedule_state_recovery(clp); | 1471 | nfs4_schedule_state_manager(clp); |
1459 | } | 1472 | } |
1460 | } | 1473 | } |
1461 | 1474 | ||
@@ -1475,7 +1488,7 @@ static void nfs41_handle_cb_path_down(struct nfs_client *clp) | |||
1475 | { | 1488 | { |
1476 | nfs_expire_all_delegations(clp); | 1489 | nfs_expire_all_delegations(clp); |
1477 | if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) | 1490 | if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) |
1478 | nfs4_schedule_state_recovery(clp); | 1491 | nfs4_schedule_state_manager(clp); |
1479 | } | 1492 | } |
1480 | 1493 | ||
1481 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) | 1494 | void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) |
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 4e2c168b6ee9..94d50e86a124 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c | |||
@@ -1660,7 +1660,7 @@ static void encode_create_session(struct xdr_stream *xdr, | |||
1660 | 1660 | ||
1661 | p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); | 1661 | p = reserve_space(xdr, 20 + 2*28 + 20 + len + 12); |
1662 | *p++ = cpu_to_be32(OP_CREATE_SESSION); | 1662 | *p++ = cpu_to_be32(OP_CREATE_SESSION); |
1663 | p = xdr_encode_hyper(p, clp->cl_ex_clid); | 1663 | p = xdr_encode_hyper(p, clp->cl_clientid); |
1664 | *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ | 1664 | *p++ = cpu_to_be32(clp->cl_seqid); /*Sequence id */ |
1665 | *p++ = cpu_to_be32(args->flags); /*flags */ | 1665 | *p++ = cpu_to_be32(args->flags); /*flags */ |
1666 | 1666 | ||
@@ -4694,7 +4694,7 @@ static int decode_exchange_id(struct xdr_stream *xdr, | |||
4694 | p = xdr_inline_decode(xdr, 8); | 4694 | p = xdr_inline_decode(xdr, 8); |
4695 | if (unlikely(!p)) | 4695 | if (unlikely(!p)) |
4696 | goto out_overflow; | 4696 | goto out_overflow; |
4697 | xdr_decode_hyper(p, &clp->cl_ex_clid); | 4697 | xdr_decode_hyper(p, &clp->cl_clientid); |
4698 | p = xdr_inline_decode(xdr, 12); | 4698 | p = xdr_inline_decode(xdr, 12); |
4699 | if (unlikely(!p)) | 4699 | if (unlikely(!p)) |
4700 | goto out_overflow; | 4700 | goto out_overflow; |
diff --git a/fs/nfs/nfsroot.c b/fs/nfs/nfsroot.c index 903908a20023..c541093a5bf2 100644 --- a/fs/nfs/nfsroot.c +++ b/fs/nfs/nfsroot.c | |||
@@ -86,11 +86,14 @@ | |||
86 | /* Default path we try to mount. "%s" gets replaced by our IP address */ | 86 | /* Default path we try to mount. "%s" gets replaced by our IP address */ |
87 | #define NFS_ROOT "/tftpboot/%s" | 87 | #define NFS_ROOT "/tftpboot/%s" |
88 | 88 | ||
89 | /* Default NFSROOT mount options. */ | ||
90 | #define NFS_DEF_OPTIONS "udp" | ||
91 | |||
89 | /* Parameters passed from the kernel command line */ | 92 | /* Parameters passed from the kernel command line */ |
90 | static char nfs_root_parms[256] __initdata = ""; | 93 | static char nfs_root_parms[256] __initdata = ""; |
91 | 94 | ||
92 | /* Text-based mount options passed to super.c */ | 95 | /* Text-based mount options passed to super.c */ |
93 | static char nfs_root_options[256] __initdata = ""; | 96 | static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS; |
94 | 97 | ||
95 | /* Address of NFS server */ | 98 | /* Address of NFS server */ |
96 | static __be32 servaddr __initdata = htonl(INADDR_NONE); | 99 | static __be32 servaddr __initdata = htonl(INADDR_NONE); |
@@ -160,8 +163,14 @@ static int __init root_nfs_copy(char *dest, const char *src, | |||
160 | } | 163 | } |
161 | 164 | ||
162 | static int __init root_nfs_cat(char *dest, const char *src, | 165 | static int __init root_nfs_cat(char *dest, const char *src, |
163 | const size_t destlen) | 166 | const size_t destlen) |
164 | { | 167 | { |
168 | size_t len = strlen(dest); | ||
169 | |||
170 | if (len && dest[len - 1] != ',') | ||
171 | if (strlcat(dest, ",", destlen) > destlen) | ||
172 | return -1; | ||
173 | |||
165 | if (strlcat(dest, src, destlen) > destlen) | 174 | if (strlcat(dest, src, destlen) > destlen) |
166 | return -1; | 175 | return -1; |
167 | return 0; | 176 | return 0; |
@@ -194,16 +203,6 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, | |||
194 | if (root_nfs_cat(nfs_root_options, incoming, | 203 | if (root_nfs_cat(nfs_root_options, incoming, |
195 | sizeof(nfs_root_options))) | 204 | sizeof(nfs_root_options))) |
196 | return -1; | 205 | return -1; |
197 | |||
198 | /* | ||
199 | * Possibly prepare for more options to be appended | ||
200 | */ | ||
201 | if (nfs_root_options[0] != '\0' && | ||
202 | nfs_root_options[strlen(nfs_root_options)] != ',') | ||
203 | if (root_nfs_cat(nfs_root_options, ",", | ||
204 | sizeof(nfs_root_options))) | ||
205 | return -1; | ||
206 | |||
207 | return 0; | 206 | return 0; |
208 | } | 207 | } |
209 | 208 | ||
@@ -217,7 +216,7 @@ static int __init root_nfs_parse_options(char *incoming, char *exppath, | |||
217 | */ | 216 | */ |
218 | static int __init root_nfs_data(char *cmdline) | 217 | static int __init root_nfs_data(char *cmdline) |
219 | { | 218 | { |
220 | char addr_option[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; | 219 | char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; |
221 | int len, retval = -1; | 220 | int len, retval = -1; |
222 | char *tmp = NULL; | 221 | char *tmp = NULL; |
223 | const size_t tmplen = sizeof(nfs_export_path); | 222 | const size_t tmplen = sizeof(nfs_export_path); |
@@ -244,9 +243,9 @@ static int __init root_nfs_data(char *cmdline) | |||
244 | * Append mandatory options for nfsroot so they override | 243 | * Append mandatory options for nfsroot so they override |
245 | * what has come before | 244 | * what has come before |
246 | */ | 245 | */ |
247 | snprintf(addr_option, sizeof(addr_option), "nolock,addr=%pI4", | 246 | snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4", |
248 | &servaddr); | 247 | &servaddr); |
249 | if (root_nfs_cat(nfs_root_options, addr_option, | 248 | if (root_nfs_cat(nfs_root_options, mand_options, |
250 | sizeof(nfs_root_options))) | 249 | sizeof(nfs_root_options))) |
251 | goto out_optionstoolong; | 250 | goto out_optionstoolong; |
252 | 251 | ||
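
A small userspace model of the reworked option handling, only to illustrate the behavioural change: nfs_root_options now starts out as NFS_DEF_OPTIONS ("udp"), and root_nfs_cat() inserts the "," separator itself, so the old "append a trailing comma after the user's options" block is no longer needed. my_strlcat() is a simplified stand-in for the kernel's strlcat(), and the buffer size and addresses are made up for the demo.

#include <stdio.h>
#include <string.h>

/* Simplified strlcat(): appends src to dst (total buffer size 'size') and
 * returns the length of the string it tried to build, as the kernel one does. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t slen = strlen(src);

	if (dlen < size)
		snprintf(dst + dlen, size - dlen, "%s", src);
	return dlen + slen;
}

/* Same logic as the patched kernel helper, with my_strlcat() swapped in. */
static int root_nfs_cat(char *dest, const char *src, const size_t destlen)
{
	size_t len = strlen(dest);

	if (len && dest[len - 1] != ',')
		if (my_strlcat(dest, ",", destlen) > destlen)
			return -1;
	if (my_strlcat(dest, src, destlen) > destlen)
		return -1;
	return 0;
}

int main(void)
{
	char opts[64] = "udp";			/* NFS_DEF_OPTIONS */

	root_nfs_cat(opts, "nolock,addr=10.0.0.1", sizeof(opts));
	printf("%s\n", opts);			/* udp,nolock,addr=10.0.0.1 */
	return 0;
}
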
diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index e313a51acdd1..6481d537d69d 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c | |||
@@ -180,7 +180,7 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n | |||
180 | task_setup_data.rpc_client = NFS_CLIENT(dir); | 180 | task_setup_data.rpc_client = NFS_CLIENT(dir); |
181 | task = rpc_run_task(&task_setup_data); | 181 | task = rpc_run_task(&task_setup_data); |
182 | if (!IS_ERR(task)) | 182 | if (!IS_ERR(task)) |
183 | rpc_put_task(task); | 183 | rpc_put_task_async(task); |
184 | return 1; | 184 | return 1; |
185 | } | 185 | } |
186 | 186 | ||
diff --git a/fs/nfs/write.c b/fs/nfs/write.c index c8278f4046cb..42b92d7a9cc4 100644 --- a/fs/nfs/write.c +++ b/fs/nfs/write.c | |||
@@ -1292,6 +1292,8 @@ static int nfs_commit_rpcsetup(struct list_head *head, | |||
1292 | task = rpc_run_task(&task_setup_data); | 1292 | task = rpc_run_task(&task_setup_data); |
1293 | if (IS_ERR(task)) | 1293 | if (IS_ERR(task)) |
1294 | return PTR_ERR(task); | 1294 | return PTR_ERR(task); |
1295 | if (how & FLUSH_SYNC) | ||
1296 | rpc_wait_for_completion_task(task); | ||
1295 | rpc_put_task(task); | 1297 | rpc_put_task(task); |
1296 | return 0; | 1298 | return 0; |
1297 | } | 1299 | } |
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c index cde36cb0f348..02eb4edf0ece 100644 --- a/fs/nfsd/nfs4callback.c +++ b/fs/nfsd/nfs4callback.c | |||
@@ -432,7 +432,7 @@ static int decode_cb_sequence4resok(struct xdr_stream *xdr, | |||
432 | * If the server returns different values for sessionID, slotID or | 432 | * If the server returns different values for sessionID, slotID or |
433 | * sequence number, the server is looney tunes. | 433 | * sequence number, the server is looney tunes. |
434 | */ | 434 | */ |
435 | p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4); | 435 | p = xdr_inline_decode(xdr, NFS4_MAX_SESSIONID_LEN + 4 + 4 + 4 + 4); |
436 | if (unlikely(p == NULL)) | 436 | if (unlikely(p == NULL)) |
437 | goto out_overflow; | 437 | goto out_overflow; |
438 | memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); | 438 | memcpy(id.data, p, NFS4_MAX_SESSIONID_LEN); |
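
A quick worked check of the arithmetic behind this one-line fix: a CB_SEQUENCE reply (CB_SEQUENCE4resok in RFC 5661) carries the 16-byte session ID followed by four 32-bit values (sequence ID, slot ID, highest slot ID, target highest slot ID), so the inline decode must reserve 16 + 4 + 4 + 4 + 4 = 32 bytes; the old code reserved only 24 and could read past the decoded buffer. A trivial standalone check, with the session-ID length taken from the NFS headers:

#include <stdio.h>

#define NFS4_MAX_SESSIONID_LEN 16

int main(void)
{
	unsigned int seqid = 4, slotid = 4, highest = 4, target = 4;

	printf("old reservation: %u bytes\n",
	       NFS4_MAX_SESSIONID_LEN + 4 + 4);		/* 24 */
	printf("new reservation: %u bytes\n",
	       NFS4_MAX_SESSIONID_LEN + seqid + slotid +
	       highest + target);			/* 32 */
	return 0;
}
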
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 54b60bfceb8d..7b566ec14e18 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c | |||
@@ -2445,15 +2445,16 @@ nfs4_check_delegmode(struct nfs4_delegation *dp, int flags) | |||
2445 | static struct nfs4_delegation * | 2445 | static struct nfs4_delegation * |
2446 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) | 2446 | find_delegation_file(struct nfs4_file *fp, stateid_t *stid) |
2447 | { | 2447 | { |
2448 | struct nfs4_delegation *dp = NULL; | 2448 | struct nfs4_delegation *dp; |
2449 | 2449 | ||
2450 | spin_lock(&recall_lock); | 2450 | spin_lock(&recall_lock); |
2451 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) { | 2451 | list_for_each_entry(dp, &fp->fi_delegations, dl_perfile) |
2452 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) | 2452 | if (dp->dl_stateid.si_stateownerid == stid->si_stateownerid) { |
2453 | break; | 2453 | spin_unlock(&recall_lock); |
2454 | } | 2454 | return dp; |
2455 | } | ||
2455 | spin_unlock(&recall_lock); | 2456 | spin_unlock(&recall_lock); |
2456 | return dp; | 2457 | return NULL; |
2457 | } | 2458 | } |
2458 | 2459 | ||
2459 | int share_access_to_flags(u32 share_access) | 2460 | int share_access_to_flags(u32 share_access) |
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 1275b8655070..615f0a9f0600 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -1142,7 +1142,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, | |||
1142 | 1142 | ||
1143 | u32 dummy; | 1143 | u32 dummy; |
1144 | char *machine_name; | 1144 | char *machine_name; |
1145 | int i; | 1145 | int i, j; |
1146 | int nr_secflavs; | 1146 | int nr_secflavs; |
1147 | 1147 | ||
1148 | READ_BUF(16); | 1148 | READ_BUF(16); |
@@ -1215,7 +1215,7 @@ nfsd4_decode_create_session(struct nfsd4_compoundargs *argp, | |||
1215 | READ_BUF(4); | 1215 | READ_BUF(4); |
1216 | READ32(dummy); | 1216 | READ32(dummy); |
1217 | READ_BUF(dummy * 4); | 1217 | READ_BUF(dummy * 4); |
1218 | for (i = 0; i < dummy; ++i) | 1218 | for (j = 0; j < dummy; ++j) |
1219 | READ32(dummy); | 1219 | READ32(dummy); |
1220 | break; | 1220 | break; |
1221 | case RPC_AUTH_GSS: | 1221 | case RPC_AUTH_GSS: |
diff --git a/fs/nilfs2/btnode.c b/fs/nilfs2/btnode.c index 388e9e8f5286..85f7baa15f5d 100644 --- a/fs/nilfs2/btnode.c +++ b/fs/nilfs2/btnode.c | |||
@@ -35,11 +35,6 @@ | |||
35 | #include "btnode.h" | 35 | #include "btnode.h" |
36 | 36 | ||
37 | 37 | ||
38 | void nilfs_btnode_cache_init_once(struct address_space *btnc) | ||
39 | { | ||
40 | nilfs_mapping_init_once(btnc); | ||
41 | } | ||
42 | |||
43 | static const struct address_space_operations def_btnode_aops = { | 38 | static const struct address_space_operations def_btnode_aops = { |
44 | .sync_page = block_sync_page, | 39 | .sync_page = block_sync_page, |
45 | }; | 40 | }; |
diff --git a/fs/nilfs2/btnode.h b/fs/nilfs2/btnode.h index 79037494f1e0..1b8ebd888c28 100644 --- a/fs/nilfs2/btnode.h +++ b/fs/nilfs2/btnode.h | |||
@@ -37,7 +37,6 @@ struct nilfs_btnode_chkey_ctxt { | |||
37 | struct buffer_head *newbh; | 37 | struct buffer_head *newbh; |
38 | }; | 38 | }; |
39 | 39 | ||
40 | void nilfs_btnode_cache_init_once(struct address_space *); | ||
41 | void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); | 40 | void nilfs_btnode_cache_init(struct address_space *, struct backing_dev_info *); |
42 | void nilfs_btnode_cache_clear(struct address_space *); | 41 | void nilfs_btnode_cache_clear(struct address_space *); |
43 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, | 42 | struct buffer_head *nilfs_btnode_create_block(struct address_space *btnc, |
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c index 6a0e2a189f60..a0babd2bff6a 100644 --- a/fs/nilfs2/mdt.c +++ b/fs/nilfs2/mdt.c | |||
@@ -454,9 +454,9 @@ int nilfs_mdt_setup_shadow_map(struct inode *inode, | |||
454 | struct backing_dev_info *bdi = inode->i_sb->s_bdi; | 454 | struct backing_dev_info *bdi = inode->i_sb->s_bdi; |
455 | 455 | ||
456 | INIT_LIST_HEAD(&shadow->frozen_buffers); | 456 | INIT_LIST_HEAD(&shadow->frozen_buffers); |
457 | nilfs_mapping_init_once(&shadow->frozen_data); | 457 | address_space_init_once(&shadow->frozen_data); |
458 | nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); | 458 | nilfs_mapping_init(&shadow->frozen_data, bdi, &shadow_map_aops); |
459 | nilfs_mapping_init_once(&shadow->frozen_btnodes); | 459 | address_space_init_once(&shadow->frozen_btnodes); |
460 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); | 460 | nilfs_mapping_init(&shadow->frozen_btnodes, bdi, &shadow_map_aops); |
461 | mi->mi_shadow = shadow; | 461 | mi->mi_shadow = shadow; |
462 | return 0; | 462 | return 0; |
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 98034271cd02..161791d26458 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
@@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
397 | new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); | 397 | new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); |
398 | if (!new_de) | 398 | if (!new_de) |
399 | goto out_dir; | 399 | goto out_dir; |
400 | inc_nlink(old_inode); | ||
401 | nilfs_set_link(new_dir, new_de, new_page, old_inode); | 400 | nilfs_set_link(new_dir, new_de, new_page, old_inode); |
402 | nilfs_mark_inode_dirty(new_dir); | 401 | nilfs_mark_inode_dirty(new_dir); |
403 | new_inode->i_ctime = CURRENT_TIME; | 402 | new_inode->i_ctime = CURRENT_TIME; |
@@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
411 | if (new_dir->i_nlink >= NILFS_LINK_MAX) | 410 | if (new_dir->i_nlink >= NILFS_LINK_MAX) |
412 | goto out_dir; | 411 | goto out_dir; |
413 | } | 412 | } |
414 | inc_nlink(old_inode); | ||
415 | err = nilfs_add_link(new_dentry, old_inode); | 413 | err = nilfs_add_link(new_dentry, old_inode); |
416 | if (err) { | 414 | if (err) |
417 | drop_nlink(old_inode); | ||
418 | nilfs_mark_inode_dirty(old_inode); | ||
419 | goto out_dir; | 415 | goto out_dir; |
420 | } | ||
421 | if (dir_de) { | 416 | if (dir_de) { |
422 | inc_nlink(new_dir); | 417 | inc_nlink(new_dir); |
423 | nilfs_mark_inode_dirty(new_dir); | 418 | nilfs_mark_inode_dirty(new_dir); |
@@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
431 | old_inode->i_ctime = CURRENT_TIME; | 426 | old_inode->i_ctime = CURRENT_TIME; |
432 | 427 | ||
433 | nilfs_delete_entry(old_de, old_page); | 428 | nilfs_delete_entry(old_de, old_page); |
434 | drop_nlink(old_inode); | ||
435 | 429 | ||
436 | if (dir_de) { | 430 | if (dir_de) { |
437 | nilfs_set_link(old_inode, dir_de, dir_page, new_dir); | 431 | nilfs_set_link(old_inode, dir_de, dir_page, new_dir); |
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c index 0c432416cfef..a585b35fd6bc 100644 --- a/fs/nilfs2/page.c +++ b/fs/nilfs2/page.c | |||
@@ -492,19 +492,6 @@ unsigned nilfs_page_count_clean_buffers(struct page *page, | |||
492 | return nc; | 492 | return nc; |
493 | } | 493 | } |
494 | 494 | ||
495 | void nilfs_mapping_init_once(struct address_space *mapping) | ||
496 | { | ||
497 | memset(mapping, 0, sizeof(*mapping)); | ||
498 | INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); | ||
499 | spin_lock_init(&mapping->tree_lock); | ||
500 | INIT_LIST_HEAD(&mapping->private_list); | ||
501 | spin_lock_init(&mapping->private_lock); | ||
502 | |||
503 | spin_lock_init(&mapping->i_mmap_lock); | ||
504 | INIT_RAW_PRIO_TREE_ROOT(&mapping->i_mmap); | ||
505 | INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); | ||
506 | } | ||
507 | |||
508 | void nilfs_mapping_init(struct address_space *mapping, | 495 | void nilfs_mapping_init(struct address_space *mapping, |
509 | struct backing_dev_info *bdi, | 496 | struct backing_dev_info *bdi, |
510 | const struct address_space_operations *aops) | 497 | const struct address_space_operations *aops) |
diff --git a/fs/nilfs2/page.h b/fs/nilfs2/page.h index 622df27cd891..2a00953ebd5f 100644 --- a/fs/nilfs2/page.h +++ b/fs/nilfs2/page.h | |||
@@ -61,7 +61,6 @@ void nilfs_free_private_page(struct page *); | |||
61 | int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); | 61 | int nilfs_copy_dirty_pages(struct address_space *, struct address_space *); |
62 | void nilfs_copy_back_pages(struct address_space *, struct address_space *); | 62 | void nilfs_copy_back_pages(struct address_space *, struct address_space *); |
63 | void nilfs_clear_dirty_pages(struct address_space *); | 63 | void nilfs_clear_dirty_pages(struct address_space *); |
64 | void nilfs_mapping_init_once(struct address_space *mapping); | ||
65 | void nilfs_mapping_init(struct address_space *mapping, | 64 | void nilfs_mapping_init(struct address_space *mapping, |
66 | struct backing_dev_info *bdi, | 65 | struct backing_dev_info *bdi, |
67 | const struct address_space_operations *aops); | 66 | const struct address_space_operations *aops); |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 55ebae5c7f39..2de9f636792a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
@@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, | |||
430 | nilfs_segctor_map_segsum_entry( | 430 | nilfs_segctor_map_segsum_entry( |
431 | sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); | 431 | sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); |
432 | 432 | ||
433 | if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) | 433 | if (NILFS_I(inode)->i_root && |
434 | !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) | ||
434 | set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); | 435 | set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); |
435 | /* skip finfo */ | 436 | /* skip finfo */ |
436 | } | 437 | } |
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c index 58fd707174e1..1673b3d99842 100644 --- a/fs/nilfs2/super.c +++ b/fs/nilfs2/super.c | |||
@@ -1279,7 +1279,7 @@ static void nilfs_inode_init_once(void *obj) | |||
1279 | #ifdef CONFIG_NILFS_XATTR | 1279 | #ifdef CONFIG_NILFS_XATTR |
1280 | init_rwsem(&ii->xattr_sem); | 1280 | init_rwsem(&ii->xattr_sem); |
1281 | #endif | 1281 | #endif |
1282 | nilfs_btnode_cache_init_once(&ii->i_btnode_cache); | 1282 | address_space_init_once(&ii->i_btnode_cache); |
1283 | ii->i_bmap = &ii->i_bmap_data; | 1283 | ii->i_bmap = &ii->i_bmap_data; |
1284 | inode_init_once(&ii->vfs_inode); | 1284 | inode_init_once(&ii->vfs_inode); |
1285 | } | 1285 | } |
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c index 6d80ecc7834f..7eb90403fc8a 100644 --- a/fs/ocfs2/dcache.c +++ b/fs/ocfs2/dcache.c | |||
@@ -56,7 +56,7 @@ static int ocfs2_dentry_revalidate(struct dentry *dentry, | |||
56 | int ret = 0; /* if all else fails, just return false */ | 56 | int ret = 0; /* if all else fails, just return false */ |
57 | struct ocfs2_super *osb; | 57 | struct ocfs2_super *osb; |
58 | 58 | ||
59 | if (nd->flags & LOOKUP_RCU) | 59 | if (nd && nd->flags & LOOKUP_RCU) |
60 | return -ECHILD; | 60 | return -ECHILD; |
61 | 61 | ||
62 | inode = dentry->d_inode; | 62 | inode = dentry->d_inode; |
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h index 43e56b97f9c0..6180da1e37e6 100644 --- a/fs/ocfs2/journal.h +++ b/fs/ocfs2/journal.h | |||
@@ -405,9 +405,9 @@ static inline int ocfs2_remove_extent_credits(struct super_block *sb) | |||
405 | ocfs2_quota_trans_credits(sb); | 405 | ocfs2_quota_trans_credits(sb); |
406 | } | 406 | } |
407 | 407 | ||
408 | /* data block for new dir/symlink, 2 for bitmap updates (bitmap fe + | 408 | /* data block for new dir/symlink, allocation of directory block, dx_root |
409 | * bitmap block for the new bit) dx_root update for free list */ | 409 | * update for free list */ |
410 | #define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + 2 + 1) | 410 | #define OCFS2_DIR_LINK_ADDITIONAL_CREDITS (1 + OCFS2_SUBALLOC_ALLOC + 1) |
411 | 411 | ||
412 | static inline int ocfs2_add_dir_index_credits(struct super_block *sb) | 412 | static inline int ocfs2_add_dir_index_credits(struct super_block *sb) |
413 | { | 413 | { |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index b5f9160e93e9..19ebc5aad391 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -3228,7 +3228,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3228 | u32 num_clusters, unsigned int e_flags) | 3228 | u32 num_clusters, unsigned int e_flags) |
3229 | { | 3229 | { |
3230 | int ret, delete, index, credits = 0; | 3230 | int ret, delete, index, credits = 0; |
3231 | u32 new_bit, new_len; | 3231 | u32 new_bit, new_len, orig_num_clusters; |
3232 | unsigned int set_len; | 3232 | unsigned int set_len; |
3233 | struct ocfs2_super *osb = OCFS2_SB(sb); | 3233 | struct ocfs2_super *osb = OCFS2_SB(sb); |
3234 | handle_t *handle; | 3234 | handle_t *handle; |
@@ -3261,6 +3261,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3261 | goto out; | 3261 | goto out; |
3262 | } | 3262 | } |
3263 | 3263 | ||
3264 | orig_num_clusters = num_clusters; | ||
3265 | |||
3264 | while (num_clusters) { | 3266 | while (num_clusters) { |
3265 | ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, | 3267 | ret = ocfs2_get_refcount_rec(ref_ci, context->ref_root_bh, |
3266 | p_cluster, num_clusters, | 3268 | p_cluster, num_clusters, |
@@ -3348,7 +3350,8 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3348 | * in write-back mode. | 3350 | * in write-back mode. |
3349 | */ | 3351 | */ |
3350 | if (context->get_clusters == ocfs2_di_get_clusters) { | 3352 | if (context->get_clusters == ocfs2_di_get_clusters) { |
3351 | ret = ocfs2_cow_sync_writeback(sb, context, cpos, num_clusters); | 3353 | ret = ocfs2_cow_sync_writeback(sb, context, cpos, |
3354 | orig_num_clusters); | ||
3352 | if (ret) | 3355 | if (ret) |
3353 | mlog_errno(ret); | 3356 | mlog_errno(ret); |
3354 | } | 3357 | } |
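
The refcounttree hunk above works because num_clusters is consumed by the while loop, so the final ocfs2_cow_sync_writeback() call has to use a copy saved before the loop starts. A minimal user-space sketch of that pattern (illustrative names, not the real ocfs2 helpers):

#include <stdio.h>

static void sync_range(unsigned int start, unsigned int len)
{
        printf("syncing %u clusters starting at %u\n", len, start);
}

static void make_clusters_writable(unsigned int cpos, unsigned int num_clusters)
{
        /* saved before the loop below consumes the counter */
        unsigned int orig_num_clusters = num_clusters;

        while (num_clusters) {
                unsigned int chunk = num_clusters > 4 ? 4 : num_clusters;
                /* ... CoW one extent of 'chunk' clusters ... */
                num_clusters -= chunk;
        }

        /* using num_clusters here would sync nothing: it is 0 by now */
        sync_range(cpos, orig_num_clusters);
}

int main(void)
{
        make_clusters_writable(100, 10);
        return 0;
}
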
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 38f986d2447e..36c423fb0635 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -1316,7 +1316,7 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1316 | struct mount_options *mopt, | 1316 | struct mount_options *mopt, |
1317 | int is_remount) | 1317 | int is_remount) |
1318 | { | 1318 | { |
1319 | int status; | 1319 | int status, user_stack = 0; |
1320 | char *p; | 1320 | char *p; |
1321 | u32 tmp; | 1321 | u32 tmp; |
1322 | 1322 | ||
@@ -1459,6 +1459,15 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1459 | memcpy(mopt->cluster_stack, args[0].from, | 1459 | memcpy(mopt->cluster_stack, args[0].from, |
1460 | OCFS2_STACK_LABEL_LEN); | 1460 | OCFS2_STACK_LABEL_LEN); |
1461 | mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; | 1461 | mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0'; |
1462 | /* | ||
1463 | * Open code the memcmp here as we don't have | ||
1464 | * an osb to pass to | ||
1465 | * ocfs2_userspace_stack(). | ||
1466 | */ | ||
1467 | if (memcmp(mopt->cluster_stack, | ||
1468 | OCFS2_CLASSIC_CLUSTER_STACK, | ||
1469 | OCFS2_STACK_LABEL_LEN)) | ||
1470 | user_stack = 1; | ||
1462 | break; | 1471 | break; |
1463 | case Opt_inode64: | 1472 | case Opt_inode64: |
1464 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; | 1473 | mopt->mount_opt |= OCFS2_MOUNT_INODE64; |
@@ -1514,13 +1523,16 @@ static int ocfs2_parse_options(struct super_block *sb, | |||
1514 | } | 1523 | } |
1515 | } | 1524 | } |
1516 | 1525 | ||
1517 | /* Ensure only one heartbeat mode */ | 1526 | if (user_stack == 0) { |
1518 | tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL | | 1527 | /* Ensure only one heartbeat mode */ |
1519 | OCFS2_MOUNT_HB_NONE); | 1528 | tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL | |
1520 | if (hweight32(tmp) != 1) { | 1529 | OCFS2_MOUNT_HB_GLOBAL | |
1521 | mlog(ML_ERROR, "Invalid heartbeat mount options\n"); | 1530 | OCFS2_MOUNT_HB_NONE); |
1522 | status = 0; | 1531 | if (hweight32(tmp) != 1) { |
1523 | goto bail; | 1532 | mlog(ML_ERROR, "Invalid heartbeat mount options\n"); |
1533 | status = 0; | ||
1534 | goto bail; | ||
1535 | } | ||
1524 | } | 1536 | } |
1525 | 1537 | ||
1526 | status = 1; | 1538 | status = 1; |
diff --git a/fs/open.c b/fs/open.c --- a/fs/open.c +++ b/fs/open.c | |||
@@ -233,6 +233,14 @@ int do_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
233 | 233 | ||
234 | if (!(file->f_mode & FMODE_WRITE)) | 234 | if (!(file->f_mode & FMODE_WRITE)) |
235 | return -EBADF; | 235 | return -EBADF; |
236 | |||
237 | /* It's not possible punch hole on append only file */ | ||
238 | if (mode & FALLOC_FL_PUNCH_HOLE && IS_APPEND(inode)) | ||
239 | return -EPERM; | ||
240 | |||
241 | if (IS_IMMUTABLE(inode)) | ||
242 | return -EPERM; | ||
243 | |||
236 | /* | 244 | /* |
237 | * Revalidate the write permissions, in case security policy has | 245 | * Revalidate the write permissions, in case security policy has |
238 | * changed since the files were opened. | 246 | * changed since the files were opened. |
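
The do_fallocate() hunk adds two early permission checks: hole punching is refused on append-only inodes, and any fallocate is refused on immutable inodes. A small user-space sketch of the same check order, using locally defined stand-ins for the kernel flags:

#include <errno.h>
#include <stdio.h>

#define FL_PUNCH_HOLE   0x02    /* stands in for FALLOC_FL_PUNCH_HOLE */
#define ATTR_APPEND     0x01    /* stands in for the append-only inode attribute */
#define ATTR_IMMUTABLE  0x02    /* stands in for the immutable inode attribute */

static int check_fallocate(int mode, int inode_attrs)
{
        /* punching a hole would rewrite existing data on an append-only file */
        if ((mode & FL_PUNCH_HOLE) && (inode_attrs & ATTR_APPEND))
                return -EPERM;

        /* immutable files must not change at all */
        if (inode_attrs & ATTR_IMMUTABLE)
                return -EPERM;

        return 0;
}

int main(void)
{
        printf("%d\n", check_fallocate(FL_PUNCH_HOLE, ATTR_APPEND)); /* -EPERM */
        printf("%d\n", check_fallocate(0, 0));                       /* 0 */
        return 0;
}
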
diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c index 789c625c7aa5..b10e3540d5b7 100644 --- a/fs/partitions/ldm.c +++ b/fs/partitions/ldm.c | |||
@@ -251,6 +251,11 @@ static bool ldm_parse_vmdb (const u8 *data, struct vmdb *vm) | |||
251 | } | 251 | } |
252 | 252 | ||
253 | vm->vblk_size = get_unaligned_be32(data + 0x08); | 253 | vm->vblk_size = get_unaligned_be32(data + 0x08); |
254 | if (vm->vblk_size == 0) { | ||
255 | ldm_error ("Illegal VBLK size"); | ||
256 | return false; | ||
257 | } | ||
258 | |||
254 | vm->vblk_offset = get_unaligned_be32(data + 0x0C); | 259 | vm->vblk_offset = get_unaligned_be32(data + 0x0C); |
255 | vm->last_vblk_seq = get_unaligned_be32(data + 0x04); | 260 | vm->last_vblk_seq = get_unaligned_be32(data + 0x04); |
256 | 261 | ||
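
The LDM hunk rejects a zero VBLK size read from disk before it is ever used as a record stride. A user-space sketch of validating such an untrusted on-disk field, assuming (as the kernel code does) a big-endian 32-bit size at offset 0x08:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t be32(const uint8_t *p)
{
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

static bool parse_vmdb(const uint8_t *data, uint32_t *vblk_size)
{
        *vblk_size = be32(data + 0x08);
        if (*vblk_size == 0) {
                fprintf(stderr, "Illegal VBLK size\n");
                return false;   /* a zero stride would mean a division by zero
                                 * or an endless walk over the database later */
        }
        return true;
}

int main(void)
{
        uint8_t buf[16] = { 0 };        /* corrupt database: size field is zero */
        uint32_t sz;

        printf("%s\n", parse_vmdb(buf, &sz) ? "ok" : "rejected");
        return 0;
}
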
diff --git a/fs/partitions/osf.c b/fs/partitions/osf.c index 48cec7cbca17..be03a0b08b47 100644 --- a/fs/partitions/osf.c +++ b/fs/partitions/osf.c | |||
@@ -10,10 +10,13 @@ | |||
10 | #include "check.h" | 10 | #include "check.h" |
11 | #include "osf.h" | 11 | #include "osf.h" |
12 | 12 | ||
13 | #define MAX_OSF_PARTITIONS 8 | ||
14 | |||
13 | int osf_partition(struct parsed_partitions *state) | 15 | int osf_partition(struct parsed_partitions *state) |
14 | { | 16 | { |
15 | int i; | 17 | int i; |
16 | int slot = 1; | 18 | int slot = 1; |
19 | unsigned int npartitions; | ||
17 | Sector sect; | 20 | Sector sect; |
18 | unsigned char *data; | 21 | unsigned char *data; |
19 | struct disklabel { | 22 | struct disklabel { |
@@ -45,7 +48,7 @@ int osf_partition(struct parsed_partitions *state) | |||
45 | u8 p_fstype; | 48 | u8 p_fstype; |
46 | u8 p_frag; | 49 | u8 p_frag; |
47 | __le16 p_cpg; | 50 | __le16 p_cpg; |
48 | } d_partitions[8]; | 51 | } d_partitions[MAX_OSF_PARTITIONS]; |
49 | } * label; | 52 | } * label; |
50 | struct d_partition * partition; | 53 | struct d_partition * partition; |
51 | 54 | ||
@@ -63,7 +66,12 @@ int osf_partition(struct parsed_partitions *state) | |||
63 | put_dev_sector(sect); | 66 | put_dev_sector(sect); |
64 | return 0; | 67 | return 0; |
65 | } | 68 | } |
66 | for (i = 0 ; i < le16_to_cpu(label->d_npartitions); i++, partition++) { | 69 | npartitions = le16_to_cpu(label->d_npartitions); |
70 | if (npartitions > MAX_OSF_PARTITIONS) { | ||
71 | put_dev_sector(sect); | ||
72 | return 0; | ||
73 | } | ||
74 | for (i = 0 ; i < npartitions; i++, partition++) { | ||
67 | if (slot == state->limit) | 75 | if (slot == state->limit) |
68 | break; | 76 | break; |
69 | if (le32_to_cpu(partition->p_size)) | 77 | if (le32_to_cpu(partition->p_size)) |
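
The OSF hunk bounds the on-disk partition count against the fixed d_partitions[] array before looping over it. A self-contained sketch of that check with simplified field types:

#include <stdint.h>
#include <stdio.h>

#define MAX_OSF_PARTITIONS 8

struct d_partition { uint32_t p_size; uint32_t p_offset; };

struct disklabel {
        uint16_t d_npartitions;          /* count read from the on-disk label */
        struct d_partition d_partitions[MAX_OSF_PARTITIONS];
};

static int parse_label(const struct disklabel *label)
{
        unsigned int npartitions = label->d_npartitions;

        if (npartitions > MAX_OSF_PARTITIONS)
                return 0;               /* reject instead of walking off the array */
        for (unsigned int i = 0; i < npartitions; i++)
                printf("partition %u: %u sectors\n", i,
                       (unsigned int)label->d_partitions[i].p_size);
        return 1;
}

int main(void)
{
        struct disklabel bad = { .d_npartitions = 200 };

        printf("accepted: %d\n", parse_label(&bad));    /* 0 */
        return 0;
}
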
diff --git a/fs/proc/base.c b/fs/proc/base.c index 9d096e82b201..d49c4b5d2c3e 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -2620,35 +2620,6 @@ static const struct pid_entry proc_base_stuff[] = { | |||
2620 | &proc_self_inode_operations, NULL, {}), | 2620 | &proc_self_inode_operations, NULL, {}), |
2621 | }; | 2621 | }; |
2622 | 2622 | ||
2623 | /* | ||
2624 | * Exceptional case: normally we are not allowed to unhash a busy | ||
2625 | * directory. In this case, however, we can do it - no aliasing problems | ||
2626 | * due to the way we treat inodes. | ||
2627 | */ | ||
2628 | static int proc_base_revalidate(struct dentry *dentry, struct nameidata *nd) | ||
2629 | { | ||
2630 | struct inode *inode; | ||
2631 | struct task_struct *task; | ||
2632 | |||
2633 | if (nd->flags & LOOKUP_RCU) | ||
2634 | return -ECHILD; | ||
2635 | |||
2636 | inode = dentry->d_inode; | ||
2637 | task = get_proc_task(inode); | ||
2638 | if (task) { | ||
2639 | put_task_struct(task); | ||
2640 | return 1; | ||
2641 | } | ||
2642 | d_drop(dentry); | ||
2643 | return 0; | ||
2644 | } | ||
2645 | |||
2646 | static const struct dentry_operations proc_base_dentry_operations = | ||
2647 | { | ||
2648 | .d_revalidate = proc_base_revalidate, | ||
2649 | .d_delete = pid_delete_dentry, | ||
2650 | }; | ||
2651 | |||
2652 | static struct dentry *proc_base_instantiate(struct inode *dir, | 2623 | static struct dentry *proc_base_instantiate(struct inode *dir, |
2653 | struct dentry *dentry, struct task_struct *task, const void *ptr) | 2624 | struct dentry *dentry, struct task_struct *task, const void *ptr) |
2654 | { | 2625 | { |
@@ -2685,7 +2656,6 @@ static struct dentry *proc_base_instantiate(struct inode *dir, | |||
2685 | if (p->fop) | 2656 | if (p->fop) |
2686 | inode->i_fop = p->fop; | 2657 | inode->i_fop = p->fop; |
2687 | ei->op = p->op; | 2658 | ei->op = p->op; |
2688 | d_set_d_op(dentry, &proc_base_dentry_operations); | ||
2689 | d_add(dentry, inode); | 2659 | d_add(dentry, inode); |
2690 | error = NULL; | 2660 | error = NULL; |
2691 | out: | 2661 | out: |
diff --git a/fs/proc/inode.c b/fs/proc/inode.c index 176ce4cda68a..d6a7ca1fdac5 100644 --- a/fs/proc/inode.c +++ b/fs/proc/inode.c | |||
@@ -27,6 +27,7 @@ | |||
27 | static void proc_evict_inode(struct inode *inode) | 27 | static void proc_evict_inode(struct inode *inode) |
28 | { | 28 | { |
29 | struct proc_dir_entry *de; | 29 | struct proc_dir_entry *de; |
30 | struct ctl_table_header *head; | ||
30 | 31 | ||
31 | truncate_inode_pages(&inode->i_data, 0); | 32 | truncate_inode_pages(&inode->i_data, 0); |
32 | end_writeback(inode); | 33 | end_writeback(inode); |
@@ -38,8 +39,11 @@ static void proc_evict_inode(struct inode *inode) | |||
38 | de = PROC_I(inode)->pde; | 39 | de = PROC_I(inode)->pde; |
39 | if (de) | 40 | if (de) |
40 | pde_put(de); | 41 | pde_put(de); |
41 | if (PROC_I(inode)->sysctl) | 42 | head = PROC_I(inode)->sysctl; |
42 | sysctl_head_put(PROC_I(inode)->sysctl); | 43 | if (head) { |
44 | rcu_assign_pointer(PROC_I(inode)->sysctl, NULL); | ||
45 | sysctl_head_put(head); | ||
46 | } | ||
43 | } | 47 | } |
44 | 48 | ||
45 | struct vfsmount *proc_mnt; | 49 | struct vfsmount *proc_mnt; |
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index d9396a4fc7ff..927cbd115e53 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c | |||
@@ -233,7 +233,7 @@ void __init proc_device_tree_init(void) | |||
233 | return; | 233 | return; |
234 | root = of_find_node_by_path("/"); | 234 | root = of_find_node_by_path("/"); |
235 | if (root == NULL) { | 235 | if (root == NULL) { |
236 | printk(KERN_ERR "/proc/device-tree: can't find root\n"); | 236 | pr_debug("/proc/device-tree: can't find root\n"); |
237 | return; | 237 | return; |
238 | } | 238 | } |
239 | proc_device_tree_add_node(root, proc_device_tree); | 239 | proc_device_tree_add_node(root, proc_device_tree); |
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c index 09a1f92a34ef..8eb2522111c5 100644 --- a/fs/proc/proc_sysctl.c +++ b/fs/proc/proc_sysctl.c | |||
@@ -408,15 +408,18 @@ static int proc_sys_compare(const struct dentry *parent, | |||
408 | const struct dentry *dentry, const struct inode *inode, | 408 | const struct dentry *dentry, const struct inode *inode, |
409 | unsigned int len, const char *str, const struct qstr *name) | 409 | unsigned int len, const char *str, const struct qstr *name) |
410 | { | 410 | { |
411 | struct ctl_table_header *head; | ||
411 | /* Although proc doesn't have negative dentries, rcu-walk means | 412 | /* Although proc doesn't have negative dentries, rcu-walk means |
412 | * that inode here can be NULL */ | 413 | * that inode here can be NULL */ |
414 | /* AV: can it, indeed? */ | ||
413 | if (!inode) | 415 | if (!inode) |
414 | return 0; | 416 | return 1; |
415 | if (name->len != len) | 417 | if (name->len != len) |
416 | return 1; | 418 | return 1; |
417 | if (memcmp(name->name, str, len)) | 419 | if (memcmp(name->name, str, len)) |
418 | return 1; | 420 | return 1; |
419 | return !sysctl_is_seen(PROC_I(inode)->sysctl); | 421 | head = rcu_dereference(PROC_I(inode)->sysctl); |
422 | return !head || !sysctl_is_seen(head); | ||
420 | } | 423 | } |
421 | 424 | ||
422 | static const struct dentry_operations proc_sys_dentry_operations = { | 425 | static const struct dentry_operations proc_sys_dentry_operations = { |
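
The proc_sys_compare() change treats a NULL or not-yet-visible sysctl header as "no match", so an rcu-walk lookup falls back to the slow path instead of matching a dying entry. A user-space sketch of a compare callback with that convention, names illustrative:

#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct entry {
        const char *name;
        const void *head;       /* NULL while the entry is dying or not yet visible */
};

/* returning nonzero means "no match" and sends the lookup down the slow path */
static int entry_compare(const struct entry *e, const char *str, size_t len)
{
        if (!e->head)
                return 1;
        if (strlen(e->name) != len)
                return 1;
        if (memcmp(e->name, str, len))
                return 1;
        return 0;
}

int main(void)
{
        struct entry e = { .name = "hostname", .head = NULL };

        printf("%d\n", entry_compare(&e, "hostname", 8));      /* 1: no head yet */
        e.head = &e;                                            /* pretend it went live */
        printf("%d\n", entry_compare(&e, "hostname", 8));       /* 0: match */
        return 0;
}
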
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index ba5f51ec3458..68fdf45cc6c9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c | |||
@@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
771 | EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, | 771 | EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, |
772 | dentry, inode, &security); | 772 | dentry, inode, &security); |
773 | if (retval) { | 773 | if (retval) { |
774 | dir->i_nlink--; | 774 | DEC_DIR_INODE_NLINK(dir) |
775 | goto out_failed; | 775 | goto out_failed; |
776 | } | 776 | } |
777 | 777 | ||
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 3cfb2e933644..5c11ca82b782 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -978,8 +978,6 @@ int reiserfs_permission(struct inode *inode, int mask, unsigned int flags) | |||
978 | 978 | ||
979 | static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) | 979 | static int xattr_hide_revalidate(struct dentry *dentry, struct nameidata *nd) |
980 | { | 980 | { |
981 | if (nd->flags & LOOKUP_RCU) | ||
982 | return -ECHILD; | ||
983 | return -EPERM; | 981 | return -EPERM; |
984 | } | 982 | } |
985 | 983 | ||
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b427b1208c26..e474fbcf8bde 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c | |||
@@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
245 | new_de = sysv_find_entry(new_dentry, &new_page); | 245 | new_de = sysv_find_entry(new_dentry, &new_page); |
246 | if (!new_de) | 246 | if (!new_de) |
247 | goto out_dir; | 247 | goto out_dir; |
248 | inode_inc_link_count(old_inode); | ||
249 | sysv_set_link(new_de, new_page, old_inode); | 248 | sysv_set_link(new_de, new_page, old_inode); |
250 | new_inode->i_ctime = CURRENT_TIME_SEC; | 249 | new_inode->i_ctime = CURRENT_TIME_SEC; |
251 | if (dir_de) | 250 | if (dir_de) |
@@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
257 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) | 256 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) |
258 | goto out_dir; | 257 | goto out_dir; |
259 | } | 258 | } |
260 | inode_inc_link_count(old_inode); | ||
261 | err = sysv_add_link(new_dentry, old_inode); | 259 | err = sysv_add_link(new_dentry, old_inode); |
262 | if (err) { | 260 | if (err) |
263 | inode_dec_link_count(old_inode); | ||
264 | goto out_dir; | 261 | goto out_dir; |
265 | } | ||
266 | if (dir_de) | 262 | if (dir_de) |
267 | inode_inc_link_count(new_dir); | 263 | inode_inc_link_count(new_dir); |
268 | } | 264 | } |
269 | 265 | ||
270 | sysv_delete_entry(old_de, old_page); | 266 | sysv_delete_entry(old_de, old_page); |
271 | inode_dec_link_count(old_inode); | 267 | mark_inode_dirty(old_inode); |
272 | 268 | ||
273 | if (dir_de) { | 269 | if (dir_de) { |
274 | sysv_set_link(dir_de, dir_page, new_dir); | 270 | sysv_set_link(dir_de, dir_page, new_dir); |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 2be0f9eb86d2..b7c338d5e9df 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
@@ -32,6 +32,8 @@ | |||
32 | #include <linux/crc-itu-t.h> | 32 | #include <linux/crc-itu-t.h> |
33 | #include <linux/exportfs.h> | 33 | #include <linux/exportfs.h> |
34 | 34 | ||
35 | enum { UDF_MAX_LINKS = 0xffff }; | ||
36 | |||
35 | static inline int udf_match(int len1, const unsigned char *name1, int len2, | 37 | static inline int udf_match(int len1, const unsigned char *name1, int len2, |
36 | const unsigned char *name2) | 38 | const unsigned char *name2) |
37 | { | 39 | { |
@@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
650 | struct udf_inode_info *iinfo; | 652 | struct udf_inode_info *iinfo; |
651 | 653 | ||
652 | err = -EMLINK; | 654 | err = -EMLINK; |
653 | if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) | 655 | if (dir->i_nlink >= UDF_MAX_LINKS) |
654 | goto out; | 656 | goto out; |
655 | 657 | ||
656 | err = -EIO; | 658 | err = -EIO; |
@@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, | |||
1034 | struct fileIdentDesc cfi, *fi; | 1036 | struct fileIdentDesc cfi, *fi; |
1035 | int err; | 1037 | int err; |
1036 | 1038 | ||
1037 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { | 1039 | if (inode->i_nlink >= UDF_MAX_LINKS) |
1038 | return -EMLINK; | 1040 | return -EMLINK; |
1039 | } | ||
1040 | 1041 | ||
1041 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); | 1042 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
1042 | if (!fi) { | 1043 | if (!fi) { |
@@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1131 | goto end_rename; | 1132 | goto end_rename; |
1132 | 1133 | ||
1133 | retval = -EMLINK; | 1134 | retval = -EMLINK; |
1134 | if (!new_inode && | 1135 | if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) |
1135 | new_dir->i_nlink >= | ||
1136 | (256 << sizeof(new_dir->i_nlink)) - 1) | ||
1137 | goto end_rename; | 1136 | goto end_rename; |
1138 | } | 1137 | } |
1139 | if (!nfi) { | 1138 | if (!nfi) { |
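
UDF_MAX_LINKS pins the limit at 0xffff, which is what a 16-bit on-disk link-count field can hold; the open-coded (256 << sizeof(i_nlink)) - 1 expression it replaces never evaluated to that bound for any plausible i_nlink type. A short program working through the arithmetic (assuming the on-disk field is 16 bits wide, as the 0xffff constant suggests):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* the old bound depended on sizeof() and never matched the field */
        printf("%d\n", (int)((256 << sizeof(uint16_t)) - 1));     /* 1023 for a 2-byte type */
        printf("%d\n", (int)((256 << sizeof(unsigned int)) - 1)); /* 4095 for a 4-byte int */

        /* what a 16-bit on-disk link count can actually hold */
        printf("%u\n", (unsigned int)UINT16_MAX);                 /* 65535 == UDF_MAX_LINKS */
        return 0;
}
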
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 12f39b9e4437..d6f681535eb8 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
306 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); | 306 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); |
307 | if (!new_de) | 307 | if (!new_de) |
308 | goto out_dir; | 308 | goto out_dir; |
309 | inode_inc_link_count(old_inode); | ||
310 | ufs_set_link(new_dir, new_de, new_page, old_inode); | 309 | ufs_set_link(new_dir, new_de, new_page, old_inode); |
311 | new_inode->i_ctime = CURRENT_TIME_SEC; | 310 | new_inode->i_ctime = CURRENT_TIME_SEC; |
312 | if (dir_de) | 311 | if (dir_de) |
@@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
318 | if (new_dir->i_nlink >= UFS_LINK_MAX) | 317 | if (new_dir->i_nlink >= UFS_LINK_MAX) |
319 | goto out_dir; | 318 | goto out_dir; |
320 | } | 319 | } |
321 | inode_inc_link_count(old_inode); | ||
322 | err = ufs_add_link(new_dentry, old_inode); | 320 | err = ufs_add_link(new_dentry, old_inode); |
323 | if (err) { | 321 | if (err) |
324 | inode_dec_link_count(old_inode); | ||
325 | goto out_dir; | 322 | goto out_dir; |
326 | } | ||
327 | if (dir_de) | 323 | if (dir_de) |
328 | inode_inc_link_count(new_dir); | 324 | inode_inc_link_count(new_dir); |
329 | } | 325 | } |
@@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
331 | /* | 327 | /* |
332 | * Like most other Unix systems, set the ctime for inodes on a | 328 | * Like most other Unix systems, set the ctime for inodes on a |
333 | * rename. | 329 | * rename. |
334 | * inode_dec_link_count() will mark the inode dirty. | ||
335 | */ | 330 | */ |
336 | old_inode->i_ctime = CURRENT_TIME_SEC; | 331 | old_inode->i_ctime = CURRENT_TIME_SEC; |
337 | 332 | ||
338 | ufs_delete_entry(old_dir, old_de, old_page); | 333 | ufs_delete_entry(old_dir, old_de, old_page); |
339 | inode_dec_link_count(old_inode); | 334 | mark_inode_dirty(old_inode); |
340 | 335 | ||
341 | if (dir_de) { | 336 | if (dir_de) { |
342 | ufs_set_link(old_inode, dir_de, dir_page, new_dir); | 337 | ufs_set_link(old_inode, dir_de, dir_page, new_dir); |
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index 05201ae719e5..d61611c88012 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c | |||
@@ -152,6 +152,8 @@ xfs_ioc_trim( | |||
152 | 152 | ||
153 | if (!capable(CAP_SYS_ADMIN)) | 153 | if (!capable(CAP_SYS_ADMIN)) |
154 | return -XFS_ERROR(EPERM); | 154 | return -XFS_ERROR(EPERM); |
155 | if (!blk_queue_discard(q)) | ||
156 | return -XFS_ERROR(EOPNOTSUPP); | ||
155 | if (copy_from_user(&range, urange, sizeof(range))) | 157 | if (copy_from_user(&range, urange, sizeof(range))) |
156 | return -XFS_ERROR(EFAULT); | 158 | return -XFS_ERROR(EFAULT); |
157 | 159 | ||
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index f5e2a19e0f8e..0ca0e3c024d7 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
@@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1( | |||
695 | xfs_mount_t *mp, | 695 | xfs_mount_t *mp, |
696 | void __user *arg) | 696 | void __user *arg) |
697 | { | 697 | { |
698 | xfs_fsop_geom_v1_t fsgeo; | 698 | xfs_fsop_geom_t fsgeo; |
699 | int error; | 699 | int error; |
700 | 700 | ||
701 | error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); | 701 | error = xfs_fs_geometry(mp, &fsgeo, 3); |
702 | if (error) | 702 | if (error) |
703 | return -error; | 703 | return -error; |
704 | 704 | ||
705 | if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) | 705 | /* |
706 | * Caller should have passed an argument of type | ||
707 | * xfs_fsop_geom_v1_t. This is a proper subset of the | ||
708 | * xfs_fsop_geom_t that xfs_fs_geometry() fills in. | ||
709 | */ | ||
710 | if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) | ||
706 | return -XFS_ERROR(EFAULT); | 711 | return -XFS_ERROR(EFAULT); |
707 | return 0; | 712 | return 0; |
708 | } | 713 | } |
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c index cec89dd5d7d2..85668efb3e3e 100644 --- a/fs/xfs/xfs_fsops.c +++ b/fs/xfs/xfs_fsops.c | |||
@@ -53,6 +53,9 @@ xfs_fs_geometry( | |||
53 | xfs_fsop_geom_t *geo, | 53 | xfs_fsop_geom_t *geo, |
54 | int new_version) | 54 | int new_version) |
55 | { | 55 | { |
56 | |||
57 | memset(geo, 0, sizeof(*geo)); | ||
58 | |||
56 | geo->blocksize = mp->m_sb.sb_blocksize; | 59 | geo->blocksize = mp->m_sb.sb_blocksize; |
57 | geo->rtextsize = mp->m_sb.sb_rextsize; | 60 | geo->rtextsize = mp->m_sb.sb_rextsize; |
58 | geo->agblocks = mp->m_sb.sb_agblocks; | 61 | geo->agblocks = mp->m_sb.sb_agblocks; |
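
The two XFS hunks above combine a memset() of the full geometry structure with copying only the v1-sized prefix back to the caller, so uninitialized stack data can never leak out. A user-space sketch of that pattern; the structures here are illustrative, not the real xfs_fsop_geom layout:

#include <stdio.h>
#include <string.h>

struct geom_v1 { unsigned int blocksize; unsigned int rtextsize; };
struct geom    { unsigned int blocksize; unsigned int rtextsize; unsigned int logsectsize; };

static void fill_geometry(struct geom *geo)
{
        memset(geo, 0, sizeof(*geo));   /* nothing is left uninitialized to leak */
        geo->blocksize = 4096;
        geo->rtextsize = 16;
}

int main(void)
{
        struct geom full;
        struct geom_v1 out;

        fill_geometry(&full);
        /* v1 callers receive only the leading subset, as the sizeof() change does */
        memcpy(&out, &full, sizeof(struct geom_v1));
        printf("%u %u\n", out.blocksize, out.rtextsize);
        return 0;
}
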
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h index 31b6188df221..b4bfe338ea0e 100644 --- a/include/asm-generic/pgtable.h +++ b/include/asm-generic/pgtable.h | |||
@@ -4,6 +4,8 @@ | |||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #ifdef CONFIG_MMU | 5 | #ifdef CONFIG_MMU |
6 | 6 | ||
7 | #include <linux/mm_types.h> | ||
8 | |||
7 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS | 9 | #ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS |
8 | extern int ptep_set_access_flags(struct vm_area_struct *vma, | 10 | extern int ptep_set_access_flags(struct vm_area_struct *vma, |
9 | unsigned long address, pte_t *ptep, | 11 | unsigned long address, pte_t *ptep, |
diff --git a/include/drm/drmP.h b/include/drm/drmP.h index fe29aadb129d..348843b80150 100644 --- a/include/drm/drmP.h +++ b/include/drm/drmP.h | |||
@@ -1101,7 +1101,7 @@ struct drm_device { | |||
1101 | struct platform_device *platformdev; /**< Platform device struture */ | 1101 | struct platform_device *platformdev; /**< Platform device struture */ |
1102 | 1102 | ||
1103 | struct drm_sg_mem *sg; /**< Scatter gather memory */ | 1103 | struct drm_sg_mem *sg; /**< Scatter gather memory */ |
1104 | int num_crtcs; /**< Number of CRTCs on this device */ | 1104 | unsigned int num_crtcs; /**< Number of CRTCs on this device */ |
1105 | void *dev_private; /**< device private data */ | 1105 | void *dev_private; /**< device private data */ |
1106 | void *mm_private; | 1106 | void *mm_private; |
1107 | struct address_space *dev_mapping; | 1107 | struct address_space *dev_mapping; |
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 5cb86c307f5d..fc4875433817 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h | |||
@@ -99,7 +99,6 @@ struct rxrpc_key_token { | |||
99 | * structure of raw payloads passed to add_key() or instantiate key | 99 | * structure of raw payloads passed to add_key() or instantiate key |
100 | */ | 100 | */ |
101 | struct rxrpc_key_data_v1 { | 101 | struct rxrpc_key_data_v1 { |
102 | u32 kif_version; /* 1 */ | ||
103 | u16 security_index; | 102 | u16 security_index; |
104 | u16 ticket_length; | 103 | u16 ticket_length; |
105 | u32 expiry; /* time_t */ | 104 | u32 expiry; /* time_t */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d18ff34670a..d5063e1b5555 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q); | |||
699 | extern void blk_stop_queue(struct request_queue *q); | 699 | extern void blk_stop_queue(struct request_queue *q); |
700 | extern void blk_sync_queue(struct request_queue *q); | 700 | extern void blk_sync_queue(struct request_queue *q); |
701 | extern void __blk_stop_queue(struct request_queue *q); | 701 | extern void __blk_stop_queue(struct request_queue *q); |
702 | extern void __blk_run_queue(struct request_queue *); | 702 | extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); |
703 | extern void blk_run_queue(struct request_queue *); | 703 | extern void blk_run_queue(struct request_queue *); |
704 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 704 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
705 | struct rq_map_data *, void __user *, unsigned long, | 705 | struct rq_map_data *, void __user *, unsigned long, |
@@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p) | |||
1088 | 1088 | ||
1089 | struct work_struct; | 1089 | struct work_struct; |
1090 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1090 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1091 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | ||
1092 | 1091 | ||
1093 | #ifdef CONFIG_BLK_CGROUP | 1092 | #ifdef CONFIG_BLK_CGROUP |
1094 | /* | 1093 | /* |
@@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
1136 | extern int blk_throtl_init(struct request_queue *q); | 1135 | extern int blk_throtl_init(struct request_queue *q); |
1137 | extern void blk_throtl_exit(struct request_queue *q); | 1136 | extern void blk_throtl_exit(struct request_queue *q); |
1138 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | 1137 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); |
1139 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | ||
1140 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | 1138 | extern void throtl_shutdown_timer_wq(struct request_queue *q); |
1141 | #else /* CONFIG_BLK_DEV_THROTTLING */ | 1139 | #else /* CONFIG_BLK_DEV_THROTTLING */ |
1142 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | 1140 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) |
@@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | |||
1146 | 1144 | ||
1147 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | 1145 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } |
1148 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | 1146 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } |
1149 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | ||
1150 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | 1147 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} |
1151 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 1148 | #endif /* CONFIG_BLK_DEV_THROTTLING */ |
1152 | 1149 | ||
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 3395cf7130f5..b22fb0d3db0f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq) | |||
245 | 245 | ||
246 | extern void blk_dump_cmd(char *buf, struct request *rq); | 246 | extern void blk_dump_cmd(char *buf, struct request *rq); |
247 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); | 247 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); |
248 | extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); | ||
249 | 248 | ||
250 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | 249 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ |
251 | 250 | ||
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c3011beac30d..31d91a64838b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
@@ -123,6 +123,7 @@ struct ceph_msg_pos { | |||
123 | #define SOCK_CLOSED 11 /* socket state changed to closed */ | 123 | #define SOCK_CLOSED 11 /* socket state changed to closed */ |
124 | #define OPENING 13 /* open connection w/ (possibly new) peer */ | 124 | #define OPENING 13 /* open connection w/ (possibly new) peer */ |
125 | #define DEAD 14 /* dead, about to kfree */ | 125 | #define DEAD 14 /* dead, about to kfree */ |
126 | #define BACKOFF 15 | ||
126 | 127 | ||
127 | /* | 128 | /* |
128 | * A single connection with another host. | 129 | * A single connection with another host. |
@@ -160,7 +161,6 @@ struct ceph_connection { | |||
160 | struct list_head out_queue; | 161 | struct list_head out_queue; |
161 | struct list_head out_sent; /* sending or sent but unacked */ | 162 | struct list_head out_sent; /* sending or sent but unacked */ |
162 | u64 out_seq; /* last message queued for send */ | 163 | u64 out_seq; /* last message queued for send */ |
163 | bool out_keepalive_pending; | ||
164 | 164 | ||
165 | u64 in_seq, in_seq_acked; /* last message received, acked */ | 165 | u64 in_seq, in_seq_acked; /* last message received, acked */ |
166 | 166 | ||
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h index 68cd248f6d3e..66900e3c6eb1 100644 --- a/include/linux/dcbnl.h +++ b/include/linux/dcbnl.h | |||
@@ -101,8 +101,8 @@ struct ieee_pfc { | |||
101 | */ | 101 | */ |
102 | struct dcb_app { | 102 | struct dcb_app { |
103 | __u8 selector; | 103 | __u8 selector; |
104 | __u32 protocol; | ||
105 | __u8 priority; | 104 | __u8 priority; |
105 | __u16 protocol; | ||
106 | }; | 106 | }; |
107 | 107 | ||
108 | struct dcbmsg { | 108 | struct dcbmsg { |
diff --git a/include/linux/fs.h b/include/linux/fs.h index bd3215940c37..e38b50a4b9d2 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -649,6 +649,7 @@ struct address_space { | |||
649 | spinlock_t private_lock; /* for use by the address_space */ | 649 | spinlock_t private_lock; /* for use by the address_space */ |
650 | struct list_head private_list; /* ditto */ | 650 | struct list_head private_list; /* ditto */ |
651 | struct address_space *assoc_mapping; /* ditto */ | 651 | struct address_space *assoc_mapping; /* ditto */ |
652 | struct mutex unmap_mutex; /* to protect unmapping */ | ||
652 | } __attribute__((aligned(sizeof(long)))); | 653 | } __attribute__((aligned(sizeof(long)))); |
653 | /* | 654 | /* |
654 | * On most architectures that alignment is already the case; but | 655 | * On most architectures that alignment is already the case; but |
@@ -2139,7 +2140,7 @@ extern void check_disk_size_change(struct gendisk *disk, | |||
2139 | struct block_device *bdev); | 2140 | struct block_device *bdev); |
2140 | extern int revalidate_disk(struct gendisk *); | 2141 | extern int revalidate_disk(struct gendisk *); |
2141 | extern int check_disk_change(struct block_device *); | 2142 | extern int check_disk_change(struct block_device *); |
2142 | extern int __invalidate_device(struct block_device *); | 2143 | extern int __invalidate_device(struct block_device *, bool); |
2143 | extern int invalidate_partition(struct gendisk *, int); | 2144 | extern int invalidate_partition(struct gendisk *, int); |
2144 | #endif | 2145 | #endif |
2145 | unsigned long invalidate_mapping_pages(struct address_space *mapping, | 2146 | unsigned long invalidate_mapping_pages(struct address_space *mapping, |
@@ -2225,6 +2226,7 @@ extern loff_t vfs_llseek(struct file *file, loff_t offset, int origin); | |||
2225 | 2226 | ||
2226 | extern int inode_init_always(struct super_block *, struct inode *); | 2227 | extern int inode_init_always(struct super_block *, struct inode *); |
2227 | extern void inode_init_once(struct inode *); | 2228 | extern void inode_init_once(struct inode *); |
2229 | extern void address_space_init_once(struct address_space *mapping); | ||
2228 | extern void ihold(struct inode * inode); | 2230 | extern void ihold(struct inode * inode); |
2229 | extern void iput(struct inode *); | 2231 | extern void iput(struct inode *); |
2230 | extern struct inode * igrab(struct inode *); | 2232 | extern struct inode * igrab(struct inode *); |
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0b84c61607e8..dca31761b311 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
@@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) | |||
332 | return alloc_pages_current(gfp_mask, order); | 332 | return alloc_pages_current(gfp_mask, order); |
333 | } | 333 | } |
334 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, | 334 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, |
335 | struct vm_area_struct *vma, unsigned long addr); | 335 | struct vm_area_struct *vma, unsigned long addr, |
336 | int node); | ||
336 | #else | 337 | #else |
337 | #define alloc_pages(gfp_mask, order) \ | 338 | #define alloc_pages(gfp_mask, order) \ |
338 | alloc_pages_node(numa_node_id(), gfp_mask, order) | 339 | alloc_pages_node(numa_node_id(), gfp_mask, order) |
339 | #define alloc_pages_vma(gfp_mask, order, vma, addr) \ | 340 | #define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ |
340 | alloc_pages(gfp_mask, order) | 341 | alloc_pages(gfp_mask, order) |
341 | #endif | 342 | #endif |
342 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) | 343 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) |
343 | #define alloc_page_vma(gfp_mask, vma, addr) \ | 344 | #define alloc_page_vma(gfp_mask, vma, addr) \ |
344 | alloc_pages_vma(gfp_mask, 0, vma, addr) | 345 | alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) |
346 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ | ||
347 | alloc_pages_vma(gfp_mask, 0, vma, addr, node) | ||
345 | 348 | ||
346 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); | 349 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); |
347 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | 350 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 55e0d4253e49..d746da19c6a2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -55,7 +55,7 @@ | |||
55 | * Used by threaded interrupts which need to keep the | 55 | * Used by threaded interrupts which need to keep the |
56 | * irq line disabled until the threaded handler has been run. | 56 | * irq line disabled until the threaded handler has been run. |
57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
58 | * | 58 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
59 | */ | 59 | */ |
60 | #define IRQF_DISABLED 0x00000020 | 60 | #define IRQF_DISABLED 0x00000020 |
61 | #define IRQF_SAMPLE_RANDOM 0x00000040 | 61 | #define IRQF_SAMPLE_RANDOM 0x00000040 |
@@ -67,6 +67,7 @@ | |||
67 | #define IRQF_IRQPOLL 0x00001000 | 67 | #define IRQF_IRQPOLL 0x00001000 |
68 | #define IRQF_ONESHOT 0x00002000 | 68 | #define IRQF_ONESHOT 0x00002000 |
69 | #define IRQF_NO_SUSPEND 0x00004000 | 69 | #define IRQF_NO_SUSPEND 0x00004000 |
70 | #define IRQF_FORCE_RESUME 0x00008000 | ||
70 | 71 | ||
71 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) | 72 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) |
72 | 73 | ||
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index 3fd36845ca45..ef4f0b6083a3 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h | |||
@@ -71,6 +71,7 @@ struct wm8994 { | |||
71 | u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; | 71 | u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; |
72 | 72 | ||
73 | /* Used over suspend/resume */ | 73 | /* Used over suspend/resume */ |
74 | bool suspended; | ||
74 | u16 ldo_regs[WM8994_NUM_LDO_REGS]; | 75 | u16 ldo_regs[WM8994_NUM_LDO_REGS]; |
75 | u16 gpio_regs[WM8994_NUM_GPIO_REGS]; | 76 | u16 gpio_regs[WM8994_NUM_GPIO_REGS]; |
76 | 77 | ||
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d971346b0340..71caf7a5e6c6 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -2392,6 +2392,9 @@ extern int netdev_notice(const struct net_device *dev, const char *format, ...) | |||
2392 | extern int netdev_info(const struct net_device *dev, const char *format, ...) | 2392 | extern int netdev_info(const struct net_device *dev, const char *format, ...) |
2393 | __attribute__ ((format (printf, 2, 3))); | 2393 | __attribute__ ((format (printf, 2, 3))); |
2394 | 2394 | ||
2395 | #define MODULE_ALIAS_NETDEV(device) \ | ||
2396 | MODULE_ALIAS("netdev-" device) | ||
2397 | |||
2395 | #if defined(DEBUG) | 2398 | #if defined(DEBUG) |
2396 | #define netdev_dbg(__dev, format, args...) \ | 2399 | #define netdev_dbg(__dev, format, args...) \ |
2397 | netdev_printk(KERN_DEBUG, __dev, format, ##args) | 2400 | netdev_printk(KERN_DEBUG, __dev, format, ##args) |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index b197563913bf..3e112de12d8d 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
@@ -68,11 +68,7 @@ struct nfs_client { | |||
68 | unsigned char cl_id_uniquifier; | 68 | unsigned char cl_id_uniquifier; |
69 | u32 cl_cb_ident; /* v4.0 callback identifier */ | 69 | u32 cl_cb_ident; /* v4.0 callback identifier */ |
70 | const struct nfs4_minor_version_ops *cl_mvops; | 70 | const struct nfs4_minor_version_ops *cl_mvops; |
71 | #endif /* CONFIG_NFS_V4 */ | ||
72 | 71 | ||
73 | #ifdef CONFIG_NFS_V4_1 | ||
74 | /* clientid returned from EXCHANGE_ID, used by session operations */ | ||
75 | u64 cl_ex_clid; | ||
76 | /* The sequence id to use for the next CREATE_SESSION */ | 72 | /* The sequence id to use for the next CREATE_SESSION */ |
77 | u32 cl_seqid; | 73 | u32 cl_seqid; |
78 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ | 74 | /* The flags used for obtaining the clientid during EXCHANGE_ID */ |
@@ -80,7 +76,7 @@ struct nfs_client { | |||
80 | struct nfs4_session *cl_session; /* sharred session */ | 76 | struct nfs4_session *cl_session; /* sharred session */ |
81 | struct list_head cl_layouts; | 77 | struct list_head cl_layouts; |
82 | struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ | 78 | struct pnfs_deviceid_cache *cl_devid_cache; /* pNFS deviceid cache */ |
83 | #endif /* CONFIG_NFS_V4_1 */ | 79 | #endif /* CONFIG_NFS_V4 */ |
84 | 80 | ||
85 | #ifdef CONFIG_NFS_FSCACHE | 81 | #ifdef CONFIG_NFS_FSCACHE |
86 | struct fscache_cookie *fscache; /* client index cache cookie */ | 82 | struct fscache_cookie *fscache; /* client index cache cookie */ |
@@ -185,7 +181,7 @@ struct nfs_server { | |||
185 | /* maximum number of slots to use */ | 181 | /* maximum number of slots to use */ |
186 | #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE | 182 | #define NFS4_MAX_SLOT_TABLE RPC_MAX_SLOT_TABLE |
187 | 183 | ||
188 | #if defined(CONFIG_NFS_V4_1) | 184 | #if defined(CONFIG_NFS_V4) |
189 | 185 | ||
190 | /* Sessions */ | 186 | /* Sessions */ |
191 | #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) | 187 | #define SLOT_TABLE_SZ (NFS4_MAX_SLOT_TABLE/(8*sizeof(long))) |
@@ -225,5 +221,5 @@ struct nfs4_session { | |||
225 | struct nfs_client *clp; | 221 | struct nfs_client *clp; |
226 | }; | 222 | }; |
227 | 223 | ||
228 | #endif /* CONFIG_NFS_V4_1 */ | 224 | #endif /* CONFIG_NFS_V4 */ |
229 | #endif | 225 | #endif |
diff --git a/include/linux/pm.h b/include/linux/pm.h index dd9c7ab38270..21415cc91cbb 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -431,6 +431,8 @@ struct dev_pm_info { | |||
431 | struct list_head entry; | 431 | struct list_head entry; |
432 | struct completion completion; | 432 | struct completion completion; |
433 | struct wakeup_source *wakeup; | 433 | struct wakeup_source *wakeup; |
434 | #else | ||
435 | unsigned int should_wakeup:1; | ||
434 | #endif | 436 | #endif |
435 | #ifdef CONFIG_PM_RUNTIME | 437 | #ifdef CONFIG_PM_RUNTIME |
436 | struct timer_list suspend_timer; | 438 | struct timer_list suspend_timer; |
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 9cff00dd6b63..03a67db03d01 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -109,11 +109,6 @@ static inline bool device_can_wakeup(struct device *dev) | |||
109 | return dev->power.can_wakeup; | 109 | return dev->power.can_wakeup; |
110 | } | 110 | } |
111 | 111 | ||
112 | static inline bool device_may_wakeup(struct device *dev) | ||
113 | { | ||
114 | return false; | ||
115 | } | ||
116 | |||
117 | static inline struct wakeup_source *wakeup_source_create(const char *name) | 112 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
118 | { | 113 | { |
119 | return NULL; | 114 | return NULL; |
@@ -134,24 +129,32 @@ static inline void wakeup_source_unregister(struct wakeup_source *ws) {} | |||
134 | 129 | ||
135 | static inline int device_wakeup_enable(struct device *dev) | 130 | static inline int device_wakeup_enable(struct device *dev) |
136 | { | 131 | { |
137 | return -EINVAL; | 132 | dev->power.should_wakeup = true; |
133 | return 0; | ||
138 | } | 134 | } |
139 | 135 | ||
140 | static inline int device_wakeup_disable(struct device *dev) | 136 | static inline int device_wakeup_disable(struct device *dev) |
141 | { | 137 | { |
138 | dev->power.should_wakeup = false; | ||
142 | return 0; | 139 | return 0; |
143 | } | 140 | } |
144 | 141 | ||
145 | static inline int device_init_wakeup(struct device *dev, bool val) | 142 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) |
146 | { | 143 | { |
147 | dev->power.can_wakeup = val; | 144 | dev->power.should_wakeup = enable; |
148 | return val ? -EINVAL : 0; | 145 | return 0; |
149 | } | 146 | } |
150 | 147 | ||
148 | static inline int device_init_wakeup(struct device *dev, bool val) | ||
149 | { | ||
150 | device_set_wakeup_capable(dev, val); | ||
151 | device_set_wakeup_enable(dev, val); | ||
152 | return 0; | ||
153 | } | ||
151 | 154 | ||
152 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) | 155 | static inline bool device_may_wakeup(struct device *dev) |
153 | { | 156 | { |
154 | return -EINVAL; | 157 | return dev->power.can_wakeup && dev->power.should_wakeup; |
155 | } | 158 | } |
156 | 159 | ||
157 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} | 160 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 092a04f874a8..a1147e5dd245 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -102,11 +102,8 @@ | |||
102 | 102 | ||
103 | extern long arch_ptrace(struct task_struct *child, long request, | 103 | extern long arch_ptrace(struct task_struct *child, long request, |
104 | unsigned long addr, unsigned long data); | 104 | unsigned long addr, unsigned long data); |
105 | extern int ptrace_traceme(void); | ||
106 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); | 105 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); |
107 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); | 106 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); |
108 | extern int ptrace_attach(struct task_struct *tsk); | ||
109 | extern int ptrace_detach(struct task_struct *, unsigned int); | ||
110 | extern void ptrace_disable(struct task_struct *); | 107 | extern void ptrace_disable(struct task_struct *); |
111 | extern int ptrace_check_attach(struct task_struct *task, int kill); | 108 | extern int ptrace_check_attach(struct task_struct *task, int kill); |
112 | extern int ptrace_request(struct task_struct *child, long request, | 109 | extern int ptrace_request(struct task_struct *child, long request, |
diff --git a/include/linux/rio_regs.h b/include/linux/rio_regs.h index d63dcbaea169..9026b30238f3 100644 --- a/include/linux/rio_regs.h +++ b/include/linux/rio_regs.h | |||
@@ -14,10 +14,12 @@ | |||
14 | #define LINUX_RIO_REGS_H | 14 | #define LINUX_RIO_REGS_H |
15 | 15 | ||
16 | /* | 16 | /* |
17 | * In RapidIO, each device has a 2MB configuration space that is | 17 | * In RapidIO, each device has a 16MB configuration space that is |
18 | * accessed via maintenance transactions. Portions of configuration | 18 | * accessed via maintenance transactions. Portions of configuration |
19 | * space are standardized and/or reserved. | 19 | * space are standardized and/or reserved. |
20 | */ | 20 | */ |
21 | #define RIO_MAINT_SPACE_SZ 0x1000000 /* 16MB of RapidIO mainenance space */ | ||
22 | |||
21 | #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ | 23 | #define RIO_DEV_ID_CAR 0x00 /* [I] Device Identity CAR */ |
22 | #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ | 24 | #define RIO_DEV_INFO_CAR 0x04 /* [I] Device Information CAR */ |
23 | #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ | 25 | #define RIO_ASM_ID_CAR 0x08 /* [I] Assembly Identity CAR */ |
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 88513fd8e208..d81db8012c63 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -212,6 +212,7 @@ struct rpc_task *rpc_run_task(const struct rpc_task_setup *); | |||
212 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, | 212 | struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req, |
213 | const struct rpc_call_ops *ops); | 213 | const struct rpc_call_ops *ops); |
214 | void rpc_put_task(struct rpc_task *); | 214 | void rpc_put_task(struct rpc_task *); |
215 | void rpc_put_task_async(struct rpc_task *); | ||
215 | void rpc_exit_task(struct rpc_task *); | 216 | void rpc_exit_task(struct rpc_task *); |
216 | void rpc_exit(struct rpc_task *, int); | 217 | void rpc_exit(struct rpc_task *, int); |
217 | void rpc_release_calldata(const struct rpc_call_ops *, void *); | 218 | void rpc_release_calldata(const struct rpc_call_ops *, void *); |
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 7bb5cb64f3b8..11684d9e6bd2 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -930,6 +930,7 @@ enum | |||
930 | 930 | ||
931 | #ifdef __KERNEL__ | 931 | #ifdef __KERNEL__ |
932 | #include <linux/list.h> | 932 | #include <linux/list.h> |
933 | #include <linux/rcupdate.h> | ||
933 | 934 | ||
934 | /* For the /proc/sys support */ | 935 | /* For the /proc/sys support */ |
935 | struct ctl_table; | 936 | struct ctl_table; |
@@ -1037,10 +1038,15 @@ struct ctl_table_root { | |||
1037 | struct ctl_table trees. */ | 1038 | struct ctl_table trees. */ |
1038 | struct ctl_table_header | 1039 | struct ctl_table_header |
1039 | { | 1040 | { |
1040 | struct ctl_table *ctl_table; | 1041 | union { |
1041 | struct list_head ctl_entry; | 1042 | struct { |
1042 | int used; | 1043 | struct ctl_table *ctl_table; |
1043 | int count; | 1044 | struct list_head ctl_entry; |
1045 | int used; | ||
1046 | int count; | ||
1047 | }; | ||
1048 | struct rcu_head rcu; | ||
1049 | }; | ||
1044 | struct completion *unregistering; | 1050 | struct completion *unregistering; |
1045 | struct ctl_table *ctl_table_arg; | 1051 | struct ctl_table *ctl_table_arg; |
1046 | struct ctl_table_root *root; | 1052 | struct ctl_table_root *root; |
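
The ctl_table_header change overlays a struct rcu_head on fields that are dead once the header is being unregistered, so RCU-deferred freeing costs no extra space. A C11 sketch of the same anonymous-union trick, with a two-pointer stand-in for the kernel's rcu_head:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct fake_rcu_head {                   /* two-pointer stand-in for struct rcu_head */
        void *next;
        void (*func)(struct fake_rcu_head *);
};

struct header {
        union {
                struct {                 /* live fields, valid while registered */
                        void *table;
                        struct { void *prev, *next; } entry;
                        int used;
                        int count;
                };
                struct fake_rcu_head rcu; /* reused only during teardown */
        };
        void *unregistering;
};

int main(void)
{
        /* the rcu_head overlays fields that are dead once teardown starts, so
         * deferring the free via RCU does not grow the structure as long as
         * the live fields are at least as large as the rcu_head */
        printf("sizeof(struct header) = %zu\n", sizeof(struct header));
        assert(sizeof(struct fake_rcu_head) <= offsetof(struct header, unregistering));
        return 0;
}
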
diff --git a/include/linux/thermal.h b/include/linux/thermal.h index 8651556dbd52..d3ec89fb4122 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h | |||
@@ -172,6 +172,14 @@ void thermal_zone_device_update(struct thermal_zone_device *); | |||
172 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, | 172 | struct thermal_cooling_device *thermal_cooling_device_register(char *, void *, |
173 | const struct thermal_cooling_device_ops *); | 173 | const struct thermal_cooling_device_ops *); |
174 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); | 174 | void thermal_cooling_device_unregister(struct thermal_cooling_device *); |
175 | |||
176 | #ifdef CONFIG_NET | ||
175 | extern int generate_netlink_event(u32 orig, enum events event); | 177 | extern int generate_netlink_event(u32 orig, enum events event); |
178 | #else | ||
179 | static inline int generate_netlink_event(u32 orig, enum events event) | ||
180 | { | ||
181 | return 0; | ||
182 | } | ||
183 | #endif | ||
176 | 184 | ||
177 | #endif /* __THERMAL_H__ */ | 185 | #endif /* __THERMAL_H__ */ |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 4a3cd2cd2f5e..96e50e0ce3ca 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -89,6 +89,18 @@ | |||
89 | #define IPV6_ADDR_SCOPE_GLOBAL 0x0e | 89 | #define IPV6_ADDR_SCOPE_GLOBAL 0x0e |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Addr flags | ||
93 | */ | ||
94 | #ifdef __KERNEL__ | ||
95 | #define IPV6_ADDR_MC_FLAG_TRANSIENT(a) \ | ||
96 | ((a)->s6_addr[1] & 0x10) | ||
97 | #define IPV6_ADDR_MC_FLAG_PREFIX(a) \ | ||
98 | ((a)->s6_addr[1] & 0x20) | ||
99 | #define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a) \ | ||
100 | ((a)->s6_addr[1] & 0x40) | ||
101 | #endif | ||
102 | |||
103 | /* | ||
92 | * fragmentation header | 104 | * fragmentation header |
93 | */ | 105 | */ |
94 | 106 | ||
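
The new IPV6_ADDR_MC_FLAG_* macros test the T, P and R flag bits carried in the second byte of a multicast address. A user-space sketch using a plain byte array in place of struct in6_addr:

#include <stdint.h>
#include <stdio.h>

/* flag bits live in the second byte of the address (s6_addr[1]):
 * 0x10 = transient, 0x20 = prefix-based, 0x40 = embedded rendezvous point */
#define MC_FLAG_TRANSIENT(a)    ((a)[1] & 0x10)
#define MC_FLAG_PREFIX(a)       ((a)[1] & 0x20)
#define MC_FLAG_RENDEZVOUS(a)   ((a)[1] & 0x40)

int main(void)
{
        uint8_t addr[16] = { 0xff, 0x3e };      /* ff3e::1234: transient, prefix-based */

        addr[14] = 0x12;
        addr[15] = 0x34;
        printf("transient:  %s\n", MC_FLAG_TRANSIENT(addr)  ? "yes" : "no");
        printf("prefix:     %s\n", MC_FLAG_PREFIX(addr)     ? "yes" : "no");
        printf("rendezvous: %s\n", MC_FLAG_RENDEZVOUS(addr) ? "yes" : "no");
        return 0;
}
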
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index cd85b3bc8327..e505358d8999 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h | |||
@@ -201,18 +201,8 @@ nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, | |||
201 | } | 201 | } |
202 | #endif | 202 | #endif |
203 | 203 | ||
204 | static inline void | ||
205 | nf_tproxy_put_sock(struct sock *sk) | ||
206 | { | ||
207 | /* TIME_WAIT inet sockets have to be handled differently */ | ||
208 | if ((sk->sk_protocol == IPPROTO_TCP) && (sk->sk_state == TCP_TIME_WAIT)) | ||
209 | inet_twsk_put(inet_twsk(sk)); | ||
210 | else | ||
211 | sock_put(sk); | ||
212 | } | ||
213 | |||
214 | /* assign a socket to the skb -- consumes sk */ | 204 | /* assign a socket to the skb -- consumes sk */ |
215 | int | 205 | void |
216 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); | 206 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk); |
217 | 207 | ||
218 | #endif | 208 | #endif |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 160a407c1963..04f8556313d5 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -199,7 +199,7 @@ struct tcf_proto { | |||
199 | 199 | ||
200 | struct qdisc_skb_cb { | 200 | struct qdisc_skb_cb { |
201 | unsigned int pkt_len; | 201 | unsigned int pkt_len; |
202 | char data[]; | 202 | long data[]; |
203 | }; | 203 | }; |
204 | 204 | ||
205 | static inline int qdisc_qlen(struct Qdisc *q) | 205 | static inline int qdisc_qlen(struct Qdisc *q) |
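
Switching qdisc_skb_cb's flexible array from char to long changes the alignment the compiler guarantees for the private data area, which callers cast to structures with wider members. A small program showing the effect (the exact offsets and alignments are platform-dependent):

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

struct cb_char { unsigned int pkt_len; char data[]; };
struct cb_long { unsigned int pkt_len; long data[]; };

int main(void)
{
        /* with char data[] the private area starts wherever pkt_len ends and
         * carries no alignment promise; with long data[] the compiler aligns
         * it for long, so casting it to wider structures is safe */
        printf("char data[]: offset %zu, struct align %zu\n",
               offsetof(struct cb_char, data), alignof(struct cb_char));
        printf("long data[]: offset %zu, struct align %zu\n",
               offsetof(struct cb_long, data), alignof(struct cb_long));
        return 0;
}
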
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 8479b66c067b..3fd5064dd43a 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -261,6 +261,7 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev); | |||
261 | #define CONF_ENABLE_ESR 0x0008 | 261 | #define CONF_ENABLE_ESR 0x0008 |
262 | #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ | 262 | #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ |
263 | * (CONF_ENABLE_IRQ) in use */ | 263 | * (CONF_ENABLE_IRQ) in use */ |
264 | #define CONF_ENABLE_ZVCARD 0x0020 | ||
264 | 265 | ||
265 | /* flags used by pcmcia_loop_config() autoconfiguration */ | 266 | /* flags used by pcmcia_loop_config() autoconfiguration */ |
266 | #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ | 267 | #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ |
diff --git a/include/sound/wm8903.h b/include/sound/wm8903.h index b4a0db2307ef..1eeebd534f7e 100644 --- a/include/sound/wm8903.h +++ b/include/sound/wm8903.h | |||
@@ -17,13 +17,9 @@ | |||
17 | /* | 17 | /* |
18 | * R6 (0x06) - Mic Bias Control 0 | 18 | * R6 (0x06) - Mic Bias Control 0 |
19 | */ | 19 | */ |
20 | #define WM8903_MICDET_HYST_ENA 0x0080 /* MICDET_HYST_ENA */ | 20 | #define WM8903_MICDET_THR_MASK 0x0030 /* MICDET_THR - [5:4] */ |
21 | #define WM8903_MICDET_HYST_ENA_MASK 0x0080 /* MICDET_HYST_ENA */ | 21 | #define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [5:4] */ |
22 | #define WM8903_MICDET_HYST_ENA_SHIFT 7 /* MICDET_HYST_ENA */ | 22 | #define WM8903_MICDET_THR_WIDTH 2 /* MICDET_THR - [5:4] */ |
23 | #define WM8903_MICDET_HYST_ENA_WIDTH 1 /* MICDET_HYST_ENA */ | ||
24 | #define WM8903_MICDET_THR_MASK 0x0070 /* MICDET_THR - [6:4] */ | ||
25 | #define WM8903_MICDET_THR_SHIFT 4 /* MICDET_THR - [6:4] */ | ||
26 | #define WM8903_MICDET_THR_WIDTH 3 /* MICDET_THR - [6:4] */ | ||
27 | #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ | 23 | #define WM8903_MICSHORT_THR_MASK 0x000C /* MICSHORT_THR - [3:2] */ |
28 | #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ | 24 | #define WM8903_MICSHORT_THR_SHIFT 2 /* MICSHORT_THR - [3:2] */ |
29 | #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ | 25 | #define WM8903_MICSHORT_THR_WIDTH 2 /* MICSHORT_THR - [3:2] */ |
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 246940511579..2e8ec51f0615 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -135,6 +135,8 @@ extern void transport_complete_task(struct se_task *, int); | |||
135 | extern void transport_add_task_to_execute_queue(struct se_task *, | 135 | extern void transport_add_task_to_execute_queue(struct se_task *, |
136 | struct se_task *, | 136 | struct se_task *, |
137 | struct se_device *); | 137 | struct se_device *); |
138 | extern void transport_remove_task_from_execute_queue(struct se_task *, | ||
139 | struct se_device *); | ||
138 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); | 140 | unsigned char *transport_dump_cmd_direction(struct se_cmd *); |
139 | extern void transport_dump_dev_state(struct se_device *, char *, int *); | 141 | extern void transport_dump_dev_state(struct se_device *, char *, int *); |
140 | extern void transport_dump_dev_info(struct se_device *, struct se_lun *, | 142 | extern void transport_dump_dev_info(struct se_device *, struct se_lun *, |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index aba421d68f6f..78f18adb49c8 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
31 | 0 : blk_rq_sectors(rq); | 31 | 0 : blk_rq_sectors(rq); |
32 | __entry->errors = rq->errors; | 32 | __entry->errors = rq->errors; |
33 | 33 | ||
34 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 34 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
35 | blk_dump_cmd(__get_str(cmd), rq); | 35 | blk_dump_cmd(__get_str(cmd), rq); |
36 | ), | 36 | ), |
37 | 37 | ||
@@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq, | |||
118 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | 118 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
119 | blk_rq_bytes(rq) : 0; | 119 | blk_rq_bytes(rq) : 0; |
120 | 120 | ||
121 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 121 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
122 | blk_dump_cmd(__get_str(cmd), rq); | 122 | blk_dump_cmd(__get_str(cmd), rq); |
123 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 123 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
124 | ), | 124 | ), |
@@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap, | |||
563 | __entry->nr_sector = blk_rq_sectors(rq); | 563 | __entry->nr_sector = blk_rq_sectors(rq); |
564 | __entry->old_dev = dev; | 564 | __entry->old_dev = dev; |
565 | __entry->old_sector = from; | 565 | __entry->old_sector = from; |
566 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 566 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
567 | ), | 567 | ), |
568 | 568 | ||
569 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | 569 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", |
diff --git a/include/xen/events.h b/include/xen/events.h index 00f53ddcc062..962da2ced5b4 100644 --- a/include/xen/events.h +++ b/include/xen/events.h | |||
@@ -75,11 +75,9 @@ int xen_allocate_pirq(unsigned gsi, int shareable, char *name); | |||
75 | int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name); | 75 | int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name); |
76 | 76 | ||
77 | #ifdef CONFIG_PCI_MSI | 77 | #ifdef CONFIG_PCI_MSI |
78 | /* Allocate an irq and a pirq to be used with MSIs. */ | 78 | int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc); |
79 | #define XEN_ALLOC_PIRQ (1 << 0) | 79 | int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc, |
80 | #define XEN_ALLOC_IRQ (1 << 1) | 80 | int pirq, int vector, const char *name); |
81 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc_mask); | ||
82 | int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type); | ||
83 | #endif | 81 | #endif |
84 | 82 | ||
85 | /* De-allocates the above mentioned physical interrupt. */ | 83 | /* De-allocates the above mentioned physical interrupt. */ |
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h index 68dd2b49635c..61e523af3c46 100644 --- a/include/xen/interface/io/blkif.h +++ b/include/xen/interface/io/blkif.h | |||
@@ -51,11 +51,7 @@ typedef uint64_t blkif_sector_t; | |||
51 | */ | 51 | */ |
52 | #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 | 52 | #define BLKIF_MAX_SEGMENTS_PER_REQUEST 11 |
53 | 53 | ||
54 | struct blkif_request { | 54 | struct blkif_request_rw { |
55 | uint8_t operation; /* BLKIF_OP_??? */ | ||
56 | uint8_t nr_segments; /* number of segments */ | ||
57 | blkif_vdev_t handle; /* only for read/write requests */ | ||
58 | uint64_t id; /* private guest value, echoed in resp */ | ||
59 | blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ | 55 | blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */ |
60 | struct blkif_request_segment { | 56 | struct blkif_request_segment { |
61 | grant_ref_t gref; /* reference to I/O buffer frame */ | 57 | grant_ref_t gref; /* reference to I/O buffer frame */ |
@@ -65,6 +61,16 @@ struct blkif_request { | |||
65 | } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; | 61 | } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST]; |
66 | }; | 62 | }; |
67 | 63 | ||
64 | struct blkif_request { | ||
65 | uint8_t operation; /* BLKIF_OP_??? */ | ||
66 | uint8_t nr_segments; /* number of segments */ | ||
67 | blkif_vdev_t handle; /* only for read/write requests */ | ||
68 | uint64_t id; /* private guest value, echoed in resp */ | ||
69 | union { | ||
70 | struct blkif_request_rw rw; | ||
71 | } u; | ||
72 | }; | ||
73 | |||
68 | struct blkif_response { | 74 | struct blkif_response { |
69 | uint64_t id; /* copied from request */ | 75 | uint64_t id; /* copied from request */ |
70 | uint8_t operation; /* copied from request */ | 76 | uint8_t operation; /* copied from request */ |
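The blkif.h change above splits the read/write payload out of struct blkif_request into struct blkif_request_rw and reaches it through a union, so further operation types can be added later without growing the common header. The sketch below is a minimal user-space model of the new layout; the integer typedefs standing in for grant_ref_t, blkif_vdev_t and blkif_sector_t are assumptions for illustration only, and the segment struct is trimmed to the fields visible in the hunk.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the Xen types (assumption, for illustration). */
typedef uint32_t grant_ref_t;
typedef uint16_t blkif_vdev_t;
typedef uint64_t blkif_sector_t;

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11
#define BLKIF_OP_READ 0

struct blkif_request_rw {
	blkif_sector_t sector_number;        /* start sector idx on disk */
	struct blkif_request_segment {
		grant_ref_t gref;            /* reference to I/O buffer frame */
		/* first/last sector fields omitted from this sketch */
	} seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};

struct blkif_request {
	uint8_t operation;                   /* BLKIF_OP_??? */
	uint8_t nr_segments;                 /* number of segments */
	blkif_vdev_t handle;                 /* only for read/write requests */
	uint64_t id;                         /* private guest value, echoed in resp */
	union {
		struct blkif_request_rw rw;  /* read/write payload */
	} u;
};

int main(void)
{
	struct blkif_request req = { 0 };

	/* Common header stays at the top level ... */
	req.operation = BLKIF_OP_READ;
	req.nr_segments = 1;
	req.id = 42;

	/* ... while read/write specific fields now live under u.rw. */
	req.u.rw.sector_number = 2048;
	req.u.rw.seg[0].gref = 7;

	printf("op=%u segs=%u sector=%llu\n", (unsigned)req.operation,
	       (unsigned)req.nr_segments,
	       (unsigned long long)req.u.rw.sector_number);
	return 0;
}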
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4349935c2ad8..e92e98189032 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | |||
1575 | return -ENODEV; | 1575 | return -ENODEV; |
1576 | 1576 | ||
1577 | trialcs = alloc_trial_cpuset(cs); | 1577 | trialcs = alloc_trial_cpuset(cs); |
1578 | if (!trialcs) | 1578 | if (!trialcs) { |
1579 | return -ENOMEM; | 1579 | retval = -ENOMEM; |
1580 | goto out; | ||
1581 | } | ||
1580 | 1582 | ||
1581 | switch (cft->private) { | 1583 | switch (cft->private) { |
1582 | case FILE_CPULIST: | 1584 | case FILE_CPULIST: |
@@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | |||
1591 | } | 1593 | } |
1592 | 1594 | ||
1593 | free_trial_cpuset(trialcs); | 1595 | free_trial_cpuset(trialcs); |
1596 | out: | ||
1594 | cgroup_unlock(); | 1597 | cgroup_unlock(); |
1595 | return retval; | 1598 | return retval; |
1596 | } | 1599 | } |
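The cpuset_write_resmask() fix above stops the function from returning with cgroup_lock() still held when alloc_trial_cpuset() fails: instead of an early return -ENOMEM it records the error and jumps to a label that runs cgroup_unlock(). A minimal sketch of the same error-path pattern, with a plain mutex and malloc() standing in for the cgroup lock and the trial allocation (assumptions for illustration):

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static int write_resmask(size_t bufsize)
{
	int retval = 0;
	void *trial;

	pthread_mutex_lock(&lock);          /* cgroup_lock() in the original */

	trial = malloc(bufsize);            /* alloc_trial_cpuset() in the original */
	if (!trial) {
		retval = -ENOMEM;
		goto out;                   /* do NOT return with the lock held */
	}

	/* ... update the mask using the trial copy ... */

	free(trial);                        /* free_trial_cpuset() in the original */
out:
	pthread_mutex_unlock(&lock);        /* cgroup_unlock() runs on every path */
	return retval;
}

int main(void)
{
	return write_resmask(64) ? 1 : 0;
}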
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 4571ae7e085a..99c3bc8a6fb4 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h | |||
@@ -3,6 +3,12 @@ | |||
3 | */ | 3 | */ |
4 | #include <linux/irqdesc.h> | 4 | #include <linux/irqdesc.h> |
5 | 5 | ||
6 | #ifdef CONFIG_SPARSE_IRQ | ||
7 | # define IRQ_BITMAP_BITS (NR_IRQS + 8196) | ||
8 | #else | ||
9 | # define IRQ_BITMAP_BITS NR_IRQS | ||
10 | #endif | ||
11 | |||
6 | extern int noirqdebug; | 12 | extern int noirqdebug; |
7 | 13 | ||
8 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) | 14 | #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) |
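internals.h now sizes the interrupt bitmaps with IRQ_BITMAP_BITS, which adds slack above NR_IRQS when CONFIG_SPARSE_IRQ is set (the patch uses NR_IRQS + 8196), so that dynamically allocated interrupts beyond NR_IRQS still fit in the allocated_irqs and irqs_resend bitmaps changed in the following hunks. A stand-alone sketch of the sizing, with a simplified DECLARE_BITMAP() and an illustrative NR_IRQS value standing in for the kernel definitions (assumptions, not taken from the patch):

#include <limits.h>
#include <stdio.h>

#define CONFIG_SPARSE_IRQ 1
#define NR_IRQS 256                      /* illustrative value only */

#ifdef CONFIG_SPARSE_IRQ
# define IRQ_BITMAP_BITS (NR_IRQS + 8196)
#else
# define IRQ_BITMAP_BITS NR_IRQS
#endif

/* Simplified stand-in for the kernel's DECLARE_BITMAP(). */
#define BITS_PER_LONG (CHAR_BIT * sizeof(long))
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)
#define DECLARE_BITMAP(name, bits) unsigned long name[BITS_TO_LONGS(bits)]

static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);

int main(void)
{
	printf("NR_IRQS=%d IRQ_BITMAP_BITS=%d words=%zu\n",
	       NR_IRQS, IRQ_BITMAP_BITS,
	       sizeof(allocated_irqs) / sizeof(allocated_irqs[0]));
	return 0;
}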
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 282f20230e67..2039bea31bdf 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c | |||
@@ -94,7 +94,7 @@ int nr_irqs = NR_IRQS; | |||
94 | EXPORT_SYMBOL_GPL(nr_irqs); | 94 | EXPORT_SYMBOL_GPL(nr_irqs); |
95 | 95 | ||
96 | static DEFINE_MUTEX(sparse_irq_lock); | 96 | static DEFINE_MUTEX(sparse_irq_lock); |
97 | static DECLARE_BITMAP(allocated_irqs, NR_IRQS); | 97 | static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS); |
98 | 98 | ||
99 | #ifdef CONFIG_SPARSE_IRQ | 99 | #ifdef CONFIG_SPARSE_IRQ |
100 | 100 | ||
@@ -217,6 +217,15 @@ int __init early_irq_init(void) | |||
217 | initcnt = arch_probe_nr_irqs(); | 217 | initcnt = arch_probe_nr_irqs(); |
218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); | 218 | printk(KERN_INFO "NR_IRQS:%d nr_irqs:%d %d\n", NR_IRQS, nr_irqs, initcnt); |
219 | 219 | ||
220 | if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS)) | ||
221 | nr_irqs = IRQ_BITMAP_BITS; | ||
222 | |||
223 | if (WARN_ON(initcnt > IRQ_BITMAP_BITS)) | ||
224 | initcnt = IRQ_BITMAP_BITS; | ||
225 | |||
226 | if (initcnt > nr_irqs) | ||
227 | nr_irqs = initcnt; | ||
228 | |||
220 | for (i = 0; i < initcnt; i++) { | 229 | for (i = 0; i < initcnt; i++) { |
221 | desc = alloc_desc(i, node); | 230 | desc = alloc_desc(i, node); |
222 | set_bit(i, allocated_irqs); | 231 | set_bit(i, allocated_irqs); |
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747dd..2782bacdf494 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -282,8 +282,17 @@ EXPORT_SYMBOL(disable_irq); | |||
282 | 282 | ||
283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
284 | { | 284 | { |
285 | if (resume) | 285 | if (resume) { |
286 | if (!(desc->status & IRQ_SUSPENDED)) { | ||
287 | if (!desc->action) | ||
288 | return; | ||
289 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
290 | return; | ||
291 | /* Pretend that it got disabled ! */ | ||
292 | desc->depth++; | ||
293 | } | ||
286 | desc->status &= ~IRQ_SUSPENDED; | 294 | desc->status &= ~IRQ_SUSPENDED; |
295 | } | ||
287 | 296 | ||
288 | switch (desc->depth) { | 297 | switch (desc->depth) { |
289 | case 0: | 298 | case 0: |
@@ -1100,7 +1109,7 @@ int request_threaded_irq(unsigned int irq, irq_handler_t handler, | |||
1100 | if (retval) | 1109 | if (retval) |
1101 | kfree(action); | 1110 | kfree(action); |
1102 | 1111 | ||
1103 | #ifdef CONFIG_DEBUG_SHIRQ | 1112 | #ifdef CONFIG_DEBUG_SHIRQ_FIXME |
1104 | if (!retval && (irqflags & IRQF_SHARED)) { | 1113 | if (!retval && (irqflags & IRQF_SHARED)) { |
1105 | /* | 1114 | /* |
1106 | * It's a shared IRQ -- the driver ought to be prepared for it | 1115 | * It's a shared IRQ -- the driver ought to be prepared for it |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 0d4005d85b03..d6bfb89cce91 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -53,9 +53,6 @@ void resume_device_irqs(void) | |||
53 | for_each_irq_desc(irq, desc) { | 53 | for_each_irq_desc(irq, desc) { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | 55 | ||
56 | if (!(desc->status & IRQ_SUSPENDED)) | ||
57 | continue; | ||
58 | |||
59 | raw_spin_lock_irqsave(&desc->lock, flags); | 56 | raw_spin_lock_irqsave(&desc->lock, flags); |
60 | __enable_irq(desc, irq, true); | 57 | __enable_irq(desc, irq, true); |
61 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 58 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
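With the pm.c check removed, __enable_irq() is now called for every descriptor on resume and must decide for itself what to do: a descriptor that was never marked IRQ_SUSPENDED is only re-enabled if its action carries IRQF_FORCE_RESUME, and in that case the depth counter is bumped first so the normal enable path still balances. The sketch below models only that decision; the flag values, the struct and the simplified depth handling are stand-ins for the irq_desc internals (assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define IRQ_SUSPENDED      (1u << 0)
#define IRQF_FORCE_RESUME  (1u << 1)

struct fake_desc {
	unsigned int status;       /* stand-in for desc->status */
	unsigned int action_flags; /* stand-in for desc->action->flags */
	bool has_action;
	int depth;                 /* disable nesting depth */
};

/* Mirrors the decision added to __enable_irq() for the resume case. */
static void enable_on_resume(struct fake_desc *desc)
{
	if (!(desc->status & IRQ_SUSPENDED)) {
		if (!desc->has_action)
			return;                       /* nothing to resume */
		if (!(desc->action_flags & IRQF_FORCE_RESUME))
			return;                       /* was never suspended, leave it */
		desc->depth++;                        /* pretend it got disabled */
	}
	desc->status &= ~IRQ_SUSPENDED;

	if (desc->depth > 0 && --desc->depth == 0)
		printf("irq re-enabled\n");           /* the depth==1 enable path */
}

int main(void)
{
	struct fake_desc suspended = { IRQ_SUSPENDED, 0, true, 1 };
	struct fake_desc forced    = { 0, IRQF_FORCE_RESUME, true, 0 };

	enable_on_resume(&suspended);  /* normal resume: depth 1 -> 0 */
	enable_on_resume(&forced);     /* forced: depth bumped, then re-enabled */
	return 0;
}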
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index 891115a929aa..dc49358b73fa 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #ifdef CONFIG_HARDIRQS_SW_RESEND | 23 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
24 | 24 | ||
25 | /* Bitmap to handle software resend of interrupts: */ | 25 | /* Bitmap to handle software resend of interrupts: */ |
26 | static DECLARE_BITMAP(irqs_resend, NR_IRQS); | 26 | static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS); |
27 | 27 | ||
28 | /* | 28 | /* |
29 | * Run software resends of IRQ's | 29 | * Run software resends of IRQ's |
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 999835b6112b..656222fcf767 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -782,6 +782,10 @@ retry: | |||
782 | raw_spin_unlock_irq(&ctx->lock); | 782 | raw_spin_unlock_irq(&ctx->lock); |
783 | } | 783 | } |
784 | 784 | ||
785 | #define MAX_INTERRUPTS (~0ULL) | ||
786 | |||
787 | static void perf_log_throttle(struct perf_event *event, int enable); | ||
788 | |||
785 | static int | 789 | static int |
786 | event_sched_in(struct perf_event *event, | 790 | event_sched_in(struct perf_event *event, |
787 | struct perf_cpu_context *cpuctx, | 791 | struct perf_cpu_context *cpuctx, |
@@ -794,6 +798,17 @@ event_sched_in(struct perf_event *event, | |||
794 | 798 | ||
795 | event->state = PERF_EVENT_STATE_ACTIVE; | 799 | event->state = PERF_EVENT_STATE_ACTIVE; |
796 | event->oncpu = smp_processor_id(); | 800 | event->oncpu = smp_processor_id(); |
801 | |||
802 | /* | ||
803 | * Unthrottle events, since we scheduled we might have missed several | ||
804 | * ticks already; also, for a heavily scheduling task there is little | ||
805 | * guarantee it'll get a tick in a timely manner. | ||
806 | */ | ||
807 | if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { | ||
808 | perf_log_throttle(event, 1); | ||
809 | event->hw.interrupts = 0; | ||
810 | } | ||
811 | |||
797 | /* | 812 | /* |
798 | * The new state must be visible before we turn it on in the hardware: | 813 | * The new state must be visible before we turn it on in the hardware: |
799 | */ | 814 | */ |
@@ -1596,10 +1611,6 @@ void __perf_event_task_sched_in(struct task_struct *task) | |||
1596 | } | 1611 | } |
1597 | } | 1612 | } |
1598 | 1613 | ||
1599 | #define MAX_INTERRUPTS (~0ULL) | ||
1600 | |||
1601 | static void perf_log_throttle(struct perf_event *event, int enable); | ||
1602 | |||
1603 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) | 1614 | static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) |
1604 | { | 1615 | { |
1605 | u64 frequency = event->attr.sample_freq; | 1616 | u64 frequency = event->attr.sample_freq; |
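event_sched_in() now clears a left-over throttle when an event is scheduled back in: if hw.interrupts still holds the MAX_INTERRUPTS sentinel, the throttle is logged as lifted and the counter reset, since a heavily scheduling task may never get the timer tick that would otherwise do this. A small stand-alone model of the sentinel pattern (the struct and the logging stub are stand-ins, not the perf API):

#include <stdio.h>

#define MAX_INTERRUPTS (~0ULL)

struct fake_event {
	unsigned long long interrupts;   /* stand-in for event->hw.interrupts */
};

static void log_throttle(struct fake_event *ev, int enable)
{
	(void)ev;
	printf("throttle %s\n", enable ? "lifted" : "applied");
}

static void sched_in(struct fake_event *ev)
{
	/*
	 * Unthrottle on schedule-in: the tick that would normally reset the
	 * counter may not have run for this task in a timely manner.
	 */
	if (ev->interrupts == MAX_INTERRUPTS) {
		log_throttle(ev, 1);
		ev->interrupts = 0;
	}
	/* ... continue with the normal schedule-in work ... */
}

int main(void)
{
	struct fake_event ev = { MAX_INTERRUPTS };  /* throttled earlier */
	sched_in(&ev);                              /* prints "throttle lifted" */
	return 0;
}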
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1708b1e2972d..e2302e40b360 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
163 | return !err; | 163 | return !err; |
164 | } | 164 | } |
165 | 165 | ||
166 | int ptrace_attach(struct task_struct *task) | 166 | static int ptrace_attach(struct task_struct *task) |
167 | { | 167 | { |
168 | int retval; | 168 | int retval; |
169 | 169 | ||
@@ -219,7 +219,7 @@ out: | |||
219 | * Performs checks and sets PT_PTRACED. | 219 | * Performs checks and sets PT_PTRACED. |
220 | * Should be used by all ptrace implementations for PTRACE_TRACEME. | 220 | * Should be used by all ptrace implementations for PTRACE_TRACEME. |
221 | */ | 221 | */ |
222 | int ptrace_traceme(void) | 222 | static int ptrace_traceme(void) |
223 | { | 223 | { |
224 | int ret = -EPERM; | 224 | int ret = -EPERM; |
225 | 225 | ||
@@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |||
293 | return false; | 293 | return false; |
294 | } | 294 | } |
295 | 295 | ||
296 | int ptrace_detach(struct task_struct *child, unsigned int data) | 296 | static int ptrace_detach(struct task_struct *child, unsigned int data) |
297 | { | 297 | { |
298 | bool dead = false; | 298 | bool dead = false; |
299 | 299 | ||
diff --git a/kernel/sched.c b/kernel/sched.c index 18d38e4ec7ba..42eab5a8437d 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -4213,6 +4213,7 @@ void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key) | |||
4213 | { | 4213 | { |
4214 | __wake_up_common(q, mode, 1, 0, key); | 4214 | __wake_up_common(q, mode, 1, 0, key); |
4215 | } | 4215 | } |
4216 | EXPORT_SYMBOL_GPL(__wake_up_locked_key); | ||
4216 | 4217 | ||
4217 | /** | 4218 | /** |
4218 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. | 4219 | * __wake_up_sync_key - wake up threads blocked on a waitqueue. |
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index ad6267714c84..01f75a5f17af 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -210,11 +210,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se); | |||
210 | 210 | ||
211 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | 211 | static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) |
212 | { | 212 | { |
213 | int this_cpu = smp_processor_id(); | ||
214 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; | 213 | struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; |
215 | struct sched_rt_entity *rt_se; | 214 | struct sched_rt_entity *rt_se; |
216 | 215 | ||
217 | rt_se = rt_rq->tg->rt_se[this_cpu]; | 216 | int cpu = cpu_of(rq_of_rt_rq(rt_rq)); |
217 | |||
218 | rt_se = rt_rq->tg->rt_se[cpu]; | ||
218 | 219 | ||
219 | if (rt_rq->rt_nr_running) { | 220 | if (rt_rq->rt_nr_running) { |
220 | if (rt_se && !on_rt_rq(rt_se)) | 221 | if (rt_se && !on_rt_rq(rt_se)) |
@@ -226,10 +227,10 @@ static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) | |||
226 | 227 | ||
227 | static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) | 228 | static void sched_rt_rq_dequeue(struct rt_rq *rt_rq) |
228 | { | 229 | { |
229 | int this_cpu = smp_processor_id(); | ||
230 | struct sched_rt_entity *rt_se; | 230 | struct sched_rt_entity *rt_se; |
231 | int cpu = cpu_of(rq_of_rt_rq(rt_rq)); | ||
231 | 232 | ||
232 | rt_se = rt_rq->tg->rt_se[this_cpu]; | 233 | rt_se = rt_rq->tg->rt_se[cpu]; |
233 | 234 | ||
234 | if (rt_se && on_rt_rq(rt_se)) | 235 | if (rt_se && on_rt_rq(rt_se)) |
235 | dequeue_rt_entity(rt_se); | 236 | dequeue_rt_entity(rt_se); |
@@ -565,8 +566,11 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
565 | if (rt_rq->rt_time || rt_rq->rt_nr_running) | 566 | if (rt_rq->rt_time || rt_rq->rt_nr_running) |
566 | idle = 0; | 567 | idle = 0; |
567 | raw_spin_unlock(&rt_rq->rt_runtime_lock); | 568 | raw_spin_unlock(&rt_rq->rt_runtime_lock); |
568 | } else if (rt_rq->rt_nr_running) | 569 | } else if (rt_rq->rt_nr_running) { |
569 | idle = 0; | 570 | idle = 0; |
571 | if (!rt_rq_throttled(rt_rq)) | ||
572 | enqueue = 1; | ||
573 | } | ||
570 | 574 | ||
571 | if (enqueue) | 575 | if (enqueue) |
572 | sched_rt_rq_enqueue(rt_rq); | 576 | sched_rt_rq_enqueue(rt_rq); |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 0f1bd83db985..4eed0af5d144 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -194,9 +194,9 @@ static int sysrq_sysctl_handler(ctl_table *table, int write, | |||
194 | static struct ctl_table root_table[]; | 194 | static struct ctl_table root_table[]; |
195 | static struct ctl_table_root sysctl_table_root; | 195 | static struct ctl_table_root sysctl_table_root; |
196 | static struct ctl_table_header root_table_header = { | 196 | static struct ctl_table_header root_table_header = { |
197 | .count = 1, | 197 | {{.count = 1, |
198 | .ctl_table = root_table, | 198 | .ctl_table = root_table, |
199 | .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list), | 199 | .ctl_entry = LIST_HEAD_INIT(sysctl_table_root.default_set.list),}}, |
200 | .root = &sysctl_table_root, | 200 | .root = &sysctl_table_root, |
201 | .set = &sysctl_table_root.default_set, | 201 | .set = &sysctl_table_root.default_set, |
202 | }; | 202 | }; |
@@ -1567,11 +1567,16 @@ void sysctl_head_get(struct ctl_table_header *head) | |||
1567 | spin_unlock(&sysctl_lock); | 1567 | spin_unlock(&sysctl_lock); |
1568 | } | 1568 | } |
1569 | 1569 | ||
1570 | static void free_head(struct rcu_head *rcu) | ||
1571 | { | ||
1572 | kfree(container_of(rcu, struct ctl_table_header, rcu)); | ||
1573 | } | ||
1574 | |||
1570 | void sysctl_head_put(struct ctl_table_header *head) | 1575 | void sysctl_head_put(struct ctl_table_header *head) |
1571 | { | 1576 | { |
1572 | spin_lock(&sysctl_lock); | 1577 | spin_lock(&sysctl_lock); |
1573 | if (!--head->count) | 1578 | if (!--head->count) |
1574 | kfree(head); | 1579 | call_rcu(&head->rcu, free_head); |
1575 | spin_unlock(&sysctl_lock); | 1580 | spin_unlock(&sysctl_lock); |
1576 | } | 1581 | } |
1577 | 1582 | ||
@@ -1948,10 +1953,10 @@ void unregister_sysctl_table(struct ctl_table_header * header) | |||
1948 | start_unregistering(header); | 1953 | start_unregistering(header); |
1949 | if (!--header->parent->count) { | 1954 | if (!--header->parent->count) { |
1950 | WARN_ON(1); | 1955 | WARN_ON(1); |
1951 | kfree(header->parent); | 1956 | call_rcu(&header->parent->rcu, free_head); |
1952 | } | 1957 | } |
1953 | if (!--header->count) | 1958 | if (!--header->count) |
1954 | kfree(header); | 1959 | call_rcu(&header->rcu, free_head); |
1955 | spin_unlock(&sysctl_lock); | 1960 | spin_unlock(&sysctl_lock); |
1956 | } | 1961 | } |
1957 | 1962 | ||
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 48b2761b5668..a3b5aff62606 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c | |||
@@ -600,4 +600,14 @@ int tick_broadcast_oneshot_active(void) | |||
600 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; | 600 | return tick_broadcast_device.mode == TICKDEV_MODE_ONESHOT; |
601 | } | 601 | } |
602 | 602 | ||
603 | /* | ||
604 | * Check whether the broadcast device supports oneshot. | ||
605 | */ | ||
606 | bool tick_broadcast_oneshot_available(void) | ||
607 | { | ||
608 | struct clock_event_device *bc = tick_broadcast_device.evtdev; | ||
609 | |||
610 | return bc ? bc->features & CLOCK_EVT_FEAT_ONESHOT : false; | ||
611 | } | ||
612 | |||
603 | #endif | 613 | #endif |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 051bc80a0c43..ed228ef6f6b8 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
@@ -51,7 +51,11 @@ int tick_is_oneshot_available(void) | |||
51 | { | 51 | { |
52 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); | 52 | struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); |
53 | 53 | ||
54 | return dev && (dev->features & CLOCK_EVT_FEAT_ONESHOT); | 54 | if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) |
55 | return 0; | ||
56 | if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) | ||
57 | return 1; | ||
58 | return tick_broadcast_oneshot_available(); | ||
55 | } | 59 | } |
56 | 60 | ||
57 | /* | 61 | /* |
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h index 290eefbc1f60..f65d3a723a64 100644 --- a/kernel/time/tick-internal.h +++ b/kernel/time/tick-internal.h | |||
@@ -36,6 +36,7 @@ extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); | |||
36 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); | 36 | extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); |
37 | extern int tick_broadcast_oneshot_active(void); | 37 | extern int tick_broadcast_oneshot_active(void); |
38 | extern void tick_check_oneshot_broadcast(int cpu); | 38 | extern void tick_check_oneshot_broadcast(int cpu); |
39 | bool tick_broadcast_oneshot_available(void); | ||
39 | # else /* BROADCAST */ | 40 | # else /* BROADCAST */ |
40 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) | 41 | static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) |
41 | { | 42 | { |
@@ -46,6 +47,7 @@ static inline void tick_broadcast_switch_to_oneshot(void) { } | |||
46 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } | 47 | static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } |
47 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 48 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
48 | static inline void tick_check_oneshot_broadcast(int cpu) { } | 49 | static inline void tick_check_oneshot_broadcast(int cpu) { } |
50 | static inline bool tick_broadcast_oneshot_available(void) { return true; } | ||
49 | # endif /* !BROADCAST */ | 51 | # endif /* !BROADCAST */ |
50 | 52 | ||
51 | #else /* !ONESHOT */ | 53 | #else /* !ONESHOT */ |
@@ -76,6 +78,7 @@ static inline int tick_resume_broadcast_oneshot(struct clock_event_device *bc) | |||
76 | return 0; | 78 | return 0; |
77 | } | 79 | } |
78 | static inline int tick_broadcast_oneshot_active(void) { return 0; } | 80 | static inline int tick_broadcast_oneshot_active(void) { return 0; } |
81 | static inline bool tick_broadcast_oneshot_available(void) { return false; } | ||
79 | #endif /* !TICK_ONESHOT */ | 82 | #endif /* !TICK_ONESHOT */ |
80 | 83 | ||
81 | /* | 84 | /* |
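Taken together, the tick changes make tick_is_oneshot_available() answer "no" for a per-CPU clock event device that stops in deep C-states (CLOCK_EVT_FEAT_C3STOP) unless a broadcast device with oneshot capability can take over; the !BROADCAST stub returns true so non-C3STOP devices keep working, and the !ONESHOT stub returns false. A stand-alone model of that decision; the feature bit values, the struct and the lapic/hpet example devices are simplified stand-ins (assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define CLOCK_EVT_FEAT_ONESHOT (1u << 0)
#define CLOCK_EVT_FEAT_C3STOP  (1u << 1)

struct fake_clock_event { unsigned int features; };

/* Stand-in for the broadcast device; NULL means none registered. */
static struct fake_clock_event *broadcast_dev;

static bool broadcast_oneshot_available(void)
{
	struct fake_clock_event *bc = broadcast_dev;

	return bc ? (bc->features & CLOCK_EVT_FEAT_ONESHOT) != 0 : false;
}

static int is_oneshot_available(struct fake_clock_event *dev)
{
	if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		return 0;
	if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
		return 1;                /* device keeps ticking in deep idle */
	return broadcast_oneshot_available();
}

int main(void)
{
	struct fake_clock_event lapic = {
		CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
	};
	struct fake_clock_event hpet = { CLOCK_EVT_FEAT_ONESHOT };

	printf("without broadcast: %d\n", is_oneshot_available(&lapic)); /* 0 */
	broadcast_dev = &hpet;
	printf("with broadcast:    %d\n", is_oneshot_available(&lapic)); /* 1 */
	return 0;
}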
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index d95721f33702..cbafed7d4f38 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
1827 | rwbs[i] = '\0'; | 1827 | rwbs[i] = '\0'; |
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | void blk_fill_rwbs_rq(char *rwbs, struct request *rq) | ||
1831 | { | ||
1832 | int rw = rq->cmd_flags & 0x03; | ||
1833 | int bytes; | ||
1834 | |||
1835 | if (rq->cmd_flags & REQ_DISCARD) | ||
1836 | rw |= REQ_DISCARD; | ||
1837 | |||
1838 | if (rq->cmd_flags & REQ_SECURE) | ||
1839 | rw |= REQ_SECURE; | ||
1840 | |||
1841 | bytes = blk_rq_bytes(rq); | ||
1842 | |||
1843 | blk_fill_rwbs(rwbs, rw, bytes); | ||
1844 | } | ||
1845 | |||
1846 | #endif /* CONFIG_EVENT_TRACING */ | 1830 | #endif /* CONFIG_EVENT_TRACING */ |
1847 | 1831 | ||
diff --git a/lib/nlattr.c b/lib/nlattr.c index 5021cbc34411..ac09f2226dc7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
@@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n) | |||
148 | { | 148 | { |
149 | int i, len = 0; | 149 | int i, len = 0; |
150 | 150 | ||
151 | for (i = 0; i < n; i++) { | 151 | for (i = 0; i < n; i++, p++) { |
152 | if (p->len) | 152 | if (p->len) |
153 | len += nla_total_size(p->len); | 153 | len += nla_total_size(p->len); |
154 | else if (nla_attr_minlen[p->type]) | 154 | else if (nla_attr_minlen[p->type]) |
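The nla_policy_len() fix above makes the loop actually advance through the policy array: previously p stayed on the first entry for all n iterations, so the computed length only reflected element 0. A stand-alone model of the corrected walk, with a trimmed-down policy struct and a fixed per-type minimum in place of the nla_attr_minlen[] table (assumptions for illustration):

#include <stdio.h>

struct fake_policy {
	int type;
	int len;    /* explicit payload length, 0 means "use type minimum" */
};

#define FAKE_ATTR_HDRLEN 4
#define FAKE_TYPE_MINLEN 2   /* stand-in for nla_attr_minlen[p->type] */

static int policy_len(const struct fake_policy *p, int n)
{
	int i, len = 0;

	for (i = 0; i < n; i++, p++) {   /* p++ is the fix: visit every entry */
		if (p->len)
			len += FAKE_ATTR_HDRLEN + p->len;
		else
			len += FAKE_ATTR_HDRLEN + FAKE_TYPE_MINLEN;
	}
	return len;
}

int main(void)
{
	const struct fake_policy policy[] = {
		{ .type = 1, .len = 8 },
		{ .type = 2, .len = 0 },
		{ .type = 3, .len = 16 },
	};

	/* 12 + 6 + 20 = 38; the unfixed loop would have reported 3 * 12 = 36. */
	printf("%d\n", policy_len(policy, 3));
	return 0;
}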
diff --git a/lib/swiotlb.c b/lib/swiotlb.c index c47bbe11b804..93ca08b8a451 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c | |||
@@ -686,8 +686,10 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, | |||
686 | /* | 686 | /* |
687 | * Ensure that the address returned is DMA'ble | 687 | * Ensure that the address returned is DMA'ble |
688 | */ | 688 | */ |
689 | if (!dma_capable(dev, dev_addr, size)) | 689 | if (!dma_capable(dev, dev_addr, size)) { |
690 | panic("map_single: bounce buffer is not DMA'ble"); | 690 | swiotlb_tbl_unmap_single(dev, map, size, dir); |
691 | dev_addr = swiotlb_virt_to_bus(dev, io_tlb_overflow_buffer); | ||
692 | } | ||
691 | 693 | ||
692 | return dev_addr; | 694 | return dev_addr; |
693 | } | 695 | } |
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3e29781ee762..113e35c47502 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
@@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag) | |||
650 | 650 | ||
651 | static inline struct page *alloc_hugepage_vma(int defrag, | 651 | static inline struct page *alloc_hugepage_vma(int defrag, |
652 | struct vm_area_struct *vma, | 652 | struct vm_area_struct *vma, |
653 | unsigned long haddr) | 653 | unsigned long haddr, int nd) |
654 | { | 654 | { |
655 | return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), | 655 | return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), |
656 | HPAGE_PMD_ORDER, vma, haddr); | 656 | HPAGE_PMD_ORDER, vma, haddr, nd); |
657 | } | 657 | } |
658 | 658 | ||
659 | #ifndef CONFIG_NUMA | 659 | #ifndef CONFIG_NUMA |
@@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
678 | if (unlikely(khugepaged_enter(vma))) | 678 | if (unlikely(khugepaged_enter(vma))) |
679 | return VM_FAULT_OOM; | 679 | return VM_FAULT_OOM; |
680 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 680 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
681 | vma, haddr); | 681 | vma, haddr, numa_node_id()); |
682 | if (unlikely(!page)) | 682 | if (unlikely(!page)) |
683 | goto out; | 683 | goto out; |
684 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { | 684 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { |
@@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, | |||
799 | } | 799 | } |
800 | 800 | ||
801 | for (i = 0; i < HPAGE_PMD_NR; i++) { | 801 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
802 | pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, | 802 | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, |
803 | vma, address); | 803 | vma, address, page_to_nid(page)); |
804 | if (unlikely(!pages[i] || | 804 | if (unlikely(!pages[i] || |
805 | mem_cgroup_newpage_charge(pages[i], mm, | 805 | mem_cgroup_newpage_charge(pages[i], mm, |
806 | GFP_KERNEL))) { | 806 | GFP_KERNEL))) { |
@@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
902 | if (transparent_hugepage_enabled(vma) && | 902 | if (transparent_hugepage_enabled(vma) && |
903 | !transparent_hugepage_debug_cow()) | 903 | !transparent_hugepage_debug_cow()) |
904 | new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 904 | new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
905 | vma, haddr); | 905 | vma, haddr, numa_node_id()); |
906 | else | 906 | else |
907 | new_page = NULL; | 907 | new_page = NULL; |
908 | 908 | ||
@@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, | |||
1745 | static void collapse_huge_page(struct mm_struct *mm, | 1745 | static void collapse_huge_page(struct mm_struct *mm, |
1746 | unsigned long address, | 1746 | unsigned long address, |
1747 | struct page **hpage, | 1747 | struct page **hpage, |
1748 | struct vm_area_struct *vma) | 1748 | struct vm_area_struct *vma, |
1749 | int node) | ||
1749 | { | 1750 | { |
1750 | pgd_t *pgd; | 1751 | pgd_t *pgd; |
1751 | pud_t *pud; | 1752 | pud_t *pud; |
@@ -1761,6 +1762,10 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1761 | #ifndef CONFIG_NUMA | 1762 | #ifndef CONFIG_NUMA |
1762 | VM_BUG_ON(!*hpage); | 1763 | VM_BUG_ON(!*hpage); |
1763 | new_page = *hpage; | 1764 | new_page = *hpage; |
1765 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | ||
1766 | up_read(&mm->mmap_sem); | ||
1767 | return; | ||
1768 | } | ||
1764 | #else | 1769 | #else |
1765 | VM_BUG_ON(*hpage); | 1770 | VM_BUG_ON(*hpage); |
1766 | /* | 1771 | /* |
@@ -1773,18 +1778,19 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
1773 | * mmap_sem in read mode is good idea also to allow greater | 1778 | * mmap_sem in read mode is good idea also to allow greater |
1774 | * scalability. | 1779 | * scalability. |
1775 | */ | 1780 | */ |
1776 | new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); | 1781 | new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, |
1782 | node); | ||
1777 | if (unlikely(!new_page)) { | 1783 | if (unlikely(!new_page)) { |
1778 | up_read(&mm->mmap_sem); | 1784 | up_read(&mm->mmap_sem); |
1779 | *hpage = ERR_PTR(-ENOMEM); | 1785 | *hpage = ERR_PTR(-ENOMEM); |
1780 | return; | 1786 | return; |
1781 | } | 1787 | } |
1782 | #endif | ||
1783 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { | 1788 | if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { |
1784 | up_read(&mm->mmap_sem); | 1789 | up_read(&mm->mmap_sem); |
1785 | put_page(new_page); | 1790 | put_page(new_page); |
1786 | return; | 1791 | return; |
1787 | } | 1792 | } |
1793 | #endif | ||
1788 | 1794 | ||
1789 | /* after allocating the hugepage upgrade to mmap_sem write mode */ | 1795 | /* after allocating the hugepage upgrade to mmap_sem write mode */ |
1790 | up_read(&mm->mmap_sem); | 1796 | up_read(&mm->mmap_sem); |
@@ -1919,6 +1925,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, | |||
1919 | struct page *page; | 1925 | struct page *page; |
1920 | unsigned long _address; | 1926 | unsigned long _address; |
1921 | spinlock_t *ptl; | 1927 | spinlock_t *ptl; |
1928 | int node = -1; | ||
1922 | 1929 | ||
1923 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | 1930 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
1924 | 1931 | ||
@@ -1949,6 +1956,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, | |||
1949 | page = vm_normal_page(vma, _address, pteval); | 1956 | page = vm_normal_page(vma, _address, pteval); |
1950 | if (unlikely(!page)) | 1957 | if (unlikely(!page)) |
1951 | goto out_unmap; | 1958 | goto out_unmap; |
1959 | /* | ||
1960 | * Choose the node of the first page. This could | ||
1961 | * be more sophisticated and look at more pages, | ||
1962 | * but isn't for now. | ||
1963 | */ | ||
1964 | if (node == -1) | ||
1965 | node = page_to_nid(page); | ||
1952 | VM_BUG_ON(PageCompound(page)); | 1966 | VM_BUG_ON(PageCompound(page)); |
1953 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) | 1967 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) |
1954 | goto out_unmap; | 1968 | goto out_unmap; |
@@ -1965,7 +1979,7 @@ out_unmap: | |||
1965 | pte_unmap_unlock(pte, ptl); | 1979 | pte_unmap_unlock(pte, ptl); |
1966 | if (ret) | 1980 | if (ret) |
1967 | /* collapse_huge_page will return with the mmap_sem released */ | 1981 | /* collapse_huge_page will return with the mmap_sem released */ |
1968 | collapse_huge_page(mm, address, hpage, vma); | 1982 | collapse_huge_page(mm, address, hpage, vma, node); |
1969 | out: | 1983 | out: |
1970 | return ret; | 1984 | return ret; |
1971 | } | 1985 | } |
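khugepaged_scan_pmd() now remembers the NUMA node of the first page it finds in the PMD range and hands that node to collapse_huge_page(), so the replacement huge page is allocated near the data being collapsed instead of on whichever node khugepaged happens to run. A small model of the "pick the first page's node" scan; the page struct and node lookup are stand-ins (assumptions for illustration):

#include <stdio.h>

struct fake_page { int nid; };   /* stand-in for page_to_nid(page) */

/* Returns the node of the first present page, or -1 if none was found. */
static int pick_target_node(struct fake_page **pages, int nr)
{
	int i, node = -1;

	for (i = 0; i < nr; i++) {
		if (!pages[i])
			continue;            /* pte without a normal page */
		/*
		 * Choose the node of the first page; this could look at
		 * more pages, but it mirrors what the patch does.
		 */
		if (node == -1)
			node = pages[i]->nid;
	}
	return node;
}

int main(void)
{
	struct fake_page a = { 1 }, b = { 0 };
	struct fake_page *range[] = { NULL, &a, &b, NULL };

	printf("allocate huge page on node %d\n",
	       pick_target_node(range, 4));   /* prints node 1 */
	return 0;
}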
diff --git a/mm/memory.c b/mm/memory.c index 8e8c18324863..5823698c2b71 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2648,6 +2648,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2648 | details.last_index = ULONG_MAX; | 2648 | details.last_index = ULONG_MAX; |
2649 | details.i_mmap_lock = &mapping->i_mmap_lock; | 2649 | details.i_mmap_lock = &mapping->i_mmap_lock; |
2650 | 2650 | ||
2651 | mutex_lock(&mapping->unmap_mutex); | ||
2651 | spin_lock(&mapping->i_mmap_lock); | 2652 | spin_lock(&mapping->i_mmap_lock); |
2652 | 2653 | ||
2653 | /* Protect against endless unmapping loops */ | 2654 | /* Protect against endless unmapping loops */ |
@@ -2664,6 +2665,7 @@ void unmap_mapping_range(struct address_space *mapping, | |||
2664 | if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) | 2665 | if (unlikely(!list_empty(&mapping->i_mmap_nonlinear))) |
2665 | unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); | 2666 | unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details); |
2666 | spin_unlock(&mapping->i_mmap_lock); | 2667 | spin_unlock(&mapping->i_mmap_lock); |
2668 | mutex_unlock(&mapping->unmap_mutex); | ||
2667 | } | 2669 | } |
2668 | EXPORT_SYMBOL(unmap_mapping_range); | 2670 | EXPORT_SYMBOL(unmap_mapping_range); |
2669 | 2671 | ||
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 368fc9d23610..b53ec99f1428 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
@@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) | |||
1524 | } | 1524 | } |
1525 | 1525 | ||
1526 | /* Return a zonelist indicated by gfp for node representing a mempolicy */ | 1526 | /* Return a zonelist indicated by gfp for node representing a mempolicy */ |
1527 | static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) | 1527 | static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, |
1528 | int nd) | ||
1528 | { | 1529 | { |
1529 | int nd = numa_node_id(); | ||
1530 | |||
1531 | switch (policy->mode) { | 1530 | switch (policy->mode) { |
1532 | case MPOL_PREFERRED: | 1531 | case MPOL_PREFERRED: |
1533 | if (!(policy->flags & MPOL_F_LOCAL)) | 1532 | if (!(policy->flags & MPOL_F_LOCAL)) |
@@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, | |||
1679 | zl = node_zonelist(interleave_nid(*mpol, vma, addr, | 1678 | zl = node_zonelist(interleave_nid(*mpol, vma, addr, |
1680 | huge_page_shift(hstate_vma(vma))), gfp_flags); | 1679 | huge_page_shift(hstate_vma(vma))), gfp_flags); |
1681 | } else { | 1680 | } else { |
1682 | zl = policy_zonelist(gfp_flags, *mpol); | 1681 | zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); |
1683 | if ((*mpol)->mode == MPOL_BIND) | 1682 | if ((*mpol)->mode == MPOL_BIND) |
1684 | *nodemask = &(*mpol)->v.nodes; | 1683 | *nodemask = &(*mpol)->v.nodes; |
1685 | } | 1684 | } |
@@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
1820 | */ | 1819 | */ |
1821 | struct page * | 1820 | struct page * |
1822 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | 1821 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, |
1823 | unsigned long addr) | 1822 | unsigned long addr, int node) |
1824 | { | 1823 | { |
1825 | struct mempolicy *pol = get_vma_policy(current, vma, addr); | 1824 | struct mempolicy *pol = get_vma_policy(current, vma, addr); |
1826 | struct zonelist *zl; | 1825 | struct zonelist *zl; |
@@ -1830,13 +1829,13 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | |||
1830 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { | 1829 | if (unlikely(pol->mode == MPOL_INTERLEAVE)) { |
1831 | unsigned nid; | 1830 | unsigned nid; |
1832 | 1831 | ||
1833 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT); | 1832 | nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order); |
1834 | mpol_cond_put(pol); | 1833 | mpol_cond_put(pol); |
1835 | page = alloc_page_interleave(gfp, order, nid); | 1834 | page = alloc_page_interleave(gfp, order, nid); |
1836 | put_mems_allowed(); | 1835 | put_mems_allowed(); |
1837 | return page; | 1836 | return page; |
1838 | } | 1837 | } |
1839 | zl = policy_zonelist(gfp, pol); | 1838 | zl = policy_zonelist(gfp, pol, node); |
1840 | if (unlikely(mpol_needs_cond_ref(pol))) { | 1839 | if (unlikely(mpol_needs_cond_ref(pol))) { |
1841 | /* | 1840 | /* |
1842 | * slow path: ref counted shared policy | 1841 | * slow path: ref counted shared policy |
@@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) | |||
1892 | page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); | 1891 | page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); |
1893 | else | 1892 | else |
1894 | page = __alloc_pages_nodemask(gfp, order, | 1893 | page = __alloc_pages_nodemask(gfp, order, |
1895 | policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); | 1894 | policy_zonelist(gfp, pol, numa_node_id()), |
1895 | policy_nodemask(gfp, pol)); | ||
1896 | put_mems_allowed(); | 1896 | put_mems_allowed(); |
1897 | return page; | 1897 | return page; |
1898 | } | 1898 | } |
diff --git a/mm/migrate.c b/mm/migrate.c index 766115253807..352de555626c 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -1287,14 +1287,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages, | |||
1287 | return -EPERM; | 1287 | return -EPERM; |
1288 | 1288 | ||
1289 | /* Find the mm_struct */ | 1289 | /* Find the mm_struct */ |
1290 | read_lock(&tasklist_lock); | 1290 | rcu_read_lock(); |
1291 | task = pid ? find_task_by_vpid(pid) : current; | 1291 | task = pid ? find_task_by_vpid(pid) : current; |
1292 | if (!task) { | 1292 | if (!task) { |
1293 | read_unlock(&tasklist_lock); | 1293 | rcu_read_unlock(); |
1294 | return -ESRCH; | 1294 | return -ESRCH; |
1295 | } | 1295 | } |
1296 | mm = get_task_mm(task); | 1296 | mm = get_task_mm(task); |
1297 | read_unlock(&tasklist_lock); | 1297 | rcu_read_unlock(); |
1298 | 1298 | ||
1299 | if (!mm) | 1299 | if (!mm) |
1300 | return -EINVAL; | 1300 | return -EINVAL; |
diff --git a/mm/mremap.c b/mm/mremap.c index 9925b6391b80..1de98d492ddc 100644 --- a/mm/mremap.c +++ b/mm/mremap.c | |||
@@ -94,9 +94,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd, | |||
94 | */ | 94 | */ |
95 | mapping = vma->vm_file->f_mapping; | 95 | mapping = vma->vm_file->f_mapping; |
96 | spin_lock(&mapping->i_mmap_lock); | 96 | spin_lock(&mapping->i_mmap_lock); |
97 | if (new_vma->vm_truncate_count && | 97 | new_vma->vm_truncate_count = 0; |
98 | new_vma->vm_truncate_count != vma->vm_truncate_count) | ||
99 | new_vma->vm_truncate_count = 0; | ||
100 | } | 98 | } |
101 | 99 | ||
102 | /* | 100 | /* |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index a873e61e312e..cdef1d4b4e47 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -5376,10 +5376,9 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count) | |||
5376 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { | 5376 | for (found = 0, iter = 0; iter < pageblock_nr_pages; iter++) { |
5377 | unsigned long check = pfn + iter; | 5377 | unsigned long check = pfn + iter; |
5378 | 5378 | ||
5379 | if (!pfn_valid_within(check)) { | 5379 | if (!pfn_valid_within(check)) |
5380 | iter++; | ||
5381 | continue; | 5380 | continue; |
5382 | } | 5381 | |
5383 | page = pfn_to_page(check); | 5382 | page = pfn_to_page(check); |
5384 | if (!page_count(page)) { | 5383 | if (!page_count(page)) { |
5385 | if (PageBuddy(page)) | 5384 | if (PageBuddy(page)) |
@@ -497,41 +497,51 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
497 | struct mm_struct *mm = vma->vm_mm; | 497 | struct mm_struct *mm = vma->vm_mm; |
498 | int referenced = 0; | 498 | int referenced = 0; |
499 | 499 | ||
500 | /* | ||
501 | * Don't want to elevate referenced for mlocked page that gets this far, | ||
502 | * in order that it progresses to try_to_unmap and is moved to the | ||
503 | * unevictable list. | ||
504 | */ | ||
505 | if (vma->vm_flags & VM_LOCKED) { | ||
506 | *mapcount = 0; /* break early from loop */ | ||
507 | *vm_flags |= VM_LOCKED; | ||
508 | goto out; | ||
509 | } | ||
510 | |||
511 | /* Pretend the page is referenced if the task has the | ||
512 | swap token and is in the middle of a page fault. */ | ||
513 | if (mm != current->mm && has_swap_token(mm) && | ||
514 | rwsem_is_locked(&mm->mmap_sem)) | ||
515 | referenced++; | ||
516 | |||
517 | if (unlikely(PageTransHuge(page))) { | 500 | if (unlikely(PageTransHuge(page))) { |
518 | pmd_t *pmd; | 501 | pmd_t *pmd; |
519 | 502 | ||
520 | spin_lock(&mm->page_table_lock); | 503 | spin_lock(&mm->page_table_lock); |
504 | /* | ||
505 | * rmap might return false positives; we must filter | ||
506 | * these out using page_check_address_pmd(). | ||
507 | */ | ||
521 | pmd = page_check_address_pmd(page, mm, address, | 508 | pmd = page_check_address_pmd(page, mm, address, |
522 | PAGE_CHECK_ADDRESS_PMD_FLAG); | 509 | PAGE_CHECK_ADDRESS_PMD_FLAG); |
523 | if (pmd && !pmd_trans_splitting(*pmd) && | 510 | if (!pmd) { |
524 | pmdp_clear_flush_young_notify(vma, address, pmd)) | 511 | spin_unlock(&mm->page_table_lock); |
512 | goto out; | ||
513 | } | ||
514 | |||
515 | if (vma->vm_flags & VM_LOCKED) { | ||
516 | spin_unlock(&mm->page_table_lock); | ||
517 | *mapcount = 0; /* break early from loop */ | ||
518 | *vm_flags |= VM_LOCKED; | ||
519 | goto out; | ||
520 | } | ||
521 | |||
522 | /* go ahead even if the pmd is pmd_trans_splitting() */ | ||
523 | if (pmdp_clear_flush_young_notify(vma, address, pmd)) | ||
525 | referenced++; | 524 | referenced++; |
526 | spin_unlock(&mm->page_table_lock); | 525 | spin_unlock(&mm->page_table_lock); |
527 | } else { | 526 | } else { |
528 | pte_t *pte; | 527 | pte_t *pte; |
529 | spinlock_t *ptl; | 528 | spinlock_t *ptl; |
530 | 529 | ||
530 | /* | ||
531 | * rmap might return false positives; we must filter | ||
532 | * these out using page_check_address(). | ||
533 | */ | ||
531 | pte = page_check_address(page, mm, address, &ptl, 0); | 534 | pte = page_check_address(page, mm, address, &ptl, 0); |
532 | if (!pte) | 535 | if (!pte) |
533 | goto out; | 536 | goto out; |
534 | 537 | ||
538 | if (vma->vm_flags & VM_LOCKED) { | ||
539 | pte_unmap_unlock(pte, ptl); | ||
540 | *mapcount = 0; /* break early from loop */ | ||
541 | *vm_flags |= VM_LOCKED; | ||
542 | goto out; | ||
543 | } | ||
544 | |||
535 | if (ptep_clear_flush_young_notify(vma, address, pte)) { | 545 | if (ptep_clear_flush_young_notify(vma, address, pte)) { |
536 | /* | 546 | /* |
537 | * Don't treat a reference through a sequentially read | 547 | * Don't treat a reference through a sequentially read |
@@ -546,6 +556,12 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma, | |||
546 | pte_unmap_unlock(pte, ptl); | 556 | pte_unmap_unlock(pte, ptl); |
547 | } | 557 | } |
548 | 558 | ||
559 | /* Pretend the page is referenced if the task has the | ||
560 | swap token and is in the middle of a page fault. */ | ||
561 | if (mm != current->mm && has_swap_token(mm) && | ||
562 | rwsem_is_locked(&mm->mmap_sem)) | ||
563 | referenced++; | ||
564 | |||
549 | (*mapcount)--; | 565 | (*mapcount)--; |
550 | 566 | ||
551 | if (referenced) | 567 | if (referenced) |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 07a458d72fa8..0341c5700e34 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1940,7 +1940,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
1940 | 1940 | ||
1941 | error = -EINVAL; | 1941 | error = -EINVAL; |
1942 | if (S_ISBLK(inode->i_mode)) { | 1942 | if (S_ISBLK(inode->i_mode)) { |
1943 | bdev = I_BDEV(inode); | 1943 | bdev = bdgrab(I_BDEV(inode)); |
1944 | error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, | 1944 | error = blkdev_get(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, |
1945 | sys_swapon); | 1945 | sys_swapon); |
1946 | if (error < 0) { | 1946 | if (error < 0) { |
diff --git a/mm/truncate.c b/mm/truncate.c index 49feb46e77b8..d64296be00d3 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -225,6 +225,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
225 | next = start; | 225 | next = start; |
226 | while (next <= end && | 226 | while (next <= end && |
227 | pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { | 227 | pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) { |
228 | mem_cgroup_uncharge_start(); | ||
228 | for (i = 0; i < pagevec_count(&pvec); i++) { | 229 | for (i = 0; i < pagevec_count(&pvec); i++) { |
229 | struct page *page = pvec.pages[i]; | 230 | struct page *page = pvec.pages[i]; |
230 | pgoff_t page_index = page->index; | 231 | pgoff_t page_index = page->index; |
@@ -247,6 +248,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
247 | unlock_page(page); | 248 | unlock_page(page); |
248 | } | 249 | } |
249 | pagevec_release(&pvec); | 250 | pagevec_release(&pvec); |
251 | mem_cgroup_uncharge_end(); | ||
250 | cond_resched(); | 252 | cond_resched(); |
251 | } | 253 | } |
252 | 254 | ||
diff --git a/mm/vmscan.c b/mm/vmscan.c index 17497d0cd8b9..6771ea70bfe7 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -1841,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone, | |||
1841 | if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) | 1841 | if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) |
1842 | return false; | 1842 | return false; |
1843 | 1843 | ||
1844 | /* | 1844 | /* Consider stopping depending on scan and reclaim activity */ |
1845 | * If we failed to reclaim and have scanned the full list, stop. | 1845 | if (sc->gfp_mask & __GFP_REPEAT) { |
1846 | * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far | 1846 | /* |
1847 | * faster but obviously would be less likely to succeed | 1847 | * For __GFP_REPEAT allocations, stop reclaiming if the |
1848 | * allocation. If this is desirable, use GFP_REPEAT to decide | 1848 | * full LRU list has been scanned and we are still failing |
1849 | * if both reclaimed and scanned should be checked or just | 1849 | * to reclaim pages. This full LRU scan is potentially |
1850 | * reclaimed | 1850 | * expensive but a __GFP_REPEAT caller really wants to succeed |
1851 | */ | 1851 | */ |
1852 | if (!nr_reclaimed && !nr_scanned) | 1852 | if (!nr_reclaimed && !nr_scanned) |
1853 | return false; | 1853 | return false; |
1854 | } else { | ||
1855 | /* | ||
1856 | * For non-__GFP_REPEAT allocations which can presumably | ||
1857 | * fail without consequence, stop if we failed to reclaim | ||
1858 | * any pages from the last SWAP_CLUSTER_MAX number of | ||
1859 | * pages that were scanned. This will return to the | ||
1860 | * caller faster at the risk that reclaim/compaction and | ||
1861 | * the resulting allocation attempt fail | ||
1862 | */ | ||
1863 | if (!nr_reclaimed) | ||
1864 | return false; | ||
1865 | } | ||
1854 | 1866 | ||
1855 | /* | 1867 | /* |
1856 | * If we have not reclaimed enough pages for compaction and the | 1868 | * If we have not reclaimed enough pages for compaction and the |
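The rewritten check in should_continue_reclaim() keys the stop condition off __GFP_REPEAT: a __GFP_REPEAT caller really wants the allocation to succeed, so reclaim only gives up after a full LRU scan reclaimed nothing, while ordinary callers bail out as soon as a scan batch reclaims nothing. A stand-alone model of just that branch; the gfp flag value and the counters are stand-ins (assumptions for illustration):

#include <stdbool.h>
#include <stdio.h>

#define FAKE_GFP_REPEAT (1u << 0)   /* stand-in for __GFP_REPEAT */

static bool should_continue(unsigned int gfp_mask,
			    unsigned long nr_reclaimed,
			    unsigned long nr_scanned)
{
	if (gfp_mask & FAKE_GFP_REPEAT) {
		/* Keep going unless a full scan reclaimed nothing at all. */
		if (!nr_reclaimed && !nr_scanned)
			return false;
	} else {
		/* Cheap callers stop as soon as a batch reclaims nothing. */
		if (!nr_reclaimed)
			return false;
	}
	return true;   /* ... the compaction-readiness checks would follow ... */
}

int main(void)
{
	printf("%d\n", should_continue(FAKE_GFP_REPEAT, 0, 512)); /* 1: keep trying */
	printf("%d\n", should_continue(0, 0, 512));               /* 0: give up     */
	return 0;
}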
diff --git a/net/Makefile b/net/Makefile index a3330ebe2c53..a51d9465e628 100644 --- a/net/Makefile +++ b/net/Makefile | |||
@@ -19,9 +19,7 @@ obj-$(CONFIG_NETFILTER) += netfilter/ | |||
19 | obj-$(CONFIG_INET) += ipv4/ | 19 | obj-$(CONFIG_INET) += ipv4/ |
20 | obj-$(CONFIG_XFRM) += xfrm/ | 20 | obj-$(CONFIG_XFRM) += xfrm/ |
21 | obj-$(CONFIG_UNIX) += unix/ | 21 | obj-$(CONFIG_UNIX) += unix/ |
22 | ifneq ($(CONFIG_IPV6),) | 22 | obj-$(CONFIG_NET) += ipv6/ |
23 | obj-y += ipv6/ | ||
24 | endif | ||
25 | obj-$(CONFIG_PACKET) += packet/ | 23 | obj-$(CONFIG_PACKET) += packet/ |
26 | obj-$(CONFIG_NET_KEY) += key/ | 24 | obj-$(CONFIG_NET_KEY) += key/ |
27 | obj-$(CONFIG_BRIDGE) += bridge/ | 25 | obj-$(CONFIG_BRIDGE) += bridge/ |
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 2575c2db6404..d7b9af4703d0 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c | |||
@@ -727,7 +727,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) | |||
727 | break; | 727 | break; |
728 | } | 728 | } |
729 | 729 | ||
730 | tty_unlock(); | ||
730 | schedule(); | 731 | schedule(); |
732 | tty_lock(); | ||
731 | } | 733 | } |
732 | set_current_state(TASK_RUNNING); | 734 | set_current_state(TASK_RUNNING); |
733 | remove_wait_queue(&dev->wait, &wait); | 735 | remove_wait_queue(&dev->wait, &wait); |
diff --git a/net/bridge/Kconfig b/net/bridge/Kconfig index 9190ae462cb4..6dee7bf648a9 100644 --- a/net/bridge/Kconfig +++ b/net/bridge/Kconfig | |||
@@ -6,6 +6,7 @@ config BRIDGE | |||
6 | tristate "802.1d Ethernet Bridging" | 6 | tristate "802.1d Ethernet Bridging" |
7 | select LLC | 7 | select LLC |
8 | select STP | 8 | select STP |
9 | depends on IPV6 || IPV6=n | ||
9 | ---help--- | 10 | ---help--- |
10 | If you say Y here, then your Linux box will be able to act as an | 11 | If you say Y here, then your Linux box will be able to act as an |
11 | Ethernet bridge, which means that the different Ethernet segments it | 12 | Ethernet bridge, which means that the different Ethernet segments it |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 09d5c0987925..030a002ff8ee 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -37,10 +37,9 @@ | |||
37 | rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) | 37 | rcu_dereference_protected(X, lockdep_is_held(&br->multicast_lock)) |
38 | 38 | ||
39 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | 39 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) |
40 | static inline int ipv6_is_local_multicast(const struct in6_addr *addr) | 40 | static inline int ipv6_is_transient_multicast(const struct in6_addr *addr) |
41 | { | 41 | { |
42 | if (ipv6_addr_is_multicast(addr) && | 42 | if (ipv6_addr_is_multicast(addr) && IPV6_ADDR_MC_FLAG_TRANSIENT(addr)) |
43 | IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL) | ||
44 | return 1; | 43 | return 1; |
45 | return 0; | 44 | return 0; |
46 | } | 45 | } |
@@ -435,7 +434,6 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | |||
435 | eth = eth_hdr(skb); | 434 | eth = eth_hdr(skb); |
436 | 435 | ||
437 | memcpy(eth->h_source, br->dev->dev_addr, 6); | 436 | memcpy(eth->h_source, br->dev->dev_addr, 6); |
438 | ipv6_eth_mc_map(group, eth->h_dest); | ||
439 | eth->h_proto = htons(ETH_P_IPV6); | 437 | eth->h_proto = htons(ETH_P_IPV6); |
440 | skb_put(skb, sizeof(*eth)); | 438 | skb_put(skb, sizeof(*eth)); |
441 | 439 | ||
@@ -447,8 +445,10 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, | |||
447 | ip6h->payload_len = htons(8 + sizeof(*mldq)); | 445 | ip6h->payload_len = htons(8 + sizeof(*mldq)); |
448 | ip6h->nexthdr = IPPROTO_HOPOPTS; | 446 | ip6h->nexthdr = IPPROTO_HOPOPTS; |
449 | ip6h->hop_limit = 1; | 447 | ip6h->hop_limit = 1; |
450 | ipv6_addr_set(&ip6h->saddr, 0, 0, 0, 0); | 448 | ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0, |
449 | &ip6h->saddr); | ||
451 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); | 450 | ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1)); |
451 | ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); | ||
452 | 452 | ||
453 | hopopt = (u8 *)(ip6h + 1); | 453 | hopopt = (u8 *)(ip6h + 1); |
454 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ | 454 | hopopt[0] = IPPROTO_ICMPV6; /* next hdr */ |
@@ -780,11 +780,11 @@ static int br_ip6_multicast_add_group(struct net_bridge *br, | |||
780 | { | 780 | { |
781 | struct br_ip br_group; | 781 | struct br_ip br_group; |
782 | 782 | ||
783 | if (ipv6_is_local_multicast(group)) | 783 | if (!ipv6_is_transient_multicast(group)) |
784 | return 0; | 784 | return 0; |
785 | 785 | ||
786 | ipv6_addr_copy(&br_group.u.ip6, group); | 786 | ipv6_addr_copy(&br_group.u.ip6, group); |
787 | br_group.proto = htons(ETH_P_IP); | 787 | br_group.proto = htons(ETH_P_IPV6); |
788 | 788 | ||
789 | return br_multicast_add_group(br, port, &br_group); | 789 | return br_multicast_add_group(br, port, &br_group); |
790 | } | 790 | } |
@@ -1013,18 +1013,19 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br, | |||
1013 | 1013 | ||
1014 | nsrcs = skb_header_pointer(skb, | 1014 | nsrcs = skb_header_pointer(skb, |
1015 | len + offsetof(struct mld2_grec, | 1015 | len + offsetof(struct mld2_grec, |
1016 | grec_mca), | 1016 | grec_nsrcs), |
1017 | sizeof(_nsrcs), &_nsrcs); | 1017 | sizeof(_nsrcs), &_nsrcs); |
1018 | if (!nsrcs) | 1018 | if (!nsrcs) |
1019 | return -EINVAL; | 1019 | return -EINVAL; |
1020 | 1020 | ||
1021 | if (!pskb_may_pull(skb, | 1021 | if (!pskb_may_pull(skb, |
1022 | len + sizeof(*grec) + | 1022 | len + sizeof(*grec) + |
1023 | sizeof(struct in6_addr) * (*nsrcs))) | 1023 | sizeof(struct in6_addr) * ntohs(*nsrcs))) |
1024 | return -EINVAL; | 1024 | return -EINVAL; |
1025 | 1025 | ||
1026 | grec = (struct mld2_grec *)(skb->data + len); | 1026 | grec = (struct mld2_grec *)(skb->data + len); |
1027 | len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); | 1027 | len += sizeof(*grec) + |
1028 | sizeof(struct in6_addr) * ntohs(*nsrcs); | ||
1028 | 1029 | ||
1029 | /* We treat these as MLDv1 reports for now. */ | 1030 | /* We treat these as MLDv1 reports for now. */ |
1030 | switch (grec->grec_type) { | 1031 | switch (grec->grec_type) { |
@@ -1340,7 +1341,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br, | |||
1340 | { | 1341 | { |
1341 | struct br_ip br_group; | 1342 | struct br_ip br_group; |
1342 | 1343 | ||
1343 | if (ipv6_is_local_multicast(group)) | 1344 | if (!ipv6_is_transient_multicast(group)) |
1344 | return; | 1345 | return; |
1345 | 1346 | ||
1346 | ipv6_addr_copy(&br_group.u.ip6, group); | 1347 | ipv6_addr_copy(&br_group.u.ip6, group); |
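The MLDv2 report fix reads the source count from grec_nsrcs (the old code pointed at grec_mca, the group address) and converts it from network byte order before sizing the record, so the pskb_may_pull() length check and the len advance now match what is actually on the wire. A user-space model of the corrected length computation, with a trimmed-down group record in place of struct mld2_grec (assumption for illustration):

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_in6_addr { uint8_t s6_addr[16]; };

/* Trimmed-down MLDv2 group record: type, aux len, source count, group. */
struct fake_grec {
	uint8_t  grec_type;
	uint8_t  grec_auxwords;
	uint16_t grec_nsrcs;             /* big-endian on the wire */
	struct fake_in6_addr grec_mca;
};

/* Returns the on-wire size of one group record, or 0 if it would overflow. */
static size_t grec_len(const uint8_t *buf, size_t off, size_t buflen)
{
	struct fake_grec grec;
	size_t need;

	if (off + sizeof(grec) > buflen)
		return 0;
	memcpy(&grec, buf + off, sizeof(grec));

	/* The count comes from grec_nsrcs and must be byte-swapped first. */
	need = sizeof(grec) +
	       sizeof(struct fake_in6_addr) * (size_t)ntohs(grec.grec_nsrcs);

	return (off + need <= buflen) ? need : 0;   /* pskb_may_pull() analogue */
}

int main(void)
{
	uint8_t pkt[128] = { 0 };
	struct fake_grec grec = { .grec_type = 1, .grec_nsrcs = htons(2) };

	memcpy(pkt, &grec, sizeof(grec));
	printf("record length: %zu bytes\n", grec_len(pkt, 0, sizeof(pkt)));
	return 0;
}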
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 35b36b86d762..05f357828a2f 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -336,7 +336,6 @@ static void reset_connection(struct ceph_connection *con) | |||
336 | ceph_msg_put(con->out_msg); | 336 | ceph_msg_put(con->out_msg); |
337 | con->out_msg = NULL; | 337 | con->out_msg = NULL; |
338 | } | 338 | } |
339 | con->out_keepalive_pending = false; | ||
340 | con->in_seq = 0; | 339 | con->in_seq = 0; |
341 | con->in_seq_acked = 0; | 340 | con->in_seq_acked = 0; |
342 | } | 341 | } |
@@ -1248,8 +1247,6 @@ static int process_connect(struct ceph_connection *con) | |||
1248 | con->auth_retry); | 1247 | con->auth_retry); |
1249 | if (con->auth_retry == 2) { | 1248 | if (con->auth_retry == 2) { |
1250 | con->error_msg = "connect authorization failure"; | 1249 | con->error_msg = "connect authorization failure"; |
1251 | reset_connection(con); | ||
1252 | set_bit(CLOSED, &con->state); | ||
1253 | return -1; | 1250 | return -1; |
1254 | } | 1251 | } |
1255 | con->auth_retry = 1; | 1252 | con->auth_retry = 1; |
@@ -1715,14 +1712,6 @@ more: | |||
1715 | 1712 | ||
1716 | /* open the socket first? */ | 1713 | /* open the socket first? */ |
1717 | if (con->sock == NULL) { | 1714 | if (con->sock == NULL) { |
1718 | /* | ||
1719 | * if we were STANDBY and are reconnecting _this_ | ||
1720 | * connection, bump connect_seq now. Always bump | ||
1721 | * global_seq. | ||
1722 | */ | ||
1723 | if (test_and_clear_bit(STANDBY, &con->state)) | ||
1724 | con->connect_seq++; | ||
1725 | |||
1726 | prepare_write_banner(msgr, con); | 1715 | prepare_write_banner(msgr, con); |
1727 | prepare_write_connect(msgr, con, 1); | 1716 | prepare_write_connect(msgr, con, 1); |
1728 | prepare_read_banner(con); | 1717 | prepare_read_banner(con); |
@@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work) | |||
1951 | work.work); | 1940 | work.work); |
1952 | 1941 | ||
1953 | mutex_lock(&con->mutex); | 1942 | mutex_lock(&con->mutex); |
1943 | if (test_and_clear_bit(BACKOFF, &con->state)) { | ||
1944 | dout("con_work %p backing off\n", con); | ||
1945 | if (queue_delayed_work(ceph_msgr_wq, &con->work, | ||
1946 | round_jiffies_relative(con->delay))) { | ||
1947 | dout("con_work %p backoff %lu\n", con, con->delay); | ||
1948 | mutex_unlock(&con->mutex); | ||
1949 | return; | ||
1950 | } else { | ||
1951 | con->ops->put(con); | ||
1952 | dout("con_work %p FAILED to back off %lu\n", con, | ||
1953 | con->delay); | ||
1954 | } | ||
1955 | } | ||
1954 | 1956 | ||
1957 | if (test_bit(STANDBY, &con->state)) { | ||
1958 | dout("con_work %p STANDBY\n", con); | ||
1959 | goto done; | ||
1960 | } | ||
1955 | if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ | 1961 | if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ |
1956 | dout("con_work CLOSED\n"); | 1962 | dout("con_work CLOSED\n"); |
1957 | con_close_socket(con); | 1963 | con_close_socket(con); |
@@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con) | |||
2008 | /* Requeue anything that hasn't been acked */ | 2014 | /* Requeue anything that hasn't been acked */ |
2009 | list_splice_init(&con->out_sent, &con->out_queue); | 2015 | list_splice_init(&con->out_sent, &con->out_queue); |
2010 | 2016 | ||
2011 | /* If there are no messages in the queue, place the connection | 2017 | /* If there are no messages queued or keepalive pending, place |
2012 | * in a STANDBY state (i.e., don't try to reconnect just yet). */ | 2018 | * the connection in a STANDBY state */ |
2013 | if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { | 2019 | if (list_empty(&con->out_queue) && |
2014 | dout("fault setting STANDBY\n"); | 2020 | !test_bit(KEEPALIVE_PENDING, &con->state)) { |
2021 | dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); | ||
2022 | clear_bit(WRITE_PENDING, &con->state); | ||
2015 | set_bit(STANDBY, &con->state); | 2023 | set_bit(STANDBY, &con->state); |
2016 | } else { | 2024 | } else { |
2017 | /* retry after a delay. */ | 2025 | /* retry after a delay. */ |
@@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con) | |||
2019 | con->delay = BASE_DELAY_INTERVAL; | 2027 | con->delay = BASE_DELAY_INTERVAL; |
2020 | else if (con->delay < MAX_DELAY_INTERVAL) | 2028 | else if (con->delay < MAX_DELAY_INTERVAL) |
2021 | con->delay *= 2; | 2029 | con->delay *= 2; |
2022 | dout("fault queueing %p delay %lu\n", con, con->delay); | ||
2023 | con->ops->get(con); | 2030 | con->ops->get(con); |
2024 | if (queue_delayed_work(ceph_msgr_wq, &con->work, | 2031 | if (queue_delayed_work(ceph_msgr_wq, &con->work, |
2025 | round_jiffies_relative(con->delay)) == 0) | 2032 | round_jiffies_relative(con->delay))) { |
2033 | dout("fault queued %p delay %lu\n", con, con->delay); | ||
2034 | } else { | ||
2026 | con->ops->put(con); | 2035 | con->ops->put(con); |
2036 | dout("fault failed to queue %p delay %lu, backoff\n", | ||
2037 | con, con->delay); | ||
2038 | /* | ||
2039 | * In many cases we see a socket state change | ||
2040 | * while con_work is running and end up | ||
2041 | * queuing (non-delayed) work, such that we | ||
2042 | * can't backoff with a delay. Set a flag so | ||
2043 | * that when con_work restarts we schedule the | ||
2044 | * delay then. | ||
2045 | */ | ||
2046 | set_bit(BACKOFF, &con->state); | ||
2047 | } | ||
2027 | } | 2048 | } |
2028 | 2049 | ||
2029 | out_unlock: | 2050 | out_unlock: |
@@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr) | |||
2094 | } | 2115 | } |
2095 | EXPORT_SYMBOL(ceph_messenger_destroy); | 2116 | EXPORT_SYMBOL(ceph_messenger_destroy); |
2096 | 2117 | ||
2118 | static void clear_standby(struct ceph_connection *con) | ||
2119 | { | ||
2120 | /* come back from STANDBY? */ | ||
2121 | if (test_and_clear_bit(STANDBY, &con->state)) { | ||
2122 | mutex_lock(&con->mutex); | ||
2123 | dout("clear_standby %p and ++connect_seq\n", con); | ||
2124 | con->connect_seq++; | ||
2125 | WARN_ON(test_bit(WRITE_PENDING, &con->state)); | ||
2126 | WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); | ||
2127 | mutex_unlock(&con->mutex); | ||
2128 | } | ||
2129 | } | ||
2130 | |||
2097 | /* | 2131 | /* |
2098 | * Queue up an outgoing message on the given connection. | 2132 | * Queue up an outgoing message on the given connection. |
2099 | */ | 2133 | */ |
@@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) | |||
2126 | 2160 | ||
2127 | /* if there wasn't anything waiting to send before, queue | 2161 | /* if there wasn't anything waiting to send before, queue |
2128 | * new work */ | 2162 | * new work */ |
2163 | clear_standby(con); | ||
2129 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2164 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
2130 | queue_con(con); | 2165 | queue_con(con); |
2131 | } | 2166 | } |
@@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) | |||
2191 | */ | 2226 | */ |
2192 | void ceph_con_keepalive(struct ceph_connection *con) | 2227 | void ceph_con_keepalive(struct ceph_connection *con) |
2193 | { | 2228 | { |
2229 | dout("con_keepalive %p\n", con); | ||
2230 | clear_standby(con); | ||
2194 | if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && | 2231 | if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && |
2195 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2232 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
2196 | queue_con(con); | 2233 | queue_con(con); |
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 1a040e64c69f..cd9c21df87d1 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c | |||
@@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data, | |||
16 | int num_pages, bool write_page) | 16 | int num_pages, bool write_page) |
17 | { | 17 | { |
18 | struct page **pages; | 18 | struct page **pages; |
19 | int rc; | 19 | int got = 0; |
20 | int rc = 0; | ||
20 | 21 | ||
21 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); | 22 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); |
22 | if (!pages) | 23 | if (!pages) |
23 | return ERR_PTR(-ENOMEM); | 24 | return ERR_PTR(-ENOMEM); |
24 | 25 | ||
25 | down_read(¤t->mm->mmap_sem); | 26 | down_read(¤t->mm->mmap_sem); |
26 | rc = get_user_pages(current, current->mm, (unsigned long)data, | 27 | while (got < num_pages) { |
27 | num_pages, write_page, 0, pages, NULL); | 28 | rc = get_user_pages(current, current->mm, |
29 | (unsigned long)data + ((unsigned long)got * PAGE_SIZE), | ||
30 | num_pages - got, write_page, 0, pages + got, NULL); | ||
31 | if (rc < 0) | ||
32 | break; | ||
33 | BUG_ON(rc == 0); | ||
34 | got += rc; | ||
35 | } | ||
28 | up_read(¤t->mm->mmap_sem); | 36 | up_read(¤t->mm->mmap_sem); |
29 | if (rc < num_pages) | 37 | if (rc < 0) |
30 | goto fail; | 38 | goto fail; |
31 | return pages; | 39 | return pages; |
32 | 40 | ||
33 | fail: | 41 | fail: |
34 | ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); | 42 | ceph_put_page_vector(pages, got, false); |
35 | return ERR_PTR(rc); | 43 | return ERR_PTR(rc); |
36 | } | 44 | } |
37 | EXPORT_SYMBOL(ceph_get_direct_page_vector); | 45 | EXPORT_SYMBOL(ceph_get_direct_page_vector); |
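get_user_pages() is allowed to pin fewer pages than requested, which is why the rewritten ceph_get_direct_page_vector() loops until the whole range is covered and, on error, releases only the pages it actually got. The same loop condensed into a sketch (same 8-argument get_user_pages() signature as in the hunk; pin_user_range is an illustrative name):

    static struct page **pin_user_range(const char __user *data,
                                        int num_pages, bool write)
    {
            struct page **pages;
            int got = 0, rc = 0;

            pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS);
            if (!pages)
                    return ERR_PTR(-ENOMEM);

            down_read(&current->mm->mmap_sem);
            while (got < num_pages) {
                    rc = get_user_pages(current, current->mm,
                                        (unsigned long)data +
                                        ((unsigned long)got * PAGE_SIZE),
                                        num_pages - got, write, 0,
                                        pages + got, NULL);
                    if (rc < 0)
                            break;          /* e.g. -EFAULT */
                    BUG_ON(rc == 0);        /* valid range must make progress */
                    got += rc;              /* partial pin, keep going */
            }
            up_read(&current->mm->mmap_sem);

            if (rc < 0) {
                    /* drop only the references we actually hold */
                    ceph_put_page_vector(pages, got, false);
                    return ERR_PTR(rc);
            }
            return pages;
    }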
diff --git a/net/core/dev.c b/net/core/dev.c index 8ae6631abcc2..6561021d22d1 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1114,13 +1114,21 @@ EXPORT_SYMBOL(netdev_bonding_change); | |||
1114 | void dev_load(struct net *net, const char *name) | 1114 | void dev_load(struct net *net, const char *name) |
1115 | { | 1115 | { |
1116 | struct net_device *dev; | 1116 | struct net_device *dev; |
1117 | int no_module; | ||
1117 | 1118 | ||
1118 | rcu_read_lock(); | 1119 | rcu_read_lock(); |
1119 | dev = dev_get_by_name_rcu(net, name); | 1120 | dev = dev_get_by_name_rcu(net, name); |
1120 | rcu_read_unlock(); | 1121 | rcu_read_unlock(); |
1121 | 1122 | ||
1122 | if (!dev && capable(CAP_NET_ADMIN)) | 1123 | no_module = !dev; |
1123 | request_module("%s", name); | 1124 | if (no_module && capable(CAP_NET_ADMIN)) |
1125 | no_module = request_module("netdev-%s", name); | ||
1126 | if (no_module && capable(CAP_SYS_MODULE)) { | ||
1127 | if (!request_module("%s", name)) | ||
1128 | pr_err("Loading kernel module for a network device " | ||
1129 | "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s " | ||
1130 | "instead\n", name); | ||
1131 | } | ||
1124 | } | 1132 | } |
1125 | EXPORT_SYMBOL(dev_load); | 1133 | EXPORT_SYMBOL(dev_load); |
1126 | 1134 | ||
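dev_load() now prefers a scoped "netdev-%s" module alias, loadable with CAP_NET_ADMIN alone, and only falls back to the raw device name, with a deprecation warning, for callers that still hold CAP_SYS_MODULE. The MODULE_ALIAS_NETDEV() conversions for gre0, tunl0, sit0 and ip6tnl0 further down in this diff are the driver-side half of the same mechanism. Driver opt-in is a one-liner; the sketch below assumes MODULE_ALIAS_NETDEV("x") expands to MODULE_ALIAS("netdev-x") as introduced by this series, and "mytunl0" is a made-up device name:

    /* in the (hypothetical) mytunl driver */
    MODULE_LICENSE("GPL");
    MODULE_ALIAS_NETDEV("mytunl0");  /* dev_load() will try
                                      * request_module("netdev-mytunl0") */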
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 508f9c18992f..133fd22ea287 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
@@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | |||
144 | 144 | ||
145 | list_for_each_entry(ha, &from_list->list, list) { | 145 | list_for_each_entry(ha, &from_list->list, list) { |
146 | type = addr_type ? addr_type : ha->type; | 146 | type = addr_type ? addr_type : ha->type; |
147 | __hw_addr_del(to_list, ha->addr, addr_len, addr_type); | 147 | __hw_addr_del(to_list, ha->addr, addr_len, type); |
148 | } | 148 | } |
149 | } | 149 | } |
150 | EXPORT_SYMBOL(__hw_addr_del_multiple); | 150 | EXPORT_SYMBOL(__hw_addr_del_multiple); |
diff --git a/net/core/pktgen.c b/net/core/pktgen.c index a9e7fc4c461f..b5bada92f637 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c | |||
@@ -3321,7 +3321,7 @@ static void show_results(struct pktgen_dev *pkt_dev, int nr_frags) | |||
3321 | pkt_dev->started_at); | 3321 | pkt_dev->started_at); |
3322 | ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); | 3322 | ktime_t idle = ns_to_ktime(pkt_dev->idle_acc); |
3323 | 3323 | ||
3324 | p += sprintf(p, "OK: %llu(c%llu+d%llu) nsec, %llu (%dbyte,%dfrags)\n", | 3324 | p += sprintf(p, "OK: %llu(c%llu+d%llu) usec, %llu (%dbyte,%dfrags)\n", |
3325 | (unsigned long long)ktime_to_us(elapsed), | 3325 | (unsigned long long)ktime_to_us(elapsed), |
3326 | (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), | 3326 | (unsigned long long)ktime_to_us(ktime_sub(elapsed, idle)), |
3327 | (unsigned long long)ktime_to_us(idle), | 3327 | (unsigned long long)ktime_to_us(idle), |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d5074a567289..c44348adba3b 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, | |||
1193 | goto err; | 1193 | goto err; |
1194 | } | 1194 | } |
1195 | 1195 | ||
1196 | if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { | 1196 | if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { |
1197 | struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); | 1197 | struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); |
1198 | err = ops->ieee_setpfc(netdev, pfc); | 1198 | err = ops->ieee_setpfc(netdev, pfc); |
1199 | if (err) | 1199 | if (err) |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 8cde009e8b85..4222e7a654b0 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
@@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
614 | /* Caller (dccp_v4_do_rcv) will send Reset */ | 614 | /* Caller (dccp_v4_do_rcv) will send Reset */ |
615 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | 615 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; |
616 | return 1; | 616 | return 1; |
617 | } else if (sk->sk_state == DCCP_CLOSED) { | ||
618 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
619 | return 1; | ||
617 | } | 620 | } |
618 | 621 | ||
619 | if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { | 622 | if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { |
@@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
668 | } | 671 | } |
669 | 672 | ||
670 | switch (sk->sk_state) { | 673 | switch (sk->sk_state) { |
671 | case DCCP_CLOSED: | ||
672 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
673 | return 1; | ||
674 | |||
675 | case DCCP_REQUESTING: | 674 | case DCCP_REQUESTING: |
676 | queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); | 675 | queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); |
677 | if (queued >= 0) | 676 | if (queued >= 0) |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 739435a6af39..cfa7a5e1c5c9 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
@@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) | |||
67 | size_t result_len = 0; | 67 | size_t result_len = 0; |
68 | const char *data = _data, *end, *opt; | 68 | const char *data = _data, *end, *opt; |
69 | 69 | ||
70 | kenter("%%%d,%s,'%s',%zu", | 70 | kenter("%%%d,%s,'%*.*s',%zu", |
71 | key->serial, key->description, data, datalen); | 71 | key->serial, key->description, |
72 | (int)datalen, (int)datalen, data, datalen); | ||
72 | 73 | ||
73 | if (datalen <= 1 || !data || data[datalen - 1] != '\0') | 74 | if (datalen <= 1 || !data || data[datalen - 1] != '\0') |
74 | return -EINVAL; | 75 | return -EINVAL; |
@@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) | |||
217 | seq_printf(m, ": %u", key->datalen); | 218 | seq_printf(m, ": %u", key->datalen); |
218 | } | 219 | } |
219 | 220 | ||
221 | /* | ||
222 | * read the DNS data | ||
223 | * - the key's semaphore is read-locked | ||
224 | */ | ||
225 | static long dns_resolver_read(const struct key *key, | ||
226 | char __user *buffer, size_t buflen) | ||
227 | { | ||
228 | if (key->type_data.x[0]) | ||
229 | return key->type_data.x[0]; | ||
230 | |||
231 | return user_read(key, buffer, buflen); | ||
232 | } | ||
233 | |||
220 | struct key_type key_type_dns_resolver = { | 234 | struct key_type key_type_dns_resolver = { |
221 | .name = "dns_resolver", | 235 | .name = "dns_resolver", |
222 | .instantiate = dns_resolver_instantiate, | 236 | .instantiate = dns_resolver_instantiate, |
@@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = { | |||
224 | .revoke = user_revoke, | 238 | .revoke = user_revoke, |
225 | .destroy = user_destroy, | 239 | .destroy = user_destroy, |
226 | .describe = dns_resolver_describe, | 240 | .describe = dns_resolver_describe, |
227 | .read = user_read, | 241 | .read = dns_resolver_read, |
228 | }; | 242 | }; |
229 | 243 | ||
230 | static int __init init_dns_resolver(void) | 244 | static int __init init_dns_resolver(void) |
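With .read wired to dns_resolver_read(), the payload of an instantiated dns_resolver key becomes readable from userspace, and a negative upcall result stashed in type_data.x[0] is returned as an error instead. A minimal userspace sketch using libkeyutils (link with -lkeyutils); "example.org" is only a placeholder description and error handling is trimmed:

    #include <keyutils.h>
    #include <stdio.h>

    int main(void)
    {
            char buf[256];
            key_serial_t key;
            long n;

            key = request_key("dns_resolver", "example.org", NULL,
                              KEY_SPEC_SESSION_KEYRING);
            if (key < 0)
                    return 1;

            n = keyctl_read(key, buf, sizeof(buf)); /* hits dns_resolver_read() */
            if (n < 0)
                    return 1;       /* e.g. a cached negative upcall result */

            printf("%.*s\n", (int)n, buf);
            return 0;
    }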
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index df4616fce929..036652c8166d 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -670,7 +670,7 @@ int devinet_ioctl(struct net *net, unsigned int cmd, void __user *arg) | |||
670 | ifap = &ifa->ifa_next) { | 670 | ifap = &ifa->ifa_next) { |
671 | if (!strcmp(ifr.ifr_name, ifa->ifa_label) && | 671 | if (!strcmp(ifr.ifr_name, ifa->ifa_label) && |
672 | sin_orig.sin_addr.s_addr == | 672 | sin_orig.sin_addr.s_addr == |
673 | ifa->ifa_address) { | 673 | ifa->ifa_local) { |
674 | break; /* found */ | 674 | break; /* found */ |
675 | } | 675 | } |
676 | } | 676 | } |
@@ -1040,8 +1040,8 @@ static void inetdev_send_gratuitous_arp(struct net_device *dev, | |||
1040 | return; | 1040 | return; |
1041 | 1041 | ||
1042 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | 1042 | arp_send(ARPOP_REQUEST, ETH_P_ARP, |
1043 | ifa->ifa_address, dev, | 1043 | ifa->ifa_local, dev, |
1044 | ifa->ifa_address, NULL, | 1044 | ifa->ifa_local, NULL, |
1045 | dev->dev_addr, NULL); | 1045 | dev->dev_addr, NULL); |
1046 | } | 1046 | } |
1047 | 1047 | ||
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index c5af909cf701..3c8dfa16614d 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -505,7 +505,9 @@ restart: | |||
505 | } | 505 | } |
506 | 506 | ||
507 | rcu_read_unlock(); | 507 | rcu_read_unlock(); |
508 | local_bh_disable(); | ||
508 | inet_twsk_deschedule(tw, twdr); | 509 | inet_twsk_deschedule(tw, twdr); |
510 | local_bh_enable(); | ||
509 | inet_twsk_put(tw); | 511 | inet_twsk_put(tw); |
510 | goto restart_rcu; | 512 | goto restart_rcu; |
511 | } | 513 | } |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index 6613edfac28c..d1d0e2c256fc 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -1765,4 +1765,4 @@ module_exit(ipgre_fini); | |||
1765 | MODULE_LICENSE("GPL"); | 1765 | MODULE_LICENSE("GPL"); |
1766 | MODULE_ALIAS_RTNL_LINK("gre"); | 1766 | MODULE_ALIAS_RTNL_LINK("gre"); |
1767 | MODULE_ALIAS_RTNL_LINK("gretap"); | 1767 | MODULE_ALIAS_RTNL_LINK("gretap"); |
1768 | MODULE_ALIAS("gre0"); | 1768 | MODULE_ALIAS_NETDEV("gre0"); |
diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 988f52fba54a..a5f58e7cbb26 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c | |||
@@ -913,4 +913,4 @@ static void __exit ipip_fini(void) | |||
913 | module_init(ipip_init); | 913 | module_init(ipip_init); |
914 | module_exit(ipip_fini); | 914 | module_exit(ipip_fini); |
915 | MODULE_LICENSE("GPL"); | 915 | MODULE_LICENSE("GPL"); |
916 | MODULE_ALIAS("tunl0"); | 916 | MODULE_ALIAS_NETDEV("tunl0"); |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index eb7f82ebf4a3..65f6c0406245 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1222,7 +1222,7 @@ static int tcp_check_dsack(struct sock *sk, struct sk_buff *ack_skb, | |||
1222 | } | 1222 | } |
1223 | 1223 | ||
1224 | /* D-SACK for already forgotten data... Do dumb counting. */ | 1224 | /* D-SACK for already forgotten data... Do dumb counting. */ |
1225 | if (dup_sack && | 1225 | if (dup_sack && tp->undo_marker && tp->undo_retrans && |
1226 | !after(end_seq_0, prior_snd_una) && | 1226 | !after(end_seq_0, prior_snd_una) && |
1227 | after(end_seq_0, tp->undo_marker)) | 1227 | after(end_seq_0, tp->undo_marker)) |
1228 | tp->undo_retrans--; | 1228 | tp->undo_retrans--; |
@@ -1299,7 +1299,8 @@ static u8 tcp_sacktag_one(struct sk_buff *skb, struct sock *sk, | |||
1299 | 1299 | ||
1300 | /* Account D-SACK for retransmitted packet. */ | 1300 | /* Account D-SACK for retransmitted packet. */ |
1301 | if (dup_sack && (sacked & TCPCB_RETRANS)) { | 1301 | if (dup_sack && (sacked & TCPCB_RETRANS)) { |
1302 | if (after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) | 1302 | if (tp->undo_marker && tp->undo_retrans && |
1303 | after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) | ||
1303 | tp->undo_retrans--; | 1304 | tp->undo_retrans--; |
1304 | if (sacked & TCPCB_SACKED_ACKED) | 1305 | if (sacked & TCPCB_SACKED_ACKED) |
1305 | state->reord = min(fack_count, state->reord); | 1306 | state->reord = min(fack_count, state->reord); |
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 406f320336e6..dfa5beb0c1c8 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
@@ -2162,7 +2162,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
2162 | if (!tp->retrans_stamp) | 2162 | if (!tp->retrans_stamp) |
2163 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; | 2163 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; |
2164 | 2164 | ||
2165 | tp->undo_retrans++; | 2165 | tp->undo_retrans += tcp_skb_pcount(skb); |
2166 | 2166 | ||
2167 | /* snd_nxt is stored to detect loss of retransmitted segment, | 2167 | /* snd_nxt is stored to detect loss of retransmitted segment, |
2168 | * see tcp_input.c tcp_sacktag_write_queue(). | 2168 | * see tcp_input.c tcp_sacktag_write_queue(). |
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 4f4483e697bd..e528a42a52be 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c | |||
@@ -57,6 +57,7 @@ | |||
57 | MODULE_AUTHOR("Ville Nuorvala"); | 57 | MODULE_AUTHOR("Ville Nuorvala"); |
58 | MODULE_DESCRIPTION("IPv6 tunneling device"); | 58 | MODULE_DESCRIPTION("IPv6 tunneling device"); |
59 | MODULE_LICENSE("GPL"); | 59 | MODULE_LICENSE("GPL"); |
60 | MODULE_ALIAS_NETDEV("ip6tnl0"); | ||
60 | 61 | ||
61 | #ifdef IP6_TNL_DEBUG | 62 | #ifdef IP6_TNL_DEBUG |
62 | #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) | 63 | #define IP6_TNL_TRACE(x...) printk(KERN_DEBUG "%s:" x "\n", __func__) |
diff --git a/net/ipv6/netfilter/ip6t_LOG.c b/net/ipv6/netfilter/ip6t_LOG.c index 09c88891a753..de338037a736 100644 --- a/net/ipv6/netfilter/ip6t_LOG.c +++ b/net/ipv6/netfilter/ip6t_LOG.c | |||
@@ -410,7 +410,7 @@ fallback: | |||
410 | if (p != NULL) { | 410 | if (p != NULL) { |
411 | sb_add(m, "%02x", *p++); | 411 | sb_add(m, "%02x", *p++); |
412 | for (i = 1; i < len; i++) | 412 | for (i = 1; i < len; i++) |
413 | sb_add(m, ":%02x", p[i]); | 413 | sb_add(m, ":%02x", *p++); |
414 | } | 414 | } |
415 | sb_add(m, " "); | 415 | sb_add(m, " "); |
416 | 416 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a998db6e7895..e7db7014e89f 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -739,8 +739,10 @@ restart: | |||
739 | 739 | ||
740 | if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) | 740 | if (!rt->rt6i_nexthop && !(rt->rt6i_flags & RTF_NONEXTHOP)) |
741 | nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); | 741 | nrt = rt6_alloc_cow(rt, &fl->fl6_dst, &fl->fl6_src); |
742 | else | 742 | else if (!(rt->dst.flags & DST_HOST)) |
743 | nrt = rt6_alloc_clone(rt, &fl->fl6_dst); | 743 | nrt = rt6_alloc_clone(rt, &fl->fl6_dst); |
744 | else | ||
745 | goto out2; | ||
744 | 746 | ||
745 | dst_release(&rt->dst); | 747 | dst_release(&rt->dst); |
746 | rt = nrt ? : net->ipv6.ip6_null_entry; | 748 | rt = nrt ? : net->ipv6.ip6_null_entry; |
@@ -2557,14 +2559,16 @@ static | |||
2557 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, | 2559 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, |
2558 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2560 | void __user *buffer, size_t *lenp, loff_t *ppos) |
2559 | { | 2561 | { |
2560 | struct net *net = current->nsproxy->net_ns; | 2562 | struct net *net; |
2561 | int delay = net->ipv6.sysctl.flush_delay; | 2563 | int delay; |
2562 | if (write) { | 2564 | if (!write) |
2563 | proc_dointvec(ctl, write, buffer, lenp, ppos); | ||
2564 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); | ||
2565 | return 0; | ||
2566 | } else | ||
2567 | return -EINVAL; | 2565 | return -EINVAL; |
2566 | |||
2567 | net = (struct net *)ctl->extra1; | ||
2568 | delay = net->ipv6.sysctl.flush_delay; | ||
2569 | proc_dointvec(ctl, write, buffer, lenp, ppos); | ||
2570 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); | ||
2571 | return 0; | ||
2568 | } | 2572 | } |
2569 | 2573 | ||
2570 | ctl_table ipv6_route_table_template[] = { | 2574 | ctl_table ipv6_route_table_template[] = { |
@@ -2651,6 +2655,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) | |||
2651 | 2655 | ||
2652 | if (table) { | 2656 | if (table) { |
2653 | table[0].data = &net->ipv6.sysctl.flush_delay; | 2657 | table[0].data = &net->ipv6.sysctl.flush_delay; |
2658 | table[0].extra1 = net; | ||
2654 | table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; | 2659 | table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; |
2655 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; | 2660 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; |
2656 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | 2661 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; |
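The flush handler above no longer guesses the namespace from current->nsproxy, which is wrong when the write arrives through another namespace's /proc mount; instead ipv6_route_sysctl_init() stores the owning struct net in table[0].extra1 and the handler reads it back. The per-net handler pattern, reduced to a sketch (my_pernet_flush is an illustrative name and the actual GC call is elided):

    static int my_pernet_flush(ctl_table *ctl, int write,
                               void __user *buffer, size_t *lenp, loff_t *ppos)
    {
            struct net *net = (struct net *)ctl->extra1;    /* set at init time */

            if (!write)
                    return -EINVAL;
            /* ... operate on *net* here, e.g. kick the routing cache GC ... */
            return proc_dointvec(ctl, write, buffer, lenp, ppos);
    }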
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c index 8ce38f10a547..d2c16e10f650 100644 --- a/net/ipv6/sit.c +++ b/net/ipv6/sit.c | |||
@@ -1290,4 +1290,4 @@ static int __init sit_init(void) | |||
1290 | module_init(sit_init); | 1290 | module_init(sit_init); |
1291 | module_exit(sit_cleanup); | 1291 | module_exit(sit_cleanup); |
1292 | MODULE_LICENSE("GPL"); | 1292 | MODULE_LICENSE("GPL"); |
1293 | MODULE_ALIAS("sit0"); | 1293 | MODULE_ALIAS_NETDEV("sit0"); |
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 8acba456744e..7a10a8d1b2d0 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c | |||
@@ -1229,6 +1229,7 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) | |||
1229 | } | 1229 | } |
1230 | mutex_unlock(&local->iflist_mtx); | 1230 | mutex_unlock(&local->iflist_mtx); |
1231 | unregister_netdevice_many(&unreg_list); | 1231 | unregister_netdevice_many(&unreg_list); |
1232 | list_del(&unreg_list); | ||
1232 | } | 1233 | } |
1233 | 1234 | ||
1234 | static u32 ieee80211_idle_off(struct ieee80211_local *local, | 1235 | static u32 ieee80211_idle_off(struct ieee80211_local *local, |
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 45fbb9e33746..c9ceb4d57ab0 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c | |||
@@ -1033,6 +1033,12 @@ void ieee80211_sta_rx_notify(struct ieee80211_sub_if_data *sdata, | |||
1033 | if (is_multicast_ether_addr(hdr->addr1)) | 1033 | if (is_multicast_ether_addr(hdr->addr1)) |
1034 | return; | 1034 | return; |
1035 | 1035 | ||
1036 | /* | ||
1037 | * In case we receive frames after disassociation. | ||
1038 | */ | ||
1039 | if (!sdata->u.mgd.associated) | ||
1040 | return; | ||
1041 | |||
1036 | ieee80211_sta_reset_conn_monitor(sdata); | 1042 | ieee80211_sta_reset_conn_monitor(sdata); |
1037 | } | 1043 | } |
1038 | 1044 | ||
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 22f7ad5101ab..ba98e1308f3c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, | |||
808 | dest->u_threshold = udest->u_threshold; | 808 | dest->u_threshold = udest->u_threshold; |
809 | dest->l_threshold = udest->l_threshold; | 809 | dest->l_threshold = udest->l_threshold; |
810 | 810 | ||
811 | spin_lock(&dest->dst_lock); | 811 | spin_lock_bh(&dest->dst_lock); |
812 | ip_vs_dst_reset(dest); | 812 | ip_vs_dst_reset(dest); |
813 | spin_unlock(&dest->dst_lock); | 813 | spin_unlock_bh(&dest->dst_lock); |
814 | 814 | ||
815 | if (add) | 815 | if (add) |
816 | ip_vs_new_estimator(&dest->stats); | 816 | ip_vs_new_estimator(&dest->stats); |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index b07393eab88e..91816998ed86 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
@@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister); | |||
85 | 85 | ||
86 | int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) | 86 | int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) |
87 | { | 87 | { |
88 | if (pf >= ARRAY_SIZE(nf_loggers)) | ||
89 | return -EINVAL; | ||
88 | mutex_lock(&nf_log_mutex); | 90 | mutex_lock(&nf_log_mutex); |
89 | if (__find_logger(pf, logger->name) == NULL) { | 91 | if (__find_logger(pf, logger->name) == NULL) { |
90 | mutex_unlock(&nf_log_mutex); | 92 | mutex_unlock(&nf_log_mutex); |
@@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf); | |||
98 | 100 | ||
99 | void nf_log_unbind_pf(u_int8_t pf) | 101 | void nf_log_unbind_pf(u_int8_t pf) |
100 | { | 102 | { |
103 | if (pf >= ARRAY_SIZE(nf_loggers)) | ||
104 | return; | ||
101 | mutex_lock(&nf_log_mutex); | 105 | mutex_lock(&nf_log_mutex); |
102 | rcu_assign_pointer(nf_loggers[pf], NULL); | 106 | rcu_assign_pointer(nf_loggers[pf], NULL); |
103 | mutex_unlock(&nf_log_mutex); | 107 | mutex_unlock(&nf_log_mutex); |
diff --git a/net/netfilter/nf_tproxy_core.c b/net/netfilter/nf_tproxy_core.c index 4d87befb04c0..474d621cbc2e 100644 --- a/net/netfilter/nf_tproxy_core.c +++ b/net/netfilter/nf_tproxy_core.c | |||
@@ -28,26 +28,23 @@ nf_tproxy_destructor(struct sk_buff *skb) | |||
28 | skb->destructor = NULL; | 28 | skb->destructor = NULL; |
29 | 29 | ||
30 | if (sk) | 30 | if (sk) |
31 | nf_tproxy_put_sock(sk); | 31 | sock_put(sk); |
32 | } | 32 | } |
33 | 33 | ||
34 | /* consumes sk */ | 34 | /* consumes sk */ |
35 | int | 35 | void |
36 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) | 36 | nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk) |
37 | { | 37 | { |
38 | bool transparent = (sk->sk_state == TCP_TIME_WAIT) ? | 38 | /* assigning tw sockets complicates things; most |
39 | inet_twsk(sk)->tw_transparent : | 39 | * skb->sk->X checks would have to test sk->sk_state first */ |
40 | inet_sk(sk)->transparent; | 40 | if (sk->sk_state == TCP_TIME_WAIT) { |
41 | 41 | inet_twsk_put(inet_twsk(sk)); | |
42 | if (transparent) { | 42 | return; |
43 | skb_orphan(skb); | 43 | } |
44 | skb->sk = sk; | 44 | |
45 | skb->destructor = nf_tproxy_destructor; | 45 | skb_orphan(skb); |
46 | return 1; | 46 | skb->sk = sk; |
47 | } else | 47 | skb->destructor = nf_tproxy_destructor; |
48 | nf_tproxy_put_sock(sk); | ||
49 | |||
50 | return 0; | ||
51 | } | 48 | } |
52 | EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); | 49 | EXPORT_SYMBOL_GPL(nf_tproxy_assign_sock); |
53 | 50 | ||
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 640678f47a2a..dcfd57eb9d02 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c | |||
@@ -33,6 +33,20 @@ | |||
33 | #include <net/netfilter/nf_tproxy_core.h> | 33 | #include <net/netfilter/nf_tproxy_core.h> |
34 | #include <linux/netfilter/xt_TPROXY.h> | 34 | #include <linux/netfilter/xt_TPROXY.h> |
35 | 35 | ||
36 | static bool tproxy_sk_is_transparent(struct sock *sk) | ||
37 | { | ||
38 | if (sk->sk_state != TCP_TIME_WAIT) { | ||
39 | if (inet_sk(sk)->transparent) | ||
40 | return true; | ||
41 | sock_put(sk); | ||
42 | } else { | ||
43 | if (inet_twsk(sk)->tw_transparent) | ||
44 | return true; | ||
45 | inet_twsk_put(inet_twsk(sk)); | ||
46 | } | ||
47 | return false; | ||
48 | } | ||
49 | |||
36 | static inline __be32 | 50 | static inline __be32 |
37 | tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) | 51 | tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr) |
38 | { | 52 | { |
@@ -141,7 +155,7 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
141 | skb->dev, NFT_LOOKUP_LISTENER); | 155 | skb->dev, NFT_LOOKUP_LISTENER); |
142 | 156 | ||
143 | /* NOTE: assign_sock consumes our sk reference */ | 157 | /* NOTE: assign_sock consumes our sk reference */ |
144 | if (sk && nf_tproxy_assign_sock(skb, sk)) { | 158 | if (sk && tproxy_sk_is_transparent(sk)) { |
145 | /* This should be in a separate target, but we don't do multiple | 159 | /* This should be in a separate target, but we don't do multiple |
146 | targets on the same rule yet */ | 160 | targets on the same rule yet */ |
147 | skb->mark = (skb->mark & ~mark_mask) ^ mark_value; | 161 | skb->mark = (skb->mark & ~mark_mask) ^ mark_value; |
@@ -149,6 +163,8 @@ tproxy_tg4(struct sk_buff *skb, __be32 laddr, __be16 lport, | |||
149 | pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", | 163 | pr_debug("redirecting: proto %hhu %pI4:%hu -> %pI4:%hu, mark: %x\n", |
150 | iph->protocol, &iph->daddr, ntohs(hp->dest), | 164 | iph->protocol, &iph->daddr, ntohs(hp->dest), |
151 | &laddr, ntohs(lport), skb->mark); | 165 | &laddr, ntohs(lport), skb->mark); |
166 | |||
167 | nf_tproxy_assign_sock(skb, sk); | ||
152 | return NF_ACCEPT; | 168 | return NF_ACCEPT; |
153 | } | 169 | } |
154 | 170 | ||
@@ -306,7 +322,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
306 | par->in, NFT_LOOKUP_LISTENER); | 322 | par->in, NFT_LOOKUP_LISTENER); |
307 | 323 | ||
308 | /* NOTE: assign_sock consumes our sk reference */ | 324 | /* NOTE: assign_sock consumes our sk reference */ |
309 | if (sk && nf_tproxy_assign_sock(skb, sk)) { | 325 | if (sk && tproxy_sk_is_transparent(sk)) { |
310 | /* This should be in a separate target, but we don't do multiple | 326 | /* This should be in a separate target, but we don't do multiple |
311 | targets on the same rule yet */ | 327 | targets on the same rule yet */ |
312 | skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; | 328 | skb->mark = (skb->mark & ~tgi->mark_mask) ^ tgi->mark_value; |
@@ -314,6 +330,8 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par) | |||
314 | pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", | 330 | pr_debug("redirecting: proto %hhu %pI6:%hu -> %pI6:%hu, mark: %x\n", |
315 | tproto, &iph->saddr, ntohs(hp->source), | 331 | tproto, &iph->saddr, ntohs(hp->source), |
316 | laddr, ntohs(lport), skb->mark); | 332 | laddr, ntohs(lport), skb->mark); |
333 | |||
334 | nf_tproxy_assign_sock(skb, sk); | ||
317 | return NF_ACCEPT; | 335 | return NF_ACCEPT; |
318 | } | 336 | } |
319 | 337 | ||
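The TPROXY rework moves the transparency decision out of nf_tproxy_assign_sock() and into tproxy_sk_is_transparent(), leaving the assign helper to do nothing but attach the socket to the skb. The delicate part is dropping an unwanted lookup reference: a TCP_TIME_WAIT entry is a timewait minisock rather than a full struct sock, so it must be released with inet_twsk_put() instead of sock_put(). That is the same helper xt_socket.c adds below as xt_socket_put_sk(); as a sketch:

    static void put_lookup_ref(struct sock *sk)
    {
            if (sk->sk_state == TCP_TIME_WAIT)
                    inet_twsk_put(inet_twsk(sk));   /* timewait minisock */
            else
                    sock_put(sk);                   /* fully-fledged socket */
    }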
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 00d6ae838303..9cc46356b577 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c | |||
@@ -35,6 +35,15 @@ | |||
35 | #include <net/netfilter/nf_conntrack.h> | 35 | #include <net/netfilter/nf_conntrack.h> |
36 | #endif | 36 | #endif |
37 | 37 | ||
38 | static void | ||
39 | xt_socket_put_sk(struct sock *sk) | ||
40 | { | ||
41 | if (sk->sk_state == TCP_TIME_WAIT) | ||
42 | inet_twsk_put(inet_twsk(sk)); | ||
43 | else | ||
44 | sock_put(sk); | ||
45 | } | ||
46 | |||
38 | static int | 47 | static int |
39 | extract_icmp4_fields(const struct sk_buff *skb, | 48 | extract_icmp4_fields(const struct sk_buff *skb, |
40 | u8 *protocol, | 49 | u8 *protocol, |
@@ -164,7 +173,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par, | |||
164 | (sk->sk_state == TCP_TIME_WAIT && | 173 | (sk->sk_state == TCP_TIME_WAIT && |
165 | inet_twsk(sk)->tw_transparent)); | 174 | inet_twsk(sk)->tw_transparent)); |
166 | 175 | ||
167 | nf_tproxy_put_sock(sk); | 176 | xt_socket_put_sk(sk); |
168 | 177 | ||
169 | if (wildcard || !transparent) | 178 | if (wildcard || !transparent) |
170 | sk = NULL; | 179 | sk = NULL; |
@@ -298,7 +307,7 @@ socket_mt6_v1(const struct sk_buff *skb, struct xt_action_param *par) | |||
298 | (sk->sk_state == TCP_TIME_WAIT && | 307 | (sk->sk_state == TCP_TIME_WAIT && |
299 | inet_twsk(sk)->tw_transparent)); | 308 | inet_twsk(sk)->tw_transparent)); |
300 | 309 | ||
301 | nf_tproxy_put_sock(sk); | 310 | xt_socket_put_sk(sk); |
302 | 311 | ||
303 | if (wildcard || !transparent) | 312 | if (wildcard || !transparent) |
304 | sk = NULL; | 313 | sk = NULL; |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 478181d53c55..1f924595bdef 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
@@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
1408 | size_t copied; | 1408 | size_t copied; |
1409 | struct sk_buff *skb, *data_skb; | 1409 | struct sk_buff *skb, *data_skb; |
1410 | int err; | 1410 | int err, ret; |
1411 | 1411 | ||
1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
1413 | return -EOPNOTSUPP; | 1413 | return -EOPNOTSUPP; |
@@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
1470 | 1470 | ||
1471 | skb_free_datagram(sk, skb); | 1471 | skb_free_datagram(sk, skb); |
1472 | 1472 | ||
1473 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | 1473 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { |
1474 | netlink_dump(sk); | 1474 | ret = netlink_dump(sk); |
1475 | if (ret) { | ||
1476 | sk->sk_err = ret; | ||
1477 | sk->sk_error_report(sk); | ||
1478 | } | ||
1479 | } | ||
1475 | 1480 | ||
1476 | scm_recv(sock, msg, siocb->scm, flags); | 1481 | scm_recv(sock, msg, siocb->scm, flags); |
1477 | out: | 1482 | out: |
@@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1736 | struct netlink_callback *cb; | 1741 | struct netlink_callback *cb; |
1737 | struct sock *sk; | 1742 | struct sock *sk; |
1738 | struct netlink_sock *nlk; | 1743 | struct netlink_sock *nlk; |
1744 | int ret; | ||
1739 | 1745 | ||
1740 | cb = kzalloc(sizeof(*cb), GFP_KERNEL); | 1746 | cb = kzalloc(sizeof(*cb), GFP_KERNEL); |
1741 | if (cb == NULL) | 1747 | if (cb == NULL) |
@@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
1764 | nlk->cb = cb; | 1770 | nlk->cb = cb; |
1765 | mutex_unlock(nlk->cb_mutex); | 1771 | mutex_unlock(nlk->cb_mutex); |
1766 | 1772 | ||
1767 | netlink_dump(sk); | 1773 | ret = netlink_dump(sk); |
1774 | |||
1768 | sock_put(sk); | 1775 | sock_put(sk); |
1769 | 1776 | ||
1777 | if (ret) | ||
1778 | return ret; | ||
1779 | |||
1770 | /* We successfully started a dump, by returning -EINTR we | 1780 | /* We successfully started a dump, by returning -EINTR we |
1771 | * signal not to send ACK even if it was requested. | 1781 | * signal not to send ACK even if it was requested. |
1772 | */ | 1782 | */ |
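netlink_dump() failures are no longer swallowed: on the recvmsg path the error is latched into sk->sk_err and signalled through sk_error_report(), and netlink_dump_start() now returns the error to its caller instead of unconditionally reporting -EINTR. From userspace the recvmsg case shows up as a failure on a later receive, roughly:

    /* illustrative userspace view; nl_fd is an AF_NETLINK socket with
     * a dump in progress */
    ssize_t n = recv(nl_fd, buf, sizeof(buf), 0);
    if (n < 0)
            perror("netlink dump");  /* errno carries the latched error */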
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c index 71f373c421bc..c47a511f203d 100644 --- a/net/rds/ib_send.c +++ b/net/rds/ib_send.c | |||
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
551 | if (conn->c_loopback | 551 | if (conn->c_loopback |
552 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | 552 | && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { |
553 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 553 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
554 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 554 | scat = &rm->data.op_sg[sg]; |
555 | ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | ||
556 | ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); | ||
557 | return ret; | ||
555 | } | 558 | } |
556 | 559 | ||
557 | /* FIXME we may overallocate here */ | 560 | /* FIXME we may overallocate here */ |
diff --git a/net/rds/loop.c b/net/rds/loop.c index aeec1d483b17..bca6761a3ca2 100644 --- a/net/rds/loop.c +++ b/net/rds/loop.c | |||
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
61 | unsigned int hdr_off, unsigned int sg, | 61 | unsigned int hdr_off, unsigned int sg, |
62 | unsigned int off) | 62 | unsigned int off) |
63 | { | 63 | { |
64 | struct scatterlist *sgp = &rm->data.op_sg[sg]; | ||
65 | int ret = sizeof(struct rds_header) + | ||
66 | be32_to_cpu(rm->m_inc.i_hdr.h_len); | ||
67 | |||
64 | /* Do not send cong updates to loopback */ | 68 | /* Do not send cong updates to loopback */ |
65 | if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { | 69 | if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { |
66 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); | 70 | rds_cong_map_updated(conn->c_fcong, ~(u64) 0); |
67 | return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; | 71 | ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); |
72 | goto out; | ||
68 | } | 73 | } |
69 | 74 | ||
70 | BUG_ON(hdr_off || sg || off); | 75 | BUG_ON(hdr_off || sg || off); |
@@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, | |||
80 | NULL); | 85 | NULL); |
81 | 86 | ||
82 | rds_inc_put(&rm->m_inc); | 87 | rds_inc_put(&rm->m_inc); |
83 | 88 | out: | |
84 | return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); | 89 | return ret; |
85 | } | 90 | } |
86 | 91 | ||
87 | /* | 92 | /* |
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 89315009bab1..1a2b0633fece 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c | |||
@@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) | |||
423 | goto protocol_error; | 423 | goto protocol_error; |
424 | } | 424 | } |
425 | 425 | ||
426 | case RXRPC_PACKET_TYPE_ACKALL: | ||
426 | case RXRPC_PACKET_TYPE_ACK: | 427 | case RXRPC_PACKET_TYPE_ACK: |
427 | /* ACK processing is done in process context */ | 428 | /* ACK processing is done in process context */ |
428 | read_lock_bh(&call->state_lock); | 429 | read_lock_bh(&call->state_lock); |
diff --git a/net/rxrpc/ar-key.c b/net/rxrpc/ar-key.c index 5ee16f0353fe..d763793d39de 100644 --- a/net/rxrpc/ar-key.c +++ b/net/rxrpc/ar-key.c | |||
@@ -89,11 +89,11 @@ static int rxrpc_instantiate_xdr_rxkad(struct key *key, const __be32 *xdr, | |||
89 | return ret; | 89 | return ret; |
90 | 90 | ||
91 | plen -= sizeof(*token); | 91 | plen -= sizeof(*token); |
92 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 92 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
93 | if (!token) | 93 | if (!token) |
94 | return -ENOMEM; | 94 | return -ENOMEM; |
95 | 95 | ||
96 | token->kad = kmalloc(plen, GFP_KERNEL); | 96 | token->kad = kzalloc(plen, GFP_KERNEL); |
97 | if (!token->kad) { | 97 | if (!token->kad) { |
98 | kfree(token); | 98 | kfree(token); |
99 | return -ENOMEM; | 99 | return -ENOMEM; |
@@ -731,10 +731,10 @@ static int rxrpc_instantiate(struct key *key, const void *data, size_t datalen) | |||
731 | goto error; | 731 | goto error; |
732 | 732 | ||
733 | ret = -ENOMEM; | 733 | ret = -ENOMEM; |
734 | token = kmalloc(sizeof(*token), GFP_KERNEL); | 734 | token = kzalloc(sizeof(*token), GFP_KERNEL); |
735 | if (!token) | 735 | if (!token) |
736 | goto error; | 736 | goto error; |
737 | token->kad = kmalloc(plen, GFP_KERNEL); | 737 | token->kad = kzalloc(plen, GFP_KERNEL); |
738 | if (!token->kad) | 738 | if (!token->kad) |
739 | goto error_free; | 739 | goto error_free; |
740 | 740 | ||
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 34dc598440a2..1bc698039ae2 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c | |||
@@ -839,6 +839,7 @@ void dev_deactivate(struct net_device *dev) | |||
839 | 839 | ||
840 | list_add(&dev->unreg_list, &single); | 840 | list_add(&dev->unreg_list, &single); |
841 | dev_deactivate_many(&single); | 841 | dev_deactivate_many(&single); |
842 | list_del(&single); | ||
842 | } | 843 | } |
843 | 844 | ||
844 | static void dev_init_scheduler_queue(struct net_device *dev, | 845 | static void dev_init_scheduler_queue(struct net_device *dev, |
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 2cc46f0962ca..b23428f3c0dd 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c | |||
@@ -2029,11 +2029,11 @@ static sctp_ierror_t sctp_process_unk_param(const struct sctp_association *asoc, | |||
2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); | 2029 | *errp = sctp_make_op_error_fixed(asoc, chunk); |
2030 | 2030 | ||
2031 | if (*errp) { | 2031 | if (*errp) { |
2032 | sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, | 2032 | if (!sctp_init_cause_fixed(*errp, SCTP_ERROR_UNKNOWN_PARAM, |
2033 | WORD_ROUND(ntohs(param.p->length))); | 2033 | WORD_ROUND(ntohs(param.p->length)))) |
2034 | sctp_addto_chunk_fixed(*errp, | 2034 | sctp_addto_chunk_fixed(*errp, |
2035 | WORD_ROUND(ntohs(param.p->length)), | 2035 | WORD_ROUND(ntohs(param.p->length)), |
2036 | param.v); | 2036 | param.v); |
2037 | } else { | 2037 | } else { |
2038 | /* If there is no memory for generating the ERROR | 2038 | /* If there is no memory for generating the ERROR |
2039 | * report as specified, an ABORT will be triggered | 2039 | * report as specified, an ABORT will be triggered |
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index 243fc09b164e..59e599498e37 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c | |||
@@ -252,23 +252,37 @@ static void rpc_set_active(struct rpc_task *task) | |||
252 | 252 | ||
253 | /* | 253 | /* |
254 | * Mark an RPC call as having completed by clearing the 'active' bit | 254 | * Mark an RPC call as having completed by clearing the 'active' bit |
255 | * and then waking up all tasks that were sleeping. | ||
255 | */ | 256 | */ |
256 | static void rpc_mark_complete_task(struct rpc_task *task) | 257 | static int rpc_complete_task(struct rpc_task *task) |
257 | { | 258 | { |
258 | smp_mb__before_clear_bit(); | 259 | void *m = &task->tk_runstate; |
260 | wait_queue_head_t *wq = bit_waitqueue(m, RPC_TASK_ACTIVE); | ||
261 | struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(m, RPC_TASK_ACTIVE); | ||
262 | unsigned long flags; | ||
263 | int ret; | ||
264 | |||
265 | spin_lock_irqsave(&wq->lock, flags); | ||
259 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); | 266 | clear_bit(RPC_TASK_ACTIVE, &task->tk_runstate); |
260 | smp_mb__after_clear_bit(); | 267 | ret = atomic_dec_and_test(&task->tk_count); |
261 | wake_up_bit(&task->tk_runstate, RPC_TASK_ACTIVE); | 268 | if (waitqueue_active(wq)) |
269 | __wake_up_locked_key(wq, TASK_NORMAL, &k); | ||
270 | spin_unlock_irqrestore(&wq->lock, flags); | ||
271 | return ret; | ||
262 | } | 272 | } |
263 | 273 | ||
264 | /* | 274 | /* |
265 | * Allow callers to wait for completion of an RPC call | 275 | * Allow callers to wait for completion of an RPC call |
276 | * | ||
277 | * Note the use of out_of_line_wait_on_bit() rather than wait_on_bit() | ||
278 | * to enforce taking of the wq->lock and hence avoid races with | ||
279 | * rpc_complete_task(). | ||
266 | */ | 280 | */ |
267 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) | 281 | int __rpc_wait_for_completion_task(struct rpc_task *task, int (*action)(void *)) |
268 | { | 282 | { |
269 | if (action == NULL) | 283 | if (action == NULL) |
270 | action = rpc_wait_bit_killable; | 284 | action = rpc_wait_bit_killable; |
271 | return wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, | 285 | return out_of_line_wait_on_bit(&task->tk_runstate, RPC_TASK_ACTIVE, |
272 | action, TASK_KILLABLE); | 286 | action, TASK_KILLABLE); |
273 | } | 287 | } |
274 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); | 288 | EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task); |
@@ -857,34 +871,67 @@ static void rpc_async_release(struct work_struct *work) | |||
857 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); | 871 | rpc_free_task(container_of(work, struct rpc_task, u.tk_work)); |
858 | } | 872 | } |
859 | 873 | ||
860 | void rpc_put_task(struct rpc_task *task) | 874 | static void rpc_release_resources_task(struct rpc_task *task) |
861 | { | 875 | { |
862 | if (!atomic_dec_and_test(&task->tk_count)) | ||
863 | return; | ||
864 | /* Release resources */ | ||
865 | if (task->tk_rqstp) | 876 | if (task->tk_rqstp) |
866 | xprt_release(task); | 877 | xprt_release(task); |
867 | if (task->tk_msg.rpc_cred) | 878 | if (task->tk_msg.rpc_cred) |
868 | put_rpccred(task->tk_msg.rpc_cred); | 879 | put_rpccred(task->tk_msg.rpc_cred); |
869 | rpc_task_release_client(task); | 880 | rpc_task_release_client(task); |
870 | if (task->tk_workqueue != NULL) { | 881 | } |
882 | |||
883 | static void rpc_final_put_task(struct rpc_task *task, | ||
884 | struct workqueue_struct *q) | ||
885 | { | ||
886 | if (q != NULL) { | ||
871 | INIT_WORK(&task->u.tk_work, rpc_async_release); | 887 | INIT_WORK(&task->u.tk_work, rpc_async_release); |
872 | queue_work(task->tk_workqueue, &task->u.tk_work); | 888 | queue_work(q, &task->u.tk_work); |
873 | } else | 889 | } else |
874 | rpc_free_task(task); | 890 | rpc_free_task(task); |
875 | } | 891 | } |
892 | |||
893 | static void rpc_do_put_task(struct rpc_task *task, struct workqueue_struct *q) | ||
894 | { | ||
895 | if (atomic_dec_and_test(&task->tk_count)) { | ||
896 | rpc_release_resources_task(task); | ||
897 | rpc_final_put_task(task, q); | ||
898 | } | ||
899 | } | ||
900 | |||
901 | void rpc_put_task(struct rpc_task *task) | ||
902 | { | ||
903 | rpc_do_put_task(task, NULL); | ||
904 | } | ||
876 | EXPORT_SYMBOL_GPL(rpc_put_task); | 905 | EXPORT_SYMBOL_GPL(rpc_put_task); |
877 | 906 | ||
907 | void rpc_put_task_async(struct rpc_task *task) | ||
908 | { | ||
909 | rpc_do_put_task(task, task->tk_workqueue); | ||
910 | } | ||
911 | EXPORT_SYMBOL_GPL(rpc_put_task_async); | ||
912 | |||
878 | static void rpc_release_task(struct rpc_task *task) | 913 | static void rpc_release_task(struct rpc_task *task) |
879 | { | 914 | { |
880 | dprintk("RPC: %5u release task\n", task->tk_pid); | 915 | dprintk("RPC: %5u release task\n", task->tk_pid); |
881 | 916 | ||
882 | BUG_ON (RPC_IS_QUEUED(task)); | 917 | BUG_ON (RPC_IS_QUEUED(task)); |
883 | 918 | ||
884 | /* Wake up anyone who is waiting for task completion */ | 919 | rpc_release_resources_task(task); |
885 | rpc_mark_complete_task(task); | ||
886 | 920 | ||
887 | rpc_put_task(task); | 921 | /* |
922 | * Note: at this point we have been removed from rpc_clnt->cl_tasks, | ||
923 | * so it should be safe to use task->tk_count as a test for whether | ||
924 | * or not any other processes still hold references to our rpc_task. | ||
925 | */ | ||
926 | if (atomic_read(&task->tk_count) != 1 + !RPC_IS_ASYNC(task)) { | ||
927 | /* Wake up anyone who may be waiting for task completion */ | ||
928 | if (!rpc_complete_task(task)) | ||
929 | return; | ||
930 | } else { | ||
931 | if (!atomic_dec_and_test(&task->tk_count)) | ||
932 | return; | ||
933 | } | ||
934 | rpc_final_put_task(task, task->tk_workqueue); | ||
888 | } | 935 | } |
889 | 936 | ||
890 | int rpciod_up(void) | 937 | int rpciod_up(void) |
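The sched.c changes pair a new completion side with a stricter wait side: rpc_complete_task() clears RPC_TASK_ACTIVE, drops the task reference and wakes waiters all while holding the bit-waitqueue lock, and __rpc_wait_for_completion_task() switches to out_of_line_wait_on_bit() precisely so that waiters serialize on that same lock (the new comment in the hunk states this). The lock pairing, stripped down to a sketch with illustrative names (obj->state is an unsigned long, MY_DONE is the bit number):

    /* completion side: test, wake and drop the final reference
     * atomically with respect to waiters */
    void *word = &obj->state;
    wait_queue_head_t *wq = bit_waitqueue(word, MY_DONE);
    struct wait_bit_key k = __WAIT_BIT_KEY_INITIALIZER(word, MY_DONE);
    unsigned long flags;
    int last;

    spin_lock_irqsave(&wq->lock, flags);
    clear_bit(MY_DONE, &obj->state);
    last = atomic_dec_and_test(&obj->refcount);
    if (waitqueue_active(wq))
            __wake_up_locked_key(wq, TASK_NORMAL, &k);
    spin_unlock_irqrestore(&wq->lock, flags);

    /* waiter side: out_of_line_wait_on_bit() always goes through
     * wq->lock, whereas wait_on_bit() can return early on a lockless
     * test_bit() and so miss the synchronization above */
    out_of_line_wait_on_bit(&obj->state, MY_DONE,
                            my_wait_action, TASK_KILLABLE);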
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 9df1eadc912a..1a10dcd999ea 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -1335,6 +1335,7 @@ void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp, | |||
1335 | p, 0, length, DMA_FROM_DEVICE); | 1335 | p, 0, length, DMA_FROM_DEVICE); |
1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { | 1336 | if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) { |
1337 | put_page(p); | 1337 | put_page(p); |
1338 | svc_rdma_put_context(ctxt, 1); | ||
1338 | return; | 1339 | return; |
1339 | } | 1340 | } |
1340 | atomic_inc(&xprt->sc_dma_used); | 1341 | atomic_inc(&xprt->sc_dma_used); |
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c431f5a57960..be96d429b475 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c | |||
@@ -1631,7 +1631,8 @@ static struct socket *xs_create_sock(struct rpc_xprt *xprt, | |||
1631 | } | 1631 | } |
1632 | xs_reclassify_socket(family, sock); | 1632 | xs_reclassify_socket(family, sock); |
1633 | 1633 | ||
1634 | if (xs_bind(transport, sock)) { | 1634 | err = xs_bind(transport, sock); |
1635 | if (err) { | ||
1635 | sock_release(sock); | 1636 | sock_release(sock); |
1636 | goto out; | 1637 | goto out; |
1637 | } | 1638 | } |
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index dd419d286204..437a99e560e1 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c | |||
@@ -1724,7 +1724,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1724 | 1724 | ||
1725 | msg->msg_namelen = 0; | 1725 | msg->msg_namelen = 0; |
1726 | 1726 | ||
1727 | mutex_lock(&u->readlock); | 1727 | err = mutex_lock_interruptible(&u->readlock); |
1728 | if (err) { | ||
1729 | err = sock_intr_errno(sock_rcvtimeo(sk, noblock)); | ||
1730 | goto out; | ||
1731 | } | ||
1728 | 1732 | ||
1729 | skb = skb_recv_datagram(sk, flags, noblock, &err); | 1733 | skb = skb_recv_datagram(sk, flags, noblock, &err); |
1730 | if (!skb) { | 1734 | if (!skb) { |
@@ -1864,7 +1868,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1864 | memset(&tmp_scm, 0, sizeof(tmp_scm)); | 1868 | memset(&tmp_scm, 0, sizeof(tmp_scm)); |
1865 | } | 1869 | } |
1866 | 1870 | ||
1867 | mutex_lock(&u->readlock); | 1871 | err = mutex_lock_interruptible(&u->readlock); |
1872 | if (err) { | ||
1873 | err = sock_intr_errno(timeo); | ||
1874 | goto out; | ||
1875 | } | ||
1868 | 1876 | ||
1869 | do { | 1877 | do { |
1870 | int chunk; | 1878 | int chunk; |
@@ -1895,11 +1903,12 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1895 | 1903 | ||
1896 | timeo = unix_stream_data_wait(sk, timeo); | 1904 | timeo = unix_stream_data_wait(sk, timeo); |
1897 | 1905 | ||
1898 | if (signal_pending(current)) { | 1906 | if (signal_pending(current) |
1907 | || mutex_lock_interruptible(&u->readlock)) { | ||
1899 | err = sock_intr_errno(timeo); | 1908 | err = sock_intr_errno(timeo); |
1900 | goto out; | 1909 | goto out; |
1901 | } | 1910 | } |
1902 | mutex_lock(&u->readlock); | 1911 | |
1903 | continue; | 1912 | continue; |
1904 | unlock: | 1913 | unlock: |
1905 | unix_state_unlock(sk); | 1914 | unix_state_unlock(sk); |
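Both AF_UNIX receive paths now take u->readlock with mutex_lock_interruptible(), so a reader blocked on the lock can be broken out by a signal, and the stream path re-acquires the lock interruptibly after unix_stream_data_wait() as well. The interruption is mapped to the usual socket semantics via sock_intr_errno(); the shape of the conversion, as a sketch:

    err = mutex_lock_interruptible(&u->readlock);
    if (err) {
            /* -ERESTARTSYS if the wait had no finite timeout,
             * otherwise -EINTR */
            err = sock_intr_errno(timeo);
            goto out;
    }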
diff --git a/net/wireless/wext-compat.c b/net/wireless/wext-compat.c index 3e5dbd4e4cd5..d112f038edf0 100644 --- a/net/wireless/wext-compat.c +++ b/net/wireless/wext-compat.c | |||
@@ -802,11 +802,11 @@ int cfg80211_wext_siwfreq(struct net_device *dev, | |||
802 | return freq; | 802 | return freq; |
803 | if (freq == 0) | 803 | if (freq == 0) |
804 | return -EINVAL; | 804 | return -EINVAL; |
805 | wdev_lock(wdev); | ||
806 | mutex_lock(&rdev->devlist_mtx); | 805 | mutex_lock(&rdev->devlist_mtx); |
806 | wdev_lock(wdev); | ||
807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); | 807 | err = cfg80211_set_freq(rdev, wdev, freq, NL80211_CHAN_NO_HT); |
808 | mutex_unlock(&rdev->devlist_mtx); | ||
809 | wdev_unlock(wdev); | 808 | wdev_unlock(wdev); |
809 | mutex_unlock(&rdev->devlist_mtx); | ||
810 | return err; | 810 | return err; |
811 | default: | 811 | default: |
812 | return -EOPNOTSUPP; | 812 | return -EOPNOTSUPP; |
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index 6c94c6ce2925..291228e25984 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c | |||
@@ -309,6 +309,11 @@ static void do_config_file(const char *filename) | |||
309 | close(fd); | 309 | close(fd); |
310 | } | 310 | } |
311 | 311 | ||
312 | /* | ||
313 | * Important: The below generated source_foo.o and deps_foo.o variable | ||
314 | * assignments are parsed not only by make, but also by the rather simple | ||
315 | * parser in scripts/mod/sumversion.c. | ||
316 | */ | ||
312 | static void parse_dep_file(void *map, size_t len) | 317 | static void parse_dep_file(void *map, size_t len) |
313 | { | 318 | { |
314 | char *m = map; | 319 | char *m = map; |
@@ -323,7 +328,6 @@ static void parse_dep_file(void *map, size_t len) | |||
323 | exit(1); | 328 | exit(1); |
324 | } | 329 | } |
325 | memcpy(s, m, p-m); s[p-m] = 0; | 330 | memcpy(s, m, p-m); s[p-m] = 0; |
326 | printf("deps_%s := \\\n", target); | ||
327 | m = p+1; | 331 | m = p+1; |
328 | 332 | ||
329 | clear_config(); | 333 | clear_config(); |
@@ -343,12 +347,15 @@ static void parse_dep_file(void *map, size_t len) | |||
343 | strrcmp(s, "arch/um/include/uml-config.h") && | 347 | strrcmp(s, "arch/um/include/uml-config.h") && |
344 | strrcmp(s, ".ver")) { | 348 | strrcmp(s, ".ver")) { |
345 | /* | 349 | /* |
346 | * Do not output the first dependency (the | 350 | * Do not list the source file as dependency, so that |
347 | * source file), so that kbuild is not confused | 351 | * kbuild is not confused if a .c file is rewritten |
348 | * if a .c file is rewritten into .S or vice | 352 | * into .S or vice versa. Storing it in source_* is |
349 | * versa. | 353 | * needed for modpost to compute srcversions. |
350 | */ | 354 | */ |
351 | if (!first) | 355 | if (first) { |
356 | printf("source_%s := %s\n\n", target, s); | ||
357 | printf("deps_%s := \\\n", target); | ||
358 | } else | ||
352 | printf(" %s \\\n", s); | 359 | printf(" %s \\\n", s); |
353 | do_config_file(s); | 360 | do_config_file(s); |
354 | } | 361 | } |
diff --git a/scripts/mod/sumversion.c b/scripts/mod/sumversion.c index ecf9c7dc1825..9dfcd6d988da 100644 --- a/scripts/mod/sumversion.c +++ b/scripts/mod/sumversion.c | |||
@@ -300,8 +300,8 @@ static int is_static_library(const char *objfile) | |||
300 | return 0; | 300 | return 0; |
301 | } | 301 | } |
302 | 302 | ||
303 | /* We have dir/file.o. Open dir/.file.o.cmd, look for deps_ line to | 303 | /* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line |
304 | * figure out source file. */ | 304 | * to figure out source files. */ |
305 | static int parse_source_files(const char *objfile, struct md4_ctx *md) | 305 | static int parse_source_files(const char *objfile, struct md4_ctx *md) |
306 | { | 306 | { |
307 | char *cmd, *file, *line, *dir; | 307 | char *cmd, *file, *line, *dir; |
@@ -340,6 +340,21 @@ static int parse_source_files(const char *objfile, struct md4_ctx *md) | |||
340 | */ | 340 | */ |
341 | while ((line = get_next_line(&pos, file, flen)) != NULL) { | 341 | while ((line = get_next_line(&pos, file, flen)) != NULL) { |
342 | char* p = line; | 342 | char* p = line; |
343 | |||
344 | if (strncmp(line, "source_", sizeof("source_")-1) == 0) { | ||
345 | p = strrchr(line, ' '); | ||
346 | if (!p) { | ||
347 | warn("malformed line: %s\n", line); | ||
348 | goto out_file; | ||
349 | } | ||
350 | p++; | ||
351 | if (!parse_file(p, md)) { | ||
352 | warn("could not open %s: %s\n", | ||
353 | p, strerror(errno)); | ||
354 | goto out_file; | ||
355 | } | ||
356 | continue; | ||
357 | } | ||
343 | if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { | 358 | if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { |
344 | check_files = 1; | 359 | check_files = 1; |
345 | continue; | 360 | continue; |
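The fixdep and sumversion changes are two halves of one feature: fixdep now writes the source file into a separate source_<target> assignment in the generated dir/.file.o.cmd (keeping it out of deps_<target> so kbuild is not confused by .c/.S rewrites), and sumversion reads that line back, taking the file name after the last space, to feed the srcversion computation. For a hypothetical drivers/foo/bar.o the generated fragment would look roughly like this (remaining prerequisites and the config wildcards omitted):

    source_drivers/foo/bar.o := drivers/foo/bar.c

    deps_drivers/foo/bar.o := \
        include/linux/kernel.h \
        include/linux/module.h \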
diff --git a/sound/core/jack.c b/sound/core/jack.c index 4902ae568730..53b53e97c896 100644 --- a/sound/core/jack.c +++ b/sound/core/jack.c | |||
@@ -141,6 +141,7 @@ int snd_jack_new(struct snd_card *card, const char *id, int type, | |||
141 | 141 | ||
142 | fail_input: | 142 | fail_input: |
143 | input_free_device(jack->input_dev); | 143 | input_free_device(jack->input_dev); |
144 | kfree(jack->id); | ||
144 | kfree(jack); | 145 | kfree(jack); |
145 | return err; | 146 | return err; |
146 | } | 147 | } |
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index a07b031090d8..067982f4f182 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
@@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = { | |||
1039 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, | 1039 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, |
1040 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, | 1040 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, |
1041 | 1041 | ||
1042 | #if 0 /* Don't set to D3 as we are in power-up sequence */ | ||
1042 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ | 1043 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ |
1043 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ | 1044 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ |
1044 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ | 1045 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ |
1046 | #endif | ||
1045 | 1047 | ||
1046 | {} /* terminator */ | 1048 | {} /* terminator */ |
1047 | }; | 1049 | }; |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index dd7c5c12225d..4d5004e693f0 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3114,6 +3114,8 @@ static struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3114 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), | 3114 | SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO), |
3115 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), | 3115 | SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO), |
3116 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), | 3116 | SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD), |
3117 | SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD), | ||
3118 | SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD), | ||
3117 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), | 3119 | SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP), |
3118 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), | 3120 | SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS), |
3119 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), | 3121 | SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS), |
@@ -3937,6 +3939,8 @@ static struct hda_codec_preset snd_hda_preset_conexant[] = { | |||
3937 | .patch = patch_cxt5066 }, | 3939 | .patch = patch_cxt5066 }, |
3938 | { .id = 0x14f15069, .name = "CX20585", | 3940 | { .id = 0x14f15069, .name = "CX20585", |
3939 | .patch = patch_cxt5066 }, | 3941 | .patch = patch_cxt5066 }, |
3942 | { .id = 0x14f1506e, .name = "CX20590", | ||
3943 | .patch = patch_cxt5066 }, | ||
3940 | { .id = 0x14f15097, .name = "CX20631", | 3944 | { .id = 0x14f15097, .name = "CX20631", |
3941 | .patch = patch_conexant_auto }, | 3945 | .patch = patch_conexant_auto }, |
3942 | { .id = 0x14f15098, .name = "CX20632", | 3946 | { .id = 0x14f15098, .name = "CX20632", |
@@ -3963,6 +3967,7 @@ MODULE_ALIAS("snd-hda-codec-id:14f15066"); | |||
3963 | MODULE_ALIAS("snd-hda-codec-id:14f15067"); | 3967 | MODULE_ALIAS("snd-hda-codec-id:14f15067"); |
3964 | MODULE_ALIAS("snd-hda-codec-id:14f15068"); | 3968 | MODULE_ALIAS("snd-hda-codec-id:14f15068"); |
3965 | MODULE_ALIAS("snd-hda-codec-id:14f15069"); | 3969 | MODULE_ALIAS("snd-hda-codec-id:14f15069"); |
3970 | MODULE_ALIAS("snd-hda-codec-id:14f1506e"); | ||
3966 | MODULE_ALIAS("snd-hda-codec-id:14f15097"); | 3971 | MODULE_ALIAS("snd-hda-codec-id:14f15097"); |
3967 | MODULE_ALIAS("snd-hda-codec-id:14f15098"); | 3972 | MODULE_ALIAS("snd-hda-codec-id:14f15098"); |
3968 | MODULE_ALIAS("snd-hda-codec-id:14f150a1"); | 3973 | MODULE_ALIAS("snd-hda-codec-id:14f150a1"); |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index a58767736727..ec0fa2dd0a27 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = { | |||
1634 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1634 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
1635 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1635 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
1636 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1636 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
1637 | { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | ||
1638 | { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | ||
1639 | /* 17 is known to be absent */ | ||
1637 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1640 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
1638 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1641 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
1639 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1642 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
@@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011"); | |||
1676 | MODULE_ALIAS("snd-hda-codec-id:10de0012"); | 1679 | MODULE_ALIAS("snd-hda-codec-id:10de0012"); |
1677 | MODULE_ALIAS("snd-hda-codec-id:10de0013"); | 1680 | MODULE_ALIAS("snd-hda-codec-id:10de0013"); |
1678 | MODULE_ALIAS("snd-hda-codec-id:10de0014"); | 1681 | MODULE_ALIAS("snd-hda-codec-id:10de0014"); |
1682 | MODULE_ALIAS("snd-hda-codec-id:10de0015"); | ||
1683 | MODULE_ALIAS("snd-hda-codec-id:10de0016"); | ||
1679 | MODULE_ALIAS("snd-hda-codec-id:10de0018"); | 1684 | MODULE_ALIAS("snd-hda-codec-id:10de0018"); |
1680 | MODULE_ALIAS("snd-hda-codec-id:10de0019"); | 1685 | MODULE_ALIAS("snd-hda-codec-id:10de0019"); |
1681 | MODULE_ALIAS("snd-hda-codec-id:10de001a"); | 1686 | MODULE_ALIAS("snd-hda-codec-id:10de001a"); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3328a259a242..4261bb8eec1d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl) | |||
1133 | nid = spec->autocfg.hp_pins[i]; | 1133 | nid = spec->autocfg.hp_pins[i]; |
1134 | if (!nid) | 1134 | if (!nid) |
1135 | break; | 1135 | break; |
1136 | if (snd_hda_jack_detect(codec, nid)) { | 1136 | alc_report_jack(codec, nid); |
1137 | spec->jack_present = 1; | 1137 | spec->jack_present |= snd_hda_jack_detect(codec, nid); |
1138 | break; | ||
1139 | } | ||
1140 | alc_report_jack(codec, spec->autocfg.hp_pins[i]); | ||
1141 | } | 1138 | } |
1142 | 1139 | ||
1143 | mute = spec->jack_present ? HDA_AMP_MUTE : 0; | 1140 | mute = spec->jack_present ? HDA_AMP_MUTE : 0; |
@@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { | |||
15015 | SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), | 15012 | SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), |
15016 | SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), | 15013 | SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), |
15017 | SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), | 15014 | SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), |
15018 | SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC), | 15015 | SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC), |
15019 | SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), | 15016 | SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), |
15020 | SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), | 15017 | SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), |
15021 | SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), | 15018 | SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 9ea48b425d0b..bd7b123f6440 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -586,7 +586,12 @@ static hda_nid_t stac92hd83xxx_pin_nids[10] = { | |||
586 | 0x0f, 0x10, 0x11, 0x1f, 0x20, | 586 | 0x0f, 0x10, 0x11, 0x1f, 0x20, |
587 | }; | 587 | }; |
588 | 588 | ||
589 | static hda_nid_t stac92hd88xxx_pin_nids[10] = { | 589 | static hda_nid_t stac92hd87xxx_pin_nids[6] = { |
590 | 0x0a, 0x0b, 0x0c, 0x0d, | ||
591 | 0x0f, 0x11, | ||
592 | }; | ||
593 | |||
594 | static hda_nid_t stac92hd88xxx_pin_nids[8] = { | ||
590 | 0x0a, 0x0b, 0x0c, 0x0d, | 595 | 0x0a, 0x0b, 0x0c, 0x0d, |
591 | 0x0f, 0x11, 0x1f, 0x20, | 596 | 0x0f, 0x11, 0x1f, 0x20, |
592 | }; | 597 | }; |
@@ -5430,12 +5435,13 @@ again: | |||
5430 | switch (codec->vendor_id) { | 5435 | switch (codec->vendor_id) { |
5431 | case 0x111d76d1: | 5436 | case 0x111d76d1: |
5432 | case 0x111d76d9: | 5437 | case 0x111d76d9: |
5438 | case 0x111d76e5: | ||
5433 | spec->dmic_nids = stac92hd87b_dmic_nids; | 5439 | spec->dmic_nids = stac92hd87b_dmic_nids; |
5434 | spec->num_dmics = stac92xx_connected_ports(codec, | 5440 | spec->num_dmics = stac92xx_connected_ports(codec, |
5435 | stac92hd87b_dmic_nids, | 5441 | stac92hd87b_dmic_nids, |
5436 | STAC92HD87B_NUM_DMICS); | 5442 | STAC92HD87B_NUM_DMICS); |
5437 | spec->num_pins = ARRAY_SIZE(stac92hd88xxx_pin_nids); | 5443 | spec->num_pins = ARRAY_SIZE(stac92hd87xxx_pin_nids); |
5438 | spec->pin_nids = stac92hd88xxx_pin_nids; | 5444 | spec->pin_nids = stac92hd87xxx_pin_nids; |
5439 | spec->mono_nid = 0; | 5445 | spec->mono_nid = 0; |
5440 | spec->num_pwrs = 0; | 5446 | spec->num_pwrs = 0; |
5441 | break; | 5447 | break; |
@@ -5443,6 +5449,7 @@ again: | |||
5443 | case 0x111d7667: | 5449 | case 0x111d7667: |
5444 | case 0x111d7668: | 5450 | case 0x111d7668: |
5445 | case 0x111d7669: | 5451 | case 0x111d7669: |
5452 | case 0x111d76e3: | ||
5446 | spec->num_dmics = stac92xx_connected_ports(codec, | 5453 | spec->num_dmics = stac92xx_connected_ports(codec, |
5447 | stac92hd88xxx_dmic_nids, | 5454 | stac92hd88xxx_dmic_nids, |
5448 | STAC92HD88XXX_NUM_DMICS); | 5455 | STAC92HD88XXX_NUM_DMICS); |
@@ -6387,6 +6394,8 @@ static struct hda_codec_preset snd_hda_preset_sigmatel[] = { | |||
6387 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, | 6394 | { .id = 0x111d76cd, .name = "92HD89F2", .patch = patch_stac92hd73xx }, |
6388 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, | 6395 | { .id = 0x111d76ce, .name = "92HD89F1", .patch = patch_stac92hd73xx }, |
6389 | { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, | 6396 | { .id = 0x111d76e0, .name = "92HD91BXX", .patch = patch_stac92hd83xxx}, |
6397 | { .id = 0x111d76e3, .name = "92HD98BXX", .patch = patch_stac92hd83xxx}, | ||
6398 | { .id = 0x111d76e5, .name = "92HD99BXX", .patch = patch_stac92hd83xxx}, | ||
6390 | { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, | 6399 | { .id = 0x111d76e7, .name = "92HD90BXX", .patch = patch_stac92hd83xxx}, |
6391 | {} /* terminator */ | 6400 | {} /* terminator */ |
6392 | }; | 6401 | }; |
diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c index a76c3260d941..63b0054200a8 100644 --- a/sound/pci/hda/patch_via.c +++ b/sound/pci/hda/patch_via.c | |||
@@ -567,7 +567,7 @@ static void via_auto_init_analog_input(struct hda_codec *codec) | |||
567 | hda_nid_t nid = cfg->inputs[i].pin; | 567 | hda_nid_t nid = cfg->inputs[i].pin; |
568 | if (spec->smart51_enabled && is_smart51_pins(spec, nid)) | 568 | if (spec->smart51_enabled && is_smart51_pins(spec, nid)) |
569 | ctl = PIN_OUT; | 569 | ctl = PIN_OUT; |
570 | else if (i == AUTO_PIN_MIC) | 570 | else if (cfg->inputs[i].type == AUTO_PIN_MIC) |
571 | ctl = PIN_VREF50; | 571 | ctl = PIN_VREF50; |
572 | else | 572 | else |
573 | ctl = PIN_IN; | 573 | ctl = PIN_IN; |
diff --git a/sound/soc/codecs/cx20442.c b/sound/soc/codecs/cx20442.c index bb4bf65b9e7e..0bb424af956f 100644 --- a/sound/soc/codecs/cx20442.c +++ b/sound/soc/codecs/cx20442.c | |||
@@ -367,7 +367,7 @@ static int cx20442_codec_remove(struct snd_soc_codec *codec) | |||
367 | return 0; | 367 | return 0; |
368 | } | 368 | } |
369 | 369 | ||
370 | static const u8 cx20442_reg = CX20442_TELOUT | CX20442_MIC; | 370 | static const u8 cx20442_reg; |
371 | 371 | ||
372 | static struct snd_soc_codec_driver cx20442_codec_dev = { | 372 | static struct snd_soc_codec_driver cx20442_codec_dev = { |
373 | .probe = cx20442_codec_probe, | 373 | .probe = cx20442_codec_probe, |
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 987476a5895f..017d99ceb42e 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c | |||
@@ -1482,7 +1482,7 @@ int wm8903_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack, | |||
1482 | WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, | 1482 | WM8903_MICDET_EINT | WM8903_MICSHRT_EINT, |
1483 | irq_mask); | 1483 | irq_mask); |
1484 | 1484 | ||
1485 | if (det && shrt) { | 1485 | if (det || shrt) { |
1486 | /* Enable mic detection, this may not have been set through | 1486 | /* Enable mic detection, this may not have been set through |
1487 | * platform data (eg, if the defaults are OK). */ | 1487 | * platform data (eg, if the defaults are OK). */ |
1488 | snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, | 1488 | snd_soc_update_bits(codec, WM8903_WRITE_SEQUENCER_0, |
diff --git a/sound/soc/codecs/wm8903.h b/sound/soc/codecs/wm8903.h index e8490f3edd03..e3ec2433b215 100644 --- a/sound/soc/codecs/wm8903.h +++ b/sound/soc/codecs/wm8903.h | |||
@@ -165,7 +165,7 @@ extern int wm8903_mic_detect(struct snd_soc_codec *codec, | |||
165 | 165 | ||
166 | #define WM8903_VMID_RES_50K 2 | 166 | #define WM8903_VMID_RES_50K 2 |
167 | #define WM8903_VMID_RES_250K 3 | 167 | #define WM8903_VMID_RES_250K 3 |
168 | #define WM8903_VMID_RES_5K 4 | 168 | #define WM8903_VMID_RES_5K 6 |
169 | 169 | ||
170 | /* | 170 | /* |
171 | * R8 (0x08) - Analogue DAC 0 | 171 | * R8 (0x08) - Analogue DAC 0 |
diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c index 4bbc3442703f..8dfb0a0da673 100644 --- a/sound/soc/codecs/wm8978.c +++ b/sound/soc/codecs/wm8978.c | |||
@@ -145,18 +145,18 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { | |||
145 | SOC_SINGLE("DAC Playback Limiter Threshold", | 145 | SOC_SINGLE("DAC Playback Limiter Threshold", |
146 | WM8978_DAC_LIMITER_2, 4, 7, 0), | 146 | WM8978_DAC_LIMITER_2, 4, 7, 0), |
147 | SOC_SINGLE("DAC Playback Limiter Boost", | 147 | SOC_SINGLE("DAC Playback Limiter Boost", |
148 | WM8978_DAC_LIMITER_2, 0, 15, 0), | 148 | WM8978_DAC_LIMITER_2, 0, 12, 0), |
149 | 149 | ||
150 | SOC_ENUM("ALC Enable Switch", alc1), | 150 | SOC_ENUM("ALC Enable Switch", alc1), |
151 | SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0), | 151 | SOC_SINGLE("ALC Capture Min Gain", WM8978_ALC_CONTROL_1, 0, 7, 0), |
152 | SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0), | 152 | SOC_SINGLE("ALC Capture Max Gain", WM8978_ALC_CONTROL_1, 3, 7, 0), |
153 | 153 | ||
154 | SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 7, 0), | 154 | SOC_SINGLE("ALC Capture Hold", WM8978_ALC_CONTROL_2, 4, 10, 0), |
155 | SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0), | 155 | SOC_SINGLE("ALC Capture Target", WM8978_ALC_CONTROL_2, 0, 15, 0), |
156 | 156 | ||
157 | SOC_ENUM("ALC Capture Mode", alc3), | 157 | SOC_ENUM("ALC Capture Mode", alc3), |
158 | SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 15, 0), | 158 | SOC_SINGLE("ALC Capture Decay", WM8978_ALC_CONTROL_3, 4, 10, 0), |
159 | SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 15, 0), | 159 | SOC_SINGLE("ALC Capture Attack", WM8978_ALC_CONTROL_3, 0, 10, 0), |
160 | 160 | ||
161 | SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0), | 161 | SOC_SINGLE("ALC Capture Noise Gate Switch", WM8978_NOISE_GATE, 3, 1, 0), |
162 | SOC_SINGLE("ALC Capture Noise Gate Threshold", | 162 | SOC_SINGLE("ALC Capture Noise Gate Threshold", |
@@ -211,8 +211,10 @@ static const struct snd_kcontrol_new wm8978_snd_controls[] = { | |||
211 | WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1), | 211 | WM8978_LOUT2_SPK_CONTROL, WM8978_ROUT2_SPK_CONTROL, 6, 1, 1), |
212 | 212 | ||
213 | /* DAC / ADC oversampling */ | 213 | /* DAC / ADC oversampling */ |
214 | SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, 8, 1, 0), | 214 | SOC_SINGLE("DAC 128x Oversampling Switch", WM8978_DAC_CONTROL, |
215 | SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, 8, 1, 0), | 215 | 5, 1, 0), |
216 | SOC_SINGLE("ADC 128x Oversampling Switch", WM8978_ADC_CONTROL, | ||
217 | 5, 1, 0), | ||
216 | }; | 218 | }; |
217 | 219 | ||
218 | /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */ | 220 | /* Mixer #1: Output (OUT1, OUT2) Mixer: mix AUX, Input mixer output and DAC */ |
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 37b8aa8a680f..c6c958ee5d59 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
@@ -107,6 +107,12 @@ struct wm8994_priv { | |||
107 | 107 | ||
108 | int revision; | 108 | int revision; |
109 | struct wm8994_pdata *pdata; | 109 | struct wm8994_pdata *pdata; |
110 | |||
111 | unsigned int aif1clk_enable:1; | ||
112 | unsigned int aif2clk_enable:1; | ||
113 | |||
114 | unsigned int aif1clk_disable:1; | ||
115 | unsigned int aif2clk_disable:1; | ||
110 | }; | 116 | }; |
111 | 117 | ||
112 | static int wm8994_readable(unsigned int reg) | 118 | static int wm8994_readable(unsigned int reg) |
@@ -1004,6 +1010,110 @@ static void wm8994_update_class_w(struct snd_soc_codec *codec) | |||
1004 | } | 1010 | } |
1005 | } | 1011 | } |
1006 | 1012 | ||
1013 | static int late_enable_ev(struct snd_soc_dapm_widget *w, | ||
1014 | struct snd_kcontrol *kcontrol, int event) | ||
1015 | { | ||
1016 | struct snd_soc_codec *codec = w->codec; | ||
1017 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1018 | |||
1019 | switch (event) { | ||
1020 | case SND_SOC_DAPM_PRE_PMU: | ||
1021 | if (wm8994->aif1clk_enable) { | ||
1022 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | ||
1023 | WM8994_AIF1CLK_ENA_MASK, | ||
1024 | WM8994_AIF1CLK_ENA); | ||
1025 | wm8994->aif1clk_enable = 0; | ||
1026 | } | ||
1027 | if (wm8994->aif2clk_enable) { | ||
1028 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | ||
1029 | WM8994_AIF2CLK_ENA_MASK, | ||
1030 | WM8994_AIF2CLK_ENA); | ||
1031 | wm8994->aif2clk_enable = 0; | ||
1032 | } | ||
1033 | break; | ||
1034 | } | ||
1035 | |||
1036 | return 0; | ||
1037 | } | ||
1038 | |||
1039 | static int late_disable_ev(struct snd_soc_dapm_widget *w, | ||
1040 | struct snd_kcontrol *kcontrol, int event) | ||
1041 | { | ||
1042 | struct snd_soc_codec *codec = w->codec; | ||
1043 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1044 | |||
1045 | switch (event) { | ||
1046 | case SND_SOC_DAPM_POST_PMD: | ||
1047 | if (wm8994->aif1clk_disable) { | ||
1048 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | ||
1049 | WM8994_AIF1CLK_ENA_MASK, 0); | ||
1050 | wm8994->aif1clk_disable = 0; | ||
1051 | } | ||
1052 | if (wm8994->aif2clk_disable) { | ||
1053 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | ||
1054 | WM8994_AIF2CLK_ENA_MASK, 0); | ||
1055 | wm8994->aif2clk_disable = 0; | ||
1056 | } | ||
1057 | break; | ||
1058 | } | ||
1059 | |||
1060 | return 0; | ||
1061 | } | ||
1062 | |||
1063 | static int aif1clk_ev(struct snd_soc_dapm_widget *w, | ||
1064 | struct snd_kcontrol *kcontrol, int event) | ||
1065 | { | ||
1066 | struct snd_soc_codec *codec = w->codec; | ||
1067 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1068 | |||
1069 | switch (event) { | ||
1070 | case SND_SOC_DAPM_PRE_PMU: | ||
1071 | wm8994->aif1clk_enable = 1; | ||
1072 | break; | ||
1073 | case SND_SOC_DAPM_POST_PMD: | ||
1074 | wm8994->aif1clk_disable = 1; | ||
1075 | break; | ||
1076 | } | ||
1077 | |||
1078 | return 0; | ||
1079 | } | ||
1080 | |||
1081 | static int aif2clk_ev(struct snd_soc_dapm_widget *w, | ||
1082 | struct snd_kcontrol *kcontrol, int event) | ||
1083 | { | ||
1084 | struct snd_soc_codec *codec = w->codec; | ||
1085 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | ||
1086 | |||
1087 | switch (event) { | ||
1088 | case SND_SOC_DAPM_PRE_PMU: | ||
1089 | wm8994->aif2clk_enable = 1; | ||
1090 | break; | ||
1091 | case SND_SOC_DAPM_POST_PMD: | ||
1092 | wm8994->aif2clk_disable = 1; | ||
1093 | break; | ||
1094 | } | ||
1095 | |||
1096 | return 0; | ||
1097 | } | ||
1098 | |||
1099 | static int adc_mux_ev(struct snd_soc_dapm_widget *w, | ||
1100 | struct snd_kcontrol *kcontrol, int event) | ||
1101 | { | ||
1102 | late_enable_ev(w, kcontrol, event); | ||
1103 | return 0; | ||
1104 | } | ||
1105 | |||
1106 | static int dac_ev(struct snd_soc_dapm_widget *w, | ||
1107 | struct snd_kcontrol *kcontrol, int event) | ||
1108 | { | ||
1109 | struct snd_soc_codec *codec = w->codec; | ||
1110 | unsigned int mask = 1 << w->shift; | ||
1111 | |||
1112 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | ||
1113 | mask, mask); | ||
1114 | return 0; | ||
1115 | } | ||
1116 | |||
1007 | static const char *hp_mux_text[] = { | 1117 | static const char *hp_mux_text[] = { |
1008 | "Mixer", | 1118 | "Mixer", |
1009 | "DAC", | 1119 | "DAC", |
@@ -1272,6 +1382,59 @@ static const struct soc_enum aif2dacr_src_enum = | |||
1272 | static const struct snd_kcontrol_new aif2dacr_src_mux = | 1382 | static const struct snd_kcontrol_new aif2dacr_src_mux = |
1273 | SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); | 1383 | SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum); |
1274 | 1384 | ||
1385 | static const struct snd_soc_dapm_widget wm8994_lateclk_revd_widgets[] = { | ||
1386 | SND_SOC_DAPM_SUPPLY("AIF1CLK", SND_SOC_NOPM, 0, 0, aif1clk_ev, | ||
1387 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | ||
1388 | SND_SOC_DAPM_SUPPLY("AIF2CLK", SND_SOC_NOPM, 0, 0, aif2clk_ev, | ||
1389 | SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), | ||
1390 | |||
1391 | SND_SOC_DAPM_PGA_E("Late DAC1L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1392 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1393 | SND_SOC_DAPM_PGA_E("Late DAC1R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1394 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1395 | SND_SOC_DAPM_PGA_E("Late DAC2L Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1396 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1397 | SND_SOC_DAPM_PGA_E("Late DAC2R Enable PGA", SND_SOC_NOPM, 0, 0, NULL, 0, | ||
1398 | late_enable_ev, SND_SOC_DAPM_PRE_PMU), | ||
1399 | |||
1400 | SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev) | ||
1401 | }; | ||
1402 | |||
1403 | static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { | ||
1404 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), | ||
1405 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0) | ||
1406 | }; | ||
1407 | |||
1408 | static const struct snd_soc_dapm_widget wm8994_dac_revd_widgets[] = { | ||
1409 | SND_SOC_DAPM_DAC_E("DAC2L", NULL, SND_SOC_NOPM, 3, 0, | ||
1410 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1411 | SND_SOC_DAPM_DAC_E("DAC2R", NULL, SND_SOC_NOPM, 2, 0, | ||
1412 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1413 | SND_SOC_DAPM_DAC_E("DAC1L", NULL, SND_SOC_NOPM, 1, 0, | ||
1414 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1415 | SND_SOC_DAPM_DAC_E("DAC1R", NULL, SND_SOC_NOPM, 0, 0, | ||
1416 | dac_ev, SND_SOC_DAPM_PRE_PMU), | ||
1417 | }; | ||
1418 | |||
1419 | static const struct snd_soc_dapm_widget wm8994_dac_widgets[] = { | ||
1420 | SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), | ||
1421 | SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), | ||
1422 | SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), | ||
1423 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), | ||
1424 | }; | ||
1425 | |||
1426 | static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { | ||
1427 | SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, | ||
1428 | adc_mux_ev, SND_SOC_DAPM_PRE_PMU), | ||
1429 | SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, | ||
1430 | adc_mux_ev, SND_SOC_DAPM_PRE_PMU), | ||
1431 | }; | ||
1432 | |||
1433 | static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { | ||
1434 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), | ||
1435 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), | ||
1436 | }; | ||
1437 | |||
1275 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { | 1438 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { |
1276 | SND_SOC_DAPM_INPUT("DMIC1DAT"), | 1439 | SND_SOC_DAPM_INPUT("DMIC1DAT"), |
1277 | SND_SOC_DAPM_INPUT("DMIC2DAT"), | 1440 | SND_SOC_DAPM_INPUT("DMIC2DAT"), |
@@ -1284,9 +1447,6 @@ SND_SOC_DAPM_SUPPLY("DSP1CLK", WM8994_CLOCKING_1, 3, 0, NULL, 0), | |||
1284 | SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), | 1447 | SND_SOC_DAPM_SUPPLY("DSP2CLK", WM8994_CLOCKING_1, 2, 0, NULL, 0), |
1285 | SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), | 1448 | SND_SOC_DAPM_SUPPLY("DSPINTCLK", WM8994_CLOCKING_1, 1, 0, NULL, 0), |
1286 | 1449 | ||
1287 | SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, NULL, 0), | ||
1288 | SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, NULL, 0), | ||
1289 | |||
1290 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, | 1450 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1L", NULL, |
1291 | 0, WM8994_POWER_MANAGEMENT_4, 9, 0), | 1451 | 0, WM8994_POWER_MANAGEMENT_4, 9, 0), |
1292 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, | 1452 | SND_SOC_DAPM_AIF_OUT("AIF1ADC1R", NULL, |
@@ -1369,14 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0), | |||
1369 | SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), | 1529 | SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), |
1370 | SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), | 1530 | SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), |
1371 | 1531 | ||
1372 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), | ||
1373 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), | ||
1374 | |||
1375 | SND_SOC_DAPM_DAC("DAC2L", NULL, WM8994_POWER_MANAGEMENT_5, 3, 0), | ||
1376 | SND_SOC_DAPM_DAC("DAC2R", NULL, WM8994_POWER_MANAGEMENT_5, 2, 0), | ||
1377 | SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), | ||
1378 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), | ||
1379 | |||
1380 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), | 1532 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), |
1381 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), | 1533 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), |
1382 | 1534 | ||
@@ -1516,14 +1668,12 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1516 | { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, | 1668 | { "AIF2ADC Mux", "AIF3DACDAT", "AIF3ADCDAT" }, |
1517 | 1669 | ||
1518 | /* DAC1 inputs */ | 1670 | /* DAC1 inputs */ |
1519 | { "DAC1L", NULL, "DAC1L Mixer" }, | ||
1520 | { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, | 1671 | { "DAC1L Mixer", "AIF2 Switch", "AIF2DACL" }, |
1521 | { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, | 1672 | { "DAC1L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, |
1522 | { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, | 1673 | { "DAC1L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, |
1523 | { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, | 1674 | { "DAC1L Mixer", "Left Sidetone Switch", "Left Sidetone" }, |
1524 | { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, | 1675 | { "DAC1L Mixer", "Right Sidetone Switch", "Right Sidetone" }, |
1525 | 1676 | ||
1526 | { "DAC1R", NULL, "DAC1R Mixer" }, | ||
1527 | { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, | 1677 | { "DAC1R Mixer", "AIF2 Switch", "AIF2DACR" }, |
1528 | { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, | 1678 | { "DAC1R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, |
1529 | { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, | 1679 | { "DAC1R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, |
@@ -1532,7 +1682,6 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1532 | 1682 | ||
1533 | /* DAC2/AIF2 outputs */ | 1683 | /* DAC2/AIF2 outputs */ |
1534 | { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, | 1684 | { "AIF2ADCL", NULL, "AIF2DAC2L Mixer" }, |
1535 | { "DAC2L", NULL, "AIF2DAC2L Mixer" }, | ||
1536 | { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, | 1685 | { "AIF2DAC2L Mixer", "AIF2 Switch", "AIF2DACL" }, |
1537 | { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, | 1686 | { "AIF2DAC2L Mixer", "AIF1.2 Switch", "AIF1DAC2L" }, |
1538 | { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, | 1687 | { "AIF2DAC2L Mixer", "AIF1.1 Switch", "AIF1DAC1L" }, |
@@ -1540,7 +1689,6 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1540 | { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, | 1689 | { "AIF2DAC2L Mixer", "Right Sidetone Switch", "Right Sidetone" }, |
1541 | 1690 | ||
1542 | { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, | 1691 | { "AIF2ADCR", NULL, "AIF2DAC2R Mixer" }, |
1543 | { "DAC2R", NULL, "AIF2DAC2R Mixer" }, | ||
1544 | { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, | 1692 | { "AIF2DAC2R Mixer", "AIF2 Switch", "AIF2DACR" }, |
1545 | { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, | 1693 | { "AIF2DAC2R Mixer", "AIF1.2 Switch", "AIF1DAC2R" }, |
1546 | { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, | 1694 | { "AIF2DAC2R Mixer", "AIF1.1 Switch", "AIF1DAC1R" }, |
@@ -1584,6 +1732,24 @@ static const struct snd_soc_dapm_route intercon[] = { | |||
1584 | { "Right Headphone Mux", "DAC", "DAC1R" }, | 1732 | { "Right Headphone Mux", "DAC", "DAC1R" }, |
1585 | }; | 1733 | }; |
1586 | 1734 | ||
1735 | static const struct snd_soc_dapm_route wm8994_lateclk_revd_intercon[] = { | ||
1736 | { "DAC1L", NULL, "Late DAC1L Enable PGA" }, | ||
1737 | { "Late DAC1L Enable PGA", NULL, "DAC1L Mixer" }, | ||
1738 | { "DAC1R", NULL, "Late DAC1R Enable PGA" }, | ||
1739 | { "Late DAC1R Enable PGA", NULL, "DAC1R Mixer" }, | ||
1740 | { "DAC2L", NULL, "Late DAC2L Enable PGA" }, | ||
1741 | { "Late DAC2L Enable PGA", NULL, "AIF2DAC2L Mixer" }, | ||
1742 | { "DAC2R", NULL, "Late DAC2R Enable PGA" }, | ||
1743 | { "Late DAC2R Enable PGA", NULL, "AIF2DAC2R Mixer" } | ||
1744 | }; | ||
1745 | |||
1746 | static const struct snd_soc_dapm_route wm8994_lateclk_intercon[] = { | ||
1747 | { "DAC1L", NULL, "DAC1L Mixer" }, | ||
1748 | { "DAC1R", NULL, "DAC1R Mixer" }, | ||
1749 | { "DAC2L", NULL, "AIF2DAC2L Mixer" }, | ||
1750 | { "DAC2R", NULL, "AIF2DAC2R Mixer" }, | ||
1751 | }; | ||
1752 | |||
1587 | static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { | 1753 | static const struct snd_soc_dapm_route wm8994_revd_intercon[] = { |
1588 | { "AIF1DACDAT", NULL, "AIF2DACDAT" }, | 1754 | { "AIF1DACDAT", NULL, "AIF2DACDAT" }, |
1589 | { "AIF2DACDAT", NULL, "AIF1DACDAT" }, | 1755 | { "AIF2DACDAT", NULL, "AIF1DACDAT" }, |
@@ -2514,6 +2680,22 @@ static int wm8994_resume(struct snd_soc_codec *codec) | |||
2514 | { | 2680 | { |
2515 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); | 2681 | struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); |
2516 | int i, ret; | 2682 | int i, ret; |
2683 | unsigned int val, mask; | ||
2684 | |||
2685 | if (wm8994->revision < 4) { | ||
2686 | /* force a HW read */ | ||
2687 | val = wm8994_reg_read(codec->control_data, | ||
2688 | WM8994_POWER_MANAGEMENT_5); | ||
2689 | |||
2690 | /* modify the cache only */ | ||
2691 | codec->cache_only = 1; | ||
2692 | mask = WM8994_DAC1R_ENA | WM8994_DAC1L_ENA | | ||
2693 | WM8994_DAC2R_ENA | WM8994_DAC2L_ENA; | ||
2694 | val &= mask; | ||
2695 | snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, | ||
2696 | mask, val); | ||
2697 | codec->cache_only = 0; | ||
2698 | } | ||
2517 | 2699 | ||
2518 | /* Restore the registers */ | 2700 | /* Restore the registers */ |
2519 | ret = snd_soc_cache_sync(codec); | 2701 | ret = snd_soc_cache_sync(codec); |
@@ -2847,11 +3029,10 @@ static void wm8958_default_micdet(u16 status, void *data) | |||
2847 | report |= SND_JACK_BTN_5; | 3029 | report |= SND_JACK_BTN_5; |
2848 | 3030 | ||
2849 | done: | 3031 | done: |
2850 | snd_soc_jack_report(wm8994->micdet[0].jack, | 3032 | snd_soc_jack_report(wm8994->micdet[0].jack, report, |
2851 | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | | 3033 | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | |
2852 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | | 3034 | SND_JACK_BTN_3 | SND_JACK_BTN_4 | SND_JACK_BTN_5 | |
2853 | SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT, | 3035 | SND_JACK_MICROPHONE | SND_JACK_VIDEOOUT); |
2854 | report); | ||
2855 | } | 3036 | } |
2856 | 3037 | ||
2857 | /** | 3038 | /** |
@@ -3125,10 +3306,31 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3125 | case WM8994: | 3306 | case WM8994: |
3126 | snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, | 3307 | snd_soc_dapm_new_controls(dapm, wm8994_specific_dapm_widgets, |
3127 | ARRAY_SIZE(wm8994_specific_dapm_widgets)); | 3308 | ARRAY_SIZE(wm8994_specific_dapm_widgets)); |
3309 | if (wm8994->revision < 4) { | ||
3310 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, | ||
3311 | ARRAY_SIZE(wm8994_lateclk_revd_widgets)); | ||
3312 | snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets, | ||
3313 | ARRAY_SIZE(wm8994_adc_revd_widgets)); | ||
3314 | snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, | ||
3315 | ARRAY_SIZE(wm8994_dac_revd_widgets)); | ||
3316 | } else { | ||
3317 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, | ||
3318 | ARRAY_SIZE(wm8994_lateclk_widgets)); | ||
3319 | snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, | ||
3320 | ARRAY_SIZE(wm8994_adc_widgets)); | ||
3321 | snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, | ||
3322 | ARRAY_SIZE(wm8994_dac_widgets)); | ||
3323 | } | ||
3128 | break; | 3324 | break; |
3129 | case WM8958: | 3325 | case WM8958: |
3130 | snd_soc_add_controls(codec, wm8958_snd_controls, | 3326 | snd_soc_add_controls(codec, wm8958_snd_controls, |
3131 | ARRAY_SIZE(wm8958_snd_controls)); | 3327 | ARRAY_SIZE(wm8958_snd_controls)); |
3328 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, | ||
3329 | ARRAY_SIZE(wm8994_lateclk_widgets)); | ||
3330 | snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, | ||
3331 | ARRAY_SIZE(wm8994_adc_widgets)); | ||
3332 | snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, | ||
3333 | ARRAY_SIZE(wm8994_dac_widgets)); | ||
3132 | snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets, | 3334 | snd_soc_dapm_new_controls(dapm, wm8958_dapm_widgets, |
3133 | ARRAY_SIZE(wm8958_dapm_widgets)); | 3335 | ARRAY_SIZE(wm8958_dapm_widgets)); |
3134 | break; | 3336 | break; |
@@ -3143,12 +3345,19 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
3143 | snd_soc_dapm_add_routes(dapm, wm8994_intercon, | 3345 | snd_soc_dapm_add_routes(dapm, wm8994_intercon, |
3144 | ARRAY_SIZE(wm8994_intercon)); | 3346 | ARRAY_SIZE(wm8994_intercon)); |
3145 | 3347 | ||
3146 | if (wm8994->revision < 4) | 3348 | if (wm8994->revision < 4) { |
3147 | snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, | 3349 | snd_soc_dapm_add_routes(dapm, wm8994_revd_intercon, |
3148 | ARRAY_SIZE(wm8994_revd_intercon)); | 3350 | ARRAY_SIZE(wm8994_revd_intercon)); |
3149 | 3351 | snd_soc_dapm_add_routes(dapm, wm8994_lateclk_revd_intercon, | |
3352 | ARRAY_SIZE(wm8994_lateclk_revd_intercon)); | ||
3353 | } else { | ||
3354 | snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, | ||
3355 | ARRAY_SIZE(wm8994_lateclk_intercon)); | ||
3356 | } | ||
3150 | break; | 3357 | break; |
3151 | case WM8958: | 3358 | case WM8958: |
3359 | snd_soc_dapm_add_routes(dapm, wm8994_lateclk_intercon, | ||
3360 | ARRAY_SIZE(wm8994_lateclk_intercon)); | ||
3152 | snd_soc_dapm_add_routes(dapm, wm8958_intercon, | 3361 | snd_soc_dapm_add_routes(dapm, wm8958_intercon, |
3153 | ARRAY_SIZE(wm8958_intercon)); | 3362 | ARRAY_SIZE(wm8958_intercon)); |
3154 | break; | 3363 | break; |
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index 43825b2102a5..cce704c275c6 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/device.h> | ||
18 | #include <linux/pm.h> | 19 | #include <linux/pm.h> |
19 | #include <linux/i2c.h> | 20 | #include <linux/i2c.h> |
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
@@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c, | |||
1341 | wm9081->control_type = SND_SOC_I2C; | 1342 | wm9081->control_type = SND_SOC_I2C; |
1342 | wm9081->control_data = i2c; | 1343 | wm9081->control_data = i2c; |
1343 | 1344 | ||
1345 | if (dev_get_platdata(&i2c->dev)) | ||
1346 | memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev), | ||
1347 | sizeof(wm9081->retune)); | ||
1348 | |||
1344 | ret = snd_soc_register_codec(&i2c->dev, | 1349 | ret = snd_soc_register_codec(&i2c->dev, |
1345 | &soc_codec_dev_wm9081, &wm9081_dai, 1); | 1350 | &soc_codec_dev_wm9081, &wm9081_dai, 1); |
1346 | if (ret < 0) | 1351 | if (ret < 0) |
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c index 613df5db0b32..516892706063 100644 --- a/sound/soc/codecs/wm_hubs.c +++ b/sound/soc/codecs/wm_hubs.c | |||
@@ -674,6 +674,9 @@ SND_SOC_DAPM_OUTPUT("LINEOUT2N"), | |||
674 | }; | 674 | }; |
675 | 675 | ||
676 | static const struct snd_soc_dapm_route analogue_routes[] = { | 676 | static const struct snd_soc_dapm_route analogue_routes[] = { |
677 | { "MICBIAS1", NULL, "CLK_SYS" }, | ||
678 | { "MICBIAS2", NULL, "CLK_SYS" }, | ||
679 | |||
677 | { "IN1L PGA", "IN1LP Switch", "IN1LP" }, | 680 | { "IN1L PGA", "IN1LP Switch", "IN1LP" }, |
678 | { "IN1L PGA", "IN1LN Switch", "IN1LN" }, | 681 | { "IN1L PGA", "IN1LN Switch", "IN1LN" }, |
679 | 682 | ||
diff --git a/sound/soc/imx/eukrea-tlv320.c b/sound/soc/imx/eukrea-tlv320.c index e20c9e1457c0..1e9bccae4e80 100644 --- a/sound/soc/imx/eukrea-tlv320.c +++ b/sound/soc/imx/eukrea-tlv320.c | |||
@@ -79,7 +79,7 @@ static struct snd_soc_dai_link eukrea_tlv320_dai = { | |||
79 | .name = "tlv320aic23", | 79 | .name = "tlv320aic23", |
80 | .stream_name = "TLV320AIC23", | 80 | .stream_name = "TLV320AIC23", |
81 | .codec_dai_name = "tlv320aic23-hifi", | 81 | .codec_dai_name = "tlv320aic23-hifi", |
82 | .platform_name = "imx-pcm-audio.0", | 82 | .platform_name = "imx-fiq-pcm-audio.0", |
83 | .codec_name = "tlv320aic23-codec.0-001a", | 83 | .codec_name = "tlv320aic23-codec.0-001a", |
84 | .cpu_dai_name = "imx-ssi.0", | 84 | .cpu_dai_name = "imx-ssi.0", |
85 | .ops = &eukrea_tlv320_snd_ops, | 85 | .ops = &eukrea_tlv320_snd_ops, |
diff --git a/sound/soc/omap/am3517evm.c b/sound/soc/omap/am3517evm.c index 161750443ebc..73dde4a1adc3 100644 --- a/sound/soc/omap/am3517evm.c +++ b/sound/soc/omap/am3517evm.c | |||
@@ -139,7 +139,7 @@ static struct snd_soc_dai_link am3517evm_dai = { | |||
139 | .cpu_dai_name ="omap-mcbsp-dai.0", | 139 | .cpu_dai_name ="omap-mcbsp-dai.0", |
140 | .codec_dai_name = "tlv320aic23-hifi", | 140 | .codec_dai_name = "tlv320aic23-hifi", |
141 | .platform_name = "omap-pcm-audio", | 141 | .platform_name = "omap-pcm-audio", |
142 | .codec_name = "tlv320aic23-codec", | 142 | .codec_name = "tlv320aic23-codec.2-001a", |
143 | .init = am3517evm_aic23_init, | 143 | .init = am3517evm_aic23_init, |
144 | .ops = &am3517evm_ops, | 144 | .ops = &am3517evm_ops, |
145 | }; | 145 | }; |
diff --git a/sound/soc/pxa/e740_wm9705.c b/sound/soc/pxa/e740_wm9705.c index 28333e7d9c50..dc65650a6fa1 100644 --- a/sound/soc/pxa/e740_wm9705.c +++ b/sound/soc/pxa/e740_wm9705.c | |||
@@ -117,7 +117,7 @@ static struct snd_soc_dai_link e740_dai[] = { | |||
117 | { | 117 | { |
118 | .name = "AC97", | 118 | .name = "AC97", |
119 | .stream_name = "AC97 HiFi", | 119 | .stream_name = "AC97 HiFi", |
120 | .cpu_dai_name = "pxa-ac97.0", | 120 | .cpu_dai_name = "pxa2xx-ac97", |
121 | .codec_dai_name = "wm9705-hifi", | 121 | .codec_dai_name = "wm9705-hifi", |
122 | .platform_name = "pxa-pcm-audio", | 122 | .platform_name = "pxa-pcm-audio", |
123 | .codec_name = "wm9705-codec", | 123 | .codec_name = "wm9705-codec", |
@@ -126,7 +126,7 @@ static struct snd_soc_dai_link e740_dai[] = { | |||
126 | { | 126 | { |
127 | .name = "AC97 Aux", | 127 | .name = "AC97 Aux", |
128 | .stream_name = "AC97 Aux", | 128 | .stream_name = "AC97 Aux", |
129 | .cpu_dai_name = "pxa-ac97.1", | 129 | .cpu_dai_name = "pxa2xx-ac97-aux", |
130 | .codec_dai_name = "wm9705-aux", | 130 | .codec_dai_name = "wm9705-aux", |
131 | .platform_name = "pxa-pcm-audio", | 131 | .platform_name = "pxa-pcm-audio", |
132 | .codec_name = "wm9705-codec", | 132 | .codec_name = "wm9705-codec", |
diff --git a/sound/soc/pxa/e750_wm9705.c b/sound/soc/pxa/e750_wm9705.c index 01bf31675c55..51897fcd911b 100644 --- a/sound/soc/pxa/e750_wm9705.c +++ b/sound/soc/pxa/e750_wm9705.c | |||
@@ -99,7 +99,7 @@ static struct snd_soc_dai_link e750_dai[] = { | |||
99 | { | 99 | { |
100 | .name = "AC97", | 100 | .name = "AC97", |
101 | .stream_name = "AC97 HiFi", | 101 | .stream_name = "AC97 HiFi", |
102 | .cpu_dai_name = "pxa-ac97.0", | 102 | .cpu_dai_name = "pxa2xx-ac97", |
103 | .codec_dai_name = "wm9705-hifi", | 103 | .codec_dai_name = "wm9705-hifi", |
104 | .platform_name = "pxa-pcm-audio", | 104 | .platform_name = "pxa-pcm-audio", |
105 | .codec_name = "wm9705-codec", | 105 | .codec_name = "wm9705-codec", |
@@ -109,7 +109,7 @@ static struct snd_soc_dai_link e750_dai[] = { | |||
109 | { | 109 | { |
110 | .name = "AC97 Aux", | 110 | .name = "AC97 Aux", |
111 | .stream_name = "AC97 Aux", | 111 | .stream_name = "AC97 Aux", |
112 | .cpu_dai_name = "pxa-ac97.1", | 112 | .cpu_dai_name = "pxa2xx-ac97-aux", |
113 | .codec_dai_name ="wm9705-aux", | 113 | .codec_dai_name ="wm9705-aux", |
114 | .platform_name = "pxa-pcm-audio", | 114 | .platform_name = "pxa-pcm-audio", |
115 | .codec_name = "wm9705-codec", | 115 | .codec_name = "wm9705-codec", |
diff --git a/sound/soc/pxa/e800_wm9712.c b/sound/soc/pxa/e800_wm9712.c index c6a37c6ef23b..053ed208e59f 100644 --- a/sound/soc/pxa/e800_wm9712.c +++ b/sound/soc/pxa/e800_wm9712.c | |||
@@ -89,7 +89,7 @@ static struct snd_soc_dai_link e800_dai[] = { | |||
89 | { | 89 | { |
90 | .name = "AC97", | 90 | .name = "AC97", |
91 | .stream_name = "AC97 HiFi", | 91 | .stream_name = "AC97 HiFi", |
92 | .cpu_dai_name = "pxa-ac97.0", | 92 | .cpu_dai_name = "pxa2xx-ac97", |
93 | .codec_dai_name = "wm9712-hifi", | 93 | .codec_dai_name = "wm9712-hifi", |
94 | .platform_name = "pxa-pcm-audio", | 94 | .platform_name = "pxa-pcm-audio", |
95 | .codec_name = "wm9712-codec", | 95 | .codec_name = "wm9712-codec", |
@@ -98,7 +98,7 @@ static struct snd_soc_dai_link e800_dai[] = { | |||
98 | { | 98 | { |
99 | .name = "AC97 Aux", | 99 | .name = "AC97 Aux", |
100 | .stream_name = "AC97 Aux", | 100 | .stream_name = "AC97 Aux", |
101 | .cpu_dai_name = "pxa-ac97.1", | 101 | .cpu_dai_name = "pxa2xx-ac97-aux", |
102 | .codec_dai_name ="wm9712-aux", | 102 | .codec_dai_name ="wm9712-aux", |
103 | .platform_name = "pxa-pcm-audio", | 103 | .platform_name = "pxa-pcm-audio", |
104 | .codec_name = "wm9712-codec", | 104 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/em-x270.c b/sound/soc/pxa/em-x270.c index fc22e6eefc98..b13a4252812d 100644 --- a/sound/soc/pxa/em-x270.c +++ b/sound/soc/pxa/em-x270.c | |||
@@ -37,7 +37,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { | |||
37 | { | 37 | { |
38 | .name = "AC97", | 38 | .name = "AC97", |
39 | .stream_name = "AC97 HiFi", | 39 | .stream_name = "AC97 HiFi", |
40 | .cpu_dai_name = "pxa-ac97.0", | 40 | .cpu_dai_name = "pxa2xx-ac97", |
41 | .codec_dai_name = "wm9712-hifi", | 41 | .codec_dai_name = "wm9712-hifi", |
42 | .platform_name = "pxa-pcm-audio", | 42 | .platform_name = "pxa-pcm-audio", |
43 | .codec_name = "wm9712-codec", | 43 | .codec_name = "wm9712-codec", |
@@ -45,7 +45,7 @@ static struct snd_soc_dai_link em_x270_dai[] = { | |||
45 | { | 45 | { |
46 | .name = "AC97 Aux", | 46 | .name = "AC97 Aux", |
47 | .stream_name = "AC97 Aux", | 47 | .stream_name = "AC97 Aux", |
48 | .cpu_dai_name = "pxa-ac97.1", | 48 | .cpu_dai_name = "pxa2xx-ac97-aux", |
49 | .codec_dai_name ="wm9712-aux", | 49 | .codec_dai_name ="wm9712-aux", |
50 | .platform_name = "pxa-pcm-audio", | 50 | .platform_name = "pxa-pcm-audio", |
51 | .codec_name = "wm9712-codec", | 51 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/mioa701_wm9713.c b/sound/soc/pxa/mioa701_wm9713.c index 0d70fc8c12bd..38ca6759907e 100644 --- a/sound/soc/pxa/mioa701_wm9713.c +++ b/sound/soc/pxa/mioa701_wm9713.c | |||
@@ -162,7 +162,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { | |||
162 | { | 162 | { |
163 | .name = "AC97", | 163 | .name = "AC97", |
164 | .stream_name = "AC97 HiFi", | 164 | .stream_name = "AC97 HiFi", |
165 | .cpu_dai_name = "pxa-ac97.0", | 165 | .cpu_dai_name = "pxa2xx-ac97", |
166 | .codec_dai_name = "wm9713-hifi", | 166 | .codec_dai_name = "wm9713-hifi", |
167 | .codec_name = "wm9713-codec", | 167 | .codec_name = "wm9713-codec", |
168 | .init = mioa701_wm9713_init, | 168 | .init = mioa701_wm9713_init, |
@@ -172,7 +172,7 @@ static struct snd_soc_dai_link mioa701_dai[] = { | |||
172 | { | 172 | { |
173 | .name = "AC97 Aux", | 173 | .name = "AC97 Aux", |
174 | .stream_name = "AC97 Aux", | 174 | .stream_name = "AC97 Aux", |
175 | .cpu_dai_name = "pxa-ac97.1", | 175 | .cpu_dai_name = "pxa2xx-ac97-aux", |
176 | .codec_dai_name ="wm9713-aux", | 176 | .codec_dai_name ="wm9713-aux", |
177 | .codec_name = "wm9713-codec", | 177 | .codec_name = "wm9713-codec", |
178 | .platform_name = "pxa-pcm-audio", | 178 | .platform_name = "pxa-pcm-audio", |
diff --git a/sound/soc/pxa/palm27x.c b/sound/soc/pxa/palm27x.c index 857db96d4a4f..504e4004f004 100644 --- a/sound/soc/pxa/palm27x.c +++ b/sound/soc/pxa/palm27x.c | |||
@@ -132,7 +132,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { | |||
132 | { | 132 | { |
133 | .name = "AC97 HiFi", | 133 | .name = "AC97 HiFi", |
134 | .stream_name = "AC97 HiFi", | 134 | .stream_name = "AC97 HiFi", |
135 | .cpu_dai_name = "pxa-ac97.0", | 135 | .cpu_dai_name = "pxa2xx-ac97", |
136 | .codec_dai_name = "wm9712-hifi", | 136 | .codec_dai_name = "wm9712-hifi", |
137 | .codec_name = "wm9712-codec", | 137 | .codec_name = "wm9712-codec", |
138 | .platform_name = "pxa-pcm-audio", | 138 | .platform_name = "pxa-pcm-audio", |
@@ -141,7 +141,7 @@ static struct snd_soc_dai_link palm27x_dai[] = { | |||
141 | { | 141 | { |
142 | .name = "AC97 Aux", | 142 | .name = "AC97 Aux", |
143 | .stream_name = "AC97 Aux", | 143 | .stream_name = "AC97 Aux", |
144 | .cpu_dai_name = "pxa-ac97.1", | 144 | .cpu_dai_name = "pxa2xx-ac97-aux", |
145 | .codec_dai_name = "wm9712-aux", | 145 | .codec_dai_name = "wm9712-aux", |
146 | .codec_name = "wm9712-codec", | 146 | .codec_name = "wm9712-codec", |
147 | .platform_name = "pxa-pcm-audio", | 147 | .platform_name = "pxa-pcm-audio", |
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c index f75804ef0897..4b6e5d608b42 100644 --- a/sound/soc/pxa/tosa.c +++ b/sound/soc/pxa/tosa.c | |||
@@ -219,7 +219,7 @@ static struct snd_soc_dai_link tosa_dai[] = { | |||
219 | { | 219 | { |
220 | .name = "AC97", | 220 | .name = "AC97", |
221 | .stream_name = "AC97 HiFi", | 221 | .stream_name = "AC97 HiFi", |
222 | .cpu_dai_name = "pxa-ac97.0", | 222 | .cpu_dai_name = "pxa2xx-ac97", |
223 | .codec_dai_name = "wm9712-hifi", | 223 | .codec_dai_name = "wm9712-hifi", |
224 | .platform_name = "pxa-pcm-audio", | 224 | .platform_name = "pxa-pcm-audio", |
225 | .codec_name = "wm9712-codec", | 225 | .codec_name = "wm9712-codec", |
@@ -229,7 +229,7 @@ static struct snd_soc_dai_link tosa_dai[] = { | |||
229 | { | 229 | { |
230 | .name = "AC97 Aux", | 230 | .name = "AC97 Aux", |
231 | .stream_name = "AC97 Aux", | 231 | .stream_name = "AC97 Aux", |
232 | .cpu_dai_name = "pxa-ac97.1", | 232 | .cpu_dai_name = "pxa2xx-ac97-aux", |
233 | .codec_dai_name = "wm9712-aux", | 233 | .codec_dai_name = "wm9712-aux", |
234 | .platform_name = "pxa-pcm-audio", | 234 | .platform_name = "pxa-pcm-audio", |
235 | .codec_name = "wm9712-codec", | 235 | .codec_name = "wm9712-codec", |
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b222a7d72027..25bba108fea3 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c | |||
@@ -166,7 +166,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
166 | .stream_name = "AC97 HiFi", | 166 | .stream_name = "AC97 HiFi", |
167 | .codec_name = "wm9713-codec", | 167 | .codec_name = "wm9713-codec", |
168 | .platform_name = "pxa-pcm-audio", | 168 | .platform_name = "pxa-pcm-audio", |
169 | .cpu_dai_name = "pxa-ac97.0", | 169 | .cpu_dai_name = "pxa2xx-ac97", |
170 | .codec_name = "wm9713-hifi", | 170 | .codec_name = "wm9713-hifi", |
171 | .init = zylonite_wm9713_init, | 171 | .init = zylonite_wm9713_init, |
172 | }, | 172 | }, |
@@ -175,7 +175,7 @@ static struct snd_soc_dai_link zylonite_dai[] = { | |||
175 | .stream_name = "AC97 Aux", | 175 | .stream_name = "AC97 Aux", |
176 | .codec_name = "wm9713-codec", | 176 | .codec_name = "wm9713-codec", |
177 | .platform_name = "pxa-pcm-audio", | 177 | .platform_name = "pxa-pcm-audio", |
178 | .cpu_dai_name = "pxa-ac97.1", | 178 | .cpu_dai_name = "pxa2xx-ac97-aux", |
179 | .codec_name = "wm9713-aux", | 179 | .codec_name = "wm9713-aux", |
180 | }, | 180 | }, |
181 | { | 181 | { |
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 8194f150bab7..1790f83ee665 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -712,7 +712,15 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) | |||
712 | !path->connected(path->source, path->sink)) | 712 | !path->connected(path->source, path->sink)) |
713 | continue; | 713 | continue; |
714 | 714 | ||
715 | if (path->sink && path->sink->power_check && | 715 | if (!path->sink) |
716 | continue; | ||
717 | |||
718 | if (path->sink->force) { | ||
719 | power = 1; | ||
720 | break; | ||
721 | } | ||
722 | |||
723 | if (path->sink->power_check && | ||
716 | path->sink->power_check(path->sink)) { | 724 | path->sink->power_check(path->sink)) { |
717 | power = 1; | 725 | power = 1; |
718 | break; | 726 | break; |
@@ -933,7 +941,7 @@ static void dapm_seq_run(struct snd_soc_dapm_context *dapm, | |||
933 | } | 941 | } |
934 | 942 | ||
935 | if (!list_empty(&pending)) | 943 | if (!list_empty(&pending)) |
936 | dapm_seq_run_coalesced(dapm, &pending); | 944 | dapm_seq_run_coalesced(cur_dapm, &pending); |
937 | } | 945 | } |
938 | 946 | ||
939 | static void dapm_widget_update(struct snd_soc_dapm_context *dapm) | 947 | static void dapm_widget_update(struct snd_soc_dapm_context *dapm) |
@@ -1627,6 +1635,7 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_add_routes); | |||
1627 | int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) | 1635 | int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) |
1628 | { | 1636 | { |
1629 | struct snd_soc_dapm_widget *w; | 1637 | struct snd_soc_dapm_widget *w; |
1638 | unsigned int val; | ||
1630 | 1639 | ||
1631 | list_for_each_entry(w, &dapm->card->widgets, list) | 1640 | list_for_each_entry(w, &dapm->card->widgets, list) |
1632 | { | 1641 | { |
@@ -1675,6 +1684,18 @@ int snd_soc_dapm_new_widgets(struct snd_soc_dapm_context *dapm) | |||
1675 | case snd_soc_dapm_post: | 1684 | case snd_soc_dapm_post: |
1676 | break; | 1685 | break; |
1677 | } | 1686 | } |
1687 | |||
1688 | /* Read the initial power state from the device */ | ||
1689 | if (w->reg >= 0) { | ||
1690 | val = snd_soc_read(w->codec, w->reg); | ||
1691 | val &= 1 << w->shift; | ||
1692 | if (w->invert) | ||
1693 | val = !val; | ||
1694 | |||
1695 | if (val) | ||
1696 | w->power = 1; | ||
1697 | } | ||
1698 | |||
1678 | w->new = 1; | 1699 | w->new = 1; |
1679 | } | 1700 | } |
1680 | 1701 | ||
diff --git a/sound/usb/card.c b/sound/usb/card.c index 800f7cb4f251..c0f8270bc199 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -323,6 +323,7 @@ static int snd_usb_audio_create(struct usb_device *dev, int idx, | |||
323 | return -ENOMEM; | 323 | return -ENOMEM; |
324 | } | 324 | } |
325 | 325 | ||
326 | mutex_init(&chip->shutdown_mutex); | ||
326 | chip->index = idx; | 327 | chip->index = idx; |
327 | chip->dev = dev; | 328 | chip->dev = dev; |
328 | chip->card = card; | 329 | chip->card = card; |
@@ -531,6 +532,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) | |||
531 | chip = ptr; | 532 | chip = ptr; |
532 | card = chip->card; | 533 | card = chip->card; |
533 | mutex_lock(®ister_mutex); | 534 | mutex_lock(®ister_mutex); |
535 | mutex_lock(&chip->shutdown_mutex); | ||
534 | chip->shutdown = 1; | 536 | chip->shutdown = 1; |
535 | chip->num_interfaces--; | 537 | chip->num_interfaces--; |
536 | if (chip->num_interfaces <= 0) { | 538 | if (chip->num_interfaces <= 0) { |
@@ -548,9 +550,11 @@ static void snd_usb_audio_disconnect(struct usb_device *dev, void *ptr) | |||
548 | snd_usb_mixer_disconnect(p); | 550 | snd_usb_mixer_disconnect(p); |
549 | } | 551 | } |
550 | usb_chip[chip->index] = NULL; | 552 | usb_chip[chip->index] = NULL; |
553 | mutex_unlock(&chip->shutdown_mutex); | ||
551 | mutex_unlock(®ister_mutex); | 554 | mutex_unlock(®ister_mutex); |
552 | snd_card_free_when_closed(card); | 555 | snd_card_free_when_closed(card); |
553 | } else { | 556 | } else { |
557 | mutex_unlock(&chip->shutdown_mutex); | ||
554 | mutex_unlock(®ister_mutex); | 558 | mutex_unlock(®ister_mutex); |
555 | } | 559 | } |
556 | } | 560 | } |
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c index 4132522ac90f..e3f680526cb5 100644 --- a/sound/usb/pcm.c +++ b/sound/usb/pcm.c | |||
@@ -361,6 +361,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, | |||
361 | } | 361 | } |
362 | 362 | ||
363 | if (changed) { | 363 | if (changed) { |
364 | mutex_lock(&subs->stream->chip->shutdown_mutex); | ||
364 | /* format changed */ | 365 | /* format changed */ |
365 | snd_usb_release_substream_urbs(subs, 0); | 366 | snd_usb_release_substream_urbs(subs, 0); |
366 | /* influenced: period_bytes, channels, rate, format, */ | 367 | /* influenced: period_bytes, channels, rate, format, */ |
@@ -368,6 +369,7 @@ static int snd_usb_hw_params(struct snd_pcm_substream *substream, | |||
368 | params_rate(hw_params), | 369 | params_rate(hw_params), |
369 | snd_pcm_format_physical_width(params_format(hw_params)) * | 370 | snd_pcm_format_physical_width(params_format(hw_params)) * |
370 | params_channels(hw_params)); | 371 | params_channels(hw_params)); |
372 | mutex_unlock(&subs->stream->chip->shutdown_mutex); | ||
371 | } | 373 | } |
372 | 374 | ||
373 | return ret; | 375 | return ret; |
@@ -385,8 +387,9 @@ static int snd_usb_hw_free(struct snd_pcm_substream *substream) | |||
385 | subs->cur_audiofmt = NULL; | 387 | subs->cur_audiofmt = NULL; |
386 | subs->cur_rate = 0; | 388 | subs->cur_rate = 0; |
387 | subs->period_bytes = 0; | 389 | subs->period_bytes = 0; |
388 | if (!subs->stream->chip->shutdown) | 390 | mutex_lock(&subs->stream->chip->shutdown_mutex); |
389 | snd_usb_release_substream_urbs(subs, 0); | 391 | snd_usb_release_substream_urbs(subs, 0); |
392 | mutex_unlock(&subs->stream->chip->shutdown_mutex); | ||
390 | return snd_pcm_lib_free_vmalloc_buffer(substream); | 393 | return snd_pcm_lib_free_vmalloc_buffer(substream); |
391 | } | 394 | } |
392 | 395 | ||
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index db3eb21627ee..6e66fffe87f5 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h | |||
@@ -36,6 +36,7 @@ struct snd_usb_audio { | |||
36 | struct snd_card *card; | 36 | struct snd_card *card; |
37 | u32 usb_id; | 37 | u32 usb_id; |
38 | int shutdown; | 38 | int shutdown; |
39 | struct mutex shutdown_mutex; | ||
39 | unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ | 40 | unsigned int txfr_quirk:1; /* Subframe boundaries on transfers */ |
40 | int num_interfaces; | 41 | int num_interfaces; |
41 | int num_suspended_intf; | 42 | int num_suspended_intf; |
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c index 746cf03cb05d..0ace786e83e0 100644 --- a/tools/perf/builtin-timechart.c +++ b/tools/perf/builtin-timechart.c | |||
@@ -264,9 +264,6 @@ pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) | |||
264 | c->start_time = start; | 264 | c->start_time = start; |
265 | if (p->start_time == 0 || p->start_time > start) | 265 | if (p->start_time == 0 || p->start_time > start) |
266 | p->start_time = start; | 266 | p->start_time = start; |
267 | |||
268 | if (cpu > numcpus) | ||
269 | numcpus = cpu; | ||
270 | } | 267 | } |
271 | 268 | ||
272 | #define MAX_CPUS 4096 | 269 | #define MAX_CPUS 4096 |
@@ -511,6 +508,9 @@ static int process_sample_event(event_t *event __used, | |||
511 | if (!event_str) | 508 | if (!event_str) |
512 | return 0; | 509 | return 0; |
513 | 510 | ||
511 | if (sample->cpu > numcpus) | ||
512 | numcpus = sample->cpu; | ||
513 | |||
514 | if (strcmp(event_str, "power:cpu_idle") == 0) { | 514 | if (strcmp(event_str, "power:cpu_idle") == 0) { |
515 | struct power_processor_entry *ppe = (void *)te; | 515 | struct power_processor_entry *ppe = (void *)te; |
516 | if (ppe->state == (u32)PWR_EVENT_EXIT) | 516 | if (ppe->state == (u32)PWR_EVENT_EXIT) |
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index f6a929e74981..0866bcdb5e8e 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -270,11 +270,15 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
270 | const char *name, bool is_kallsyms) | 270 | const char *name, bool is_kallsyms) |
271 | { | 271 | { |
272 | const size_t size = PATH_MAX; | 272 | const size_t size = PATH_MAX; |
273 | char *realname = realpath(name, NULL), | 273 | char *realname, *filename = malloc(size), |
274 | *filename = malloc(size), | ||
275 | *linkname = malloc(size), *targetname; | 274 | *linkname = malloc(size), *targetname; |
276 | int len, err = -1; | 275 | int len, err = -1; |
277 | 276 | ||
277 | if (is_kallsyms) | ||
278 | realname = (char *)name; | ||
279 | else | ||
280 | realname = realpath(name, NULL); | ||
281 | |||
278 | if (realname == NULL || filename == NULL || linkname == NULL) | 282 | if (realname == NULL || filename == NULL || linkname == NULL) |
279 | goto out_free; | 283 | goto out_free; |
280 | 284 | ||
@@ -306,7 +310,8 @@ int build_id_cache__add_s(const char *sbuild_id, const char *debugdir, | |||
306 | if (symlink(targetname, linkname) == 0) | 310 | if (symlink(targetname, linkname) == 0) |
307 | err = 0; | 311 | err = 0; |
308 | out_free: | 312 | out_free: |
309 | free(realname); | 313 | if (!is_kallsyms) |
314 | free(realname); | ||
310 | free(filename); | 315 | free(filename); |
311 | free(linkname); | 316 | free(linkname); |
312 | return err; | 317 | return err; |
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 32f4f1f2f6e4..df51560f16f7 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c | |||
@@ -585,6 +585,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
585 | { | 585 | { |
586 | struct sort_entry *se; | 586 | struct sort_entry *se; |
587 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; | 587 | u64 period, total, period_sys, period_us, period_guest_sys, period_guest_us; |
588 | u64 nr_events; | ||
588 | const char *sep = symbol_conf.field_sep; | 589 | const char *sep = symbol_conf.field_sep; |
589 | int ret; | 590 | int ret; |
590 | 591 | ||
@@ -593,6 +594,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
593 | 594 | ||
594 | if (pair_hists) { | 595 | if (pair_hists) { |
595 | period = self->pair ? self->pair->period : 0; | 596 | period = self->pair ? self->pair->period : 0; |
597 | nr_events = self->pair ? self->pair->nr_events : 0; | ||
596 | total = pair_hists->stats.total_period; | 598 | total = pair_hists->stats.total_period; |
597 | period_sys = self->pair ? self->pair->period_sys : 0; | 599 | period_sys = self->pair ? self->pair->period_sys : 0; |
598 | period_us = self->pair ? self->pair->period_us : 0; | 600 | period_us = self->pair ? self->pair->period_us : 0; |
@@ -600,6 +602,7 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
600 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; | 602 | period_guest_us = self->pair ? self->pair->period_guest_us : 0; |
601 | } else { | 603 | } else { |
602 | period = self->period; | 604 | period = self->period; |
605 | nr_events = self->nr_events; | ||
603 | total = session_total; | 606 | total = session_total; |
604 | period_sys = self->period_sys; | 607 | period_sys = self->period_sys; |
605 | period_us = self->period_us; | 608 | period_us = self->period_us; |
@@ -640,9 +643,9 @@ int hist_entry__snprintf(struct hist_entry *self, char *s, size_t size, | |||
640 | 643 | ||
641 | if (symbol_conf.show_nr_samples) { | 644 | if (symbol_conf.show_nr_samples) { |
642 | if (sep) | 645 | if (sep) |
643 | ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, period); | 646 | ret += snprintf(s + ret, size - ret, "%c%" PRIu64, *sep, nr_events); |
644 | else | 647 | else |
645 | ret += snprintf(s + ret, size - ret, "%11" PRIu64, period); | 648 | ret += snprintf(s + ret, size - ret, "%11" PRIu64, nr_events); |
646 | } | 649 | } |
647 | 650 | ||
648 | if (pair_hists) { | 651 | if (pair_hists) { |
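The hist.c hunk above makes the symbol_conf.show_nr_samples output print nr_events instead of period: period accumulates per-sample weights, so it only equals the sample count when every sample happens to carry the same weight. A toy illustration of the difference (values made up):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t periods[] = { 4000, 4000, 120000 };	/* per-sample weights */
	uint64_t period = 0, nr_events = 0;

	for (size_t i = 0; i < sizeof(periods) / sizeof(periods[0]); i++) {
		period += periods[i];
		nr_events++;
	}

	/* The samples column should show 3, not 128000. */
	printf("nr_events=%" PRIu64 " period=%" PRIu64 "\n", nr_events, period);
	return 0;
}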
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index fb737fe9be91..96c866045d60 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c | |||
@@ -456,9 +456,9 @@ void svg_legenda(void) | |||
456 | return; | 456 | return; |
457 | 457 | ||
458 | svg_legenda_box(0, "Running", "sample"); | 458 | svg_legenda_box(0, "Running", "sample"); |
459 | svg_legenda_box(100, "Idle","rect.c1"); | 459 | svg_legenda_box(100, "Idle","c1"); |
460 | svg_legenda_box(200, "Deeper Idle", "rect.c3"); | 460 | svg_legenda_box(200, "Deeper Idle", "c3"); |
461 | svg_legenda_box(350, "Deepest Idle", "rect.c6"); | 461 | svg_legenda_box(350, "Deepest Idle", "c6"); |
462 | svg_legenda_box(550, "Sleeping", "process2"); | 462 | svg_legenda_box(550, "Sleeping", "process2"); |
463 | svg_legenda_box(650, "Waiting for cpu", "waiting"); | 463 | svg_legenda_box(650, "Waiting for cpu", "waiting"); |
464 | svg_legenda_box(800, "Blocked on IO", "blocked"); | 464 | svg_legenda_box(800, "Blocked on IO", "blocked"); |
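The svghelper.c hunk above trims the legend class strings from CSS-selector form ("rect.c1") to bare class names ("c1"): the string ends up in the rect's class attribute, and a class attribute of "rect.c1" never matches a stylesheet rule written as rect.c1. A small standalone example of the distinction, assuming a rect.cN-style stylesheet as the original strings suggest (the output file name is arbitrary):

#include <stdio.h>

int main(void)
{
	FILE *svg = fopen("legend-example.svg", "w");
	if (!svg)
		return 1;

	fprintf(svg, "<svg xmlns=\"http://www.w3.org/2000/svg\">\n");
	fprintf(svg, "<style>rect.c1 { fill: rgb(255,255,204); }</style>\n");
	/* class="c1" matches the rect.c1 rule; class="rect.c1" would not. */
	fprintf(svg, "<rect class=\"c1\" x=\"0\" y=\"0\" width=\"80\" height=\"30\"/>\n");
	fprintf(svg, "</svg>\n");

	fclose(svg);
	return 0;
}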
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 7821d0e6866f..b1bf490aff88 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -1836,7 +1836,7 @@ int dso__load_vmlinux(struct dso *self, struct map *map, | |||
1836 | int err = -1, fd; | 1836 | int err = -1, fd; |
1837 | char symfs_vmlinux[PATH_MAX]; | 1837 | char symfs_vmlinux[PATH_MAX]; |
1838 | 1838 | ||
1839 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s/%s", | 1839 | snprintf(symfs_vmlinux, sizeof(symfs_vmlinux), "%s%s", |
1840 | symbol_conf.symfs, vmlinux); | 1840 | symbol_conf.symfs, vmlinux); |
1841 | fd = open(symfs_vmlinux, O_RDONLY); | 1841 | fd = open(symfs_vmlinux, O_RDONLY); |
1842 | if (fd < 0) | 1842 | if (fd < 0) |
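The symbol.c hunk above changes the vmlinux path construction from "%s/%s" to "%s%s". Assuming symbol_conf.symfs defaults to an empty string when --symfs is not given, the old format prepended a stray '/' and turned relative vmlinux paths into absolute ones; plain concatenation leaves them intact. A tiny demonstration (the vmlinux name is an example value):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	const char *symfs = "";		/* default: no --symfs given */
	const char *vmlinux = "vmlinux";
	char with_slash[PATH_MAX], concat[PATH_MAX];

	snprintf(with_slash, sizeof(with_slash), "%s/%s", symfs, vmlinux);
	snprintf(concat, sizeof(concat), "%s%s", symfs, vmlinux);

	/* Prints "/vmlinux" for the old format, "vmlinux" for the new one. */
	printf("old: %s\nnew: %s\n", with_slash, concat);
	return 0;
}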