185 files changed, 1401 insertions, 862 deletions
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index fe5c099b8fc8..4edd78dfb362 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -40,8 +40,6 @@ decnet.txt
 	- info on using the DECnet networking layer in Linux.
 depca.txt
 	- the Digital DEPCA/EtherWORKS DE1?? and DE2?? LANCE Ethernet driver
-dgrs.txt
-	- the Digi International RightSwitch SE-X Ethernet driver
 dmfe.txt
 	- info on the Davicom DM9102(A)/DM9132/DM9801 fast ethernet driver.
 e100.txt
@@ -50,8 +48,6 @@ e1000.txt
 	- info on Intel's E1000 line of gigabit ethernet boards
 eql.txt
 	- serial IP load balancing
-ethertap.txt
-	- the Ethertap user space packet reception and transmission driver
 ewrk3.txt
 	- the Digital EtherWORKS 3 DE203/4/5 Ethernet driver
 filter.txt
@@ -104,8 +100,6 @@ tuntap.txt
 	- TUN/TAP device driver, allowing user space Rx/Tx of packets.
 vortex.txt
 	- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
-wavelan.txt
-	- AT&T GIS (nee NCR) WaveLAN card: An Ethernet-like radio transceiver
 x25.txt
 	- general info on X.25 development.
 x25-iface.txt
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index aefd1e681804..04ca06325b08 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -61,7 +61,6 @@ before the more general line given above as the first match is the one taken.
 	create dns_resolver foo:* * /usr/sbin/dns.foo %k
 
 
-
 =====
 USAGE
 =====
@@ -104,6 +103,14 @@ implemented in the module can be called after doing:
 returned also.
 
 
+===============================
+READING DNS KEYS FROM USERSPACE
+===============================
+
+Keys of dns_resolver type can be read from userspace using keyctl_read() or
+"keyctl read/print/pipe".
+
+
 =========
 MECHANISM
 =========
diff --git a/MAINTAINERS b/MAINTAINERS
index 8afba6321e24..560ecce38ff5 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1010,6 +1010,15 @@ L:	linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:	Maintained
 F:	arch/arm/mach-s5p*/
 
+ARM/SAMSUNG MOBILE MACHINE SUPPORT
+M:	Kyungmin Park <kyungmin.park@samsung.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/mach-s5pv210/mach-aquila.c
+F:	arch/arm/mach-s5pv210/mach-goni.c
+F:	arch/arm/mach-exynos4/mach-universal_c210.c
+F:	arch/arm/mach-exynos4/mach-nuri.c
+
 ARM/SAMSUNG S5P SERIES FIMC SUPPORT
 M:	Kyungmin Park <kyungmin.park@samsung.com>
 M:	Sylwester Nawrocki <s.nawrocki@samsung.com>
@@ -1467,6 +1476,7 @@ F:	include/net/bluetooth/
 
 BONDING DRIVER
 M:	Jay Vosburgh <fubar@us.ibm.com>
+M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 W:	http://sourceforge.net/projects/bonding/
 S:	Supported
@@ -2033,7 +2043,7 @@ F: Documentation/scsi/dc395x.txt
 F:	drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M:	Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
+M:	Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L:	dccp@vger.kernel.org
 W:	http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 S:	Maintained
@@ -3519,7 +3529,7 @@ F: drivers/hwmon/jc42.c
 F:	Documentation/hwmon/jc42
 
 JFS FILESYSTEM
-M:	Dave Kleikamp <shaggy@linux.vnet.ibm.com>
+M:	Dave Kleikamp <shaggy@kernel.org>
 L:	jfs-discussion@lists.sourceforge.net
 W:	http://jfs.sourceforge.net/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/shaggy/jfs-2.6.git
@@ -5171,6 +5181,7 @@ F: drivers/char/random.c
 
 RAPIDIO SUBSYSTEM
 M:	Matt Porter <mporter@kernel.crashing.org>
+M:	Alexandre Bounine <alexandre.bounine@idt.com>
 S:	Maintained
 F:	drivers/rapidio/
 
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 47f63d480141..cc31bec2e316 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -11,6 +11,7 @@ config ALPHA
 	select HAVE_GENERIC_HARDIRQS
 	select GENERIC_IRQ_PROBE
 	select AUTO_IRQ_AFFINITY if SMP
+	select GENERIC_HARDIRQS_NO_DEPRECATED
 	help
 	  The Alpha is a 64-bit general-purpose processor designed and
 	  marketed by the Digital Equipment Corporation of blessed memory,
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
index 9ab234f48dd8..a19d60082299 100644
--- a/arch/alpha/kernel/irq.c
+++ b/arch/alpha/kernel/irq.c
@@ -44,11 +44,16 @@ static char irq_user_affinity[NR_IRQS];
 
 int irq_select_affinity(unsigned int irq)
 {
-	struct irq_desc *desc = irq_to_desc[irq];
+	struct irq_data *data = irq_get_irq_data(irq);
+	struct irq_chip *chip;
 	static int last_cpu;
 	int cpu = last_cpu + 1;
 
-	if (!desc || !get_irq_desc_chip(desc)->set_affinity || irq_user_affinity[irq])
+	if (!data)
+		return 1;
+	chip = irq_data_get_irq_chip(data);
+
+	if (!chip->irq_set_affinity || irq_user_affinity[irq])
 		return 1;
 
 	while (!cpu_possible(cpu) ||
@@ -56,8 +61,8 @@ int irq_select_affinity(unsigned int irq)
 		cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
 	last_cpu = cpu;
 
-	cpumask_copy(desc->affinity, cpumask_of(cpu));
-	get_irq_desc_chip(desc)->set_affinity(irq, cpumask_of(cpu));
+	cpumask_copy(data->affinity, cpumask_of(cpu));
+	chip->irq_set_affinity(data, cpumask_of(cpu), false);
 	return 0;
 }
 #endif /* CONFIG_SMP */
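For orientation, this is the accessor pattern the rewritten irq_select_affinity()
relies on, shown as a stand-alone sketch (the helper name example_mask_one is
illustrative and not from the tree): irq_get_irq_data() maps an irq number to
its struct irq_data, irq_data_get_irq_chip() retrieves the chip, and the chip
methods are then called with the irq_data pointer.

	#include <linux/irq.h>

	/* Illustrative only: mask one interrupt through the irq_data interface. */
	static void example_mask_one(unsigned int irq)
	{
		struct irq_data *data = irq_get_irq_data(irq);
		struct irq_chip *chip;

		if (!data)
			return;
		chip = irq_data_get_irq_chip(data);
		if (chip && chip->irq_mask)
			chip->irq_mask(data);
	}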
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
index 2d0679b60939..411ca11d0a18 100644
--- a/arch/alpha/kernel/irq_alpha.c
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -228,14 +228,9 @@ struct irqaction timer_irqaction = {
 void __init
 init_rtc_irq(void)
 {
-	struct irq_desc *desc = irq_to_desc(RTC_IRQ);
-
-	if (desc) {
-		desc->status |= IRQ_DISABLED;
-		set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
-					      handle_simple_irq, "RTC");
-		setup_irq(RTC_IRQ, &timer_irqaction);
-	}
+	set_irq_chip_and_handler_name(RTC_IRQ, &no_irq_chip,
+				      handle_simple_irq, "RTC");
+	setup_irq(RTC_IRQ, &timer_irqaction);
 }
 
 /* Dummy irqactions. */
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
index 956ea0ed1694..c7cc9813e45f 100644
--- a/arch/alpha/kernel/irq_i8259.c
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -33,10 +33,10 @@ i8259_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 inline void
-i8259a_enable_irq(unsigned int irq)
+i8259a_enable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	i8259_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 	spin_unlock(&i8259_irq_lock);
 }
 
@@ -47,16 +47,18 @@ __i8259a_disable_irq(unsigned int irq)
 }
 
 void
-i8259a_disable_irq(unsigned int irq)
+i8259a_disable_irq(struct irq_data *d)
 {
 	spin_lock(&i8259_irq_lock);
-	__i8259a_disable_irq(irq);
+	__i8259a_disable_irq(d->irq);
 	spin_unlock(&i8259_irq_lock);
 }
 
 void
-i8259a_mask_and_ack_irq(unsigned int irq)
+i8259a_mask_and_ack_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	spin_lock(&i8259_irq_lock);
 	__i8259a_disable_irq(irq);
 
@@ -71,9 +73,9 @@ i8259a_mask_and_ack_irq(unsigned int irq)
 
 struct irq_chip i8259a_irq_type = {
 	.name		= "XT-PIC",
-	.unmask		= i8259a_enable_irq,
-	.mask		= i8259a_disable_irq,
-	.mask_ack	= i8259a_mask_and_ack_irq,
+	.irq_unmask	= i8259a_enable_irq,
+	.irq_mask	= i8259a_disable_irq,
+	.irq_mask_ack	= i8259a_mask_and_ack_irq,
 };
 
 void __init
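The same conversion is repeated for every Alpha interrupt controller in the
hunks that follow, so here is its general shape once, sketched against a
hypothetical "foo" controller (none of these names appear in the patch): the
handlers take a struct irq_data pointer instead of a bare irq number, recover
the number from d->irq when they need it, and are wired up through the
irq_-prefixed irq_chip fields.

	#include <linux/irq.h>

	static unsigned long foo_cached_mask;	/* bit set = source enabled */

	static void foo_update_hw(unsigned long mask)
	{
		/* write 'mask' to the hypothetical controller's enable register */
	}

	static void foo_unmask_irq(struct irq_data *d)
	{
		foo_update_hw(foo_cached_mask |= 1UL << d->irq);
	}

	static void foo_mask_irq(struct irq_data *d)
	{
		foo_update_hw(foo_cached_mask &= ~(1UL << d->irq));
	}

	static struct irq_chip foo_irq_type = {
		.name		= "FOO",
		.irq_unmask	= foo_unmask_irq,
		.irq_mask	= foo_mask_irq,
		.irq_mask_ack	= foo_mask_irq,
	};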
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
index b63ccd7386f1..d507a234b05d 100644
--- a/arch/alpha/kernel/irq_impl.h
+++ b/arch/alpha/kernel/irq_impl.h
@@ -31,11 +31,9 @@ extern void init_rtc_irq(void);
 
 extern void common_init_isa_dma(void);
 
-extern void i8259a_enable_irq(unsigned int);
-extern void i8259a_disable_irq(unsigned int);
-extern void i8259a_mask_and_ack_irq(unsigned int);
-extern unsigned int i8259a_startup_irq(unsigned int);
-extern void i8259a_end_irq(unsigned int);
+extern void i8259a_enable_irq(struct irq_data *d);
+extern void i8259a_disable_irq(struct irq_data *d);
+extern void i8259a_mask_and_ack_irq(struct irq_data *d);
 extern struct irq_chip i8259a_irq_type;
 extern void init_i8259a_irqs(void);
 
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
index 2863458c853e..b30227fa7f5f 100644
--- a/arch/alpha/kernel/irq_pyxis.c
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -29,21 +29,21 @@ pyxis_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-pyxis_enable_irq(unsigned int irq)
+pyxis_enable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	pyxis_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-pyxis_disable_irq(unsigned int irq)
+pyxis_disable_irq(struct irq_data *d)
 {
-	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-pyxis_mask_and_ack_irq(unsigned int irq)
+pyxis_mask_and_ack_irq(struct irq_data *d)
 {
-	unsigned long bit = 1UL << (irq - 16);
+	unsigned long bit = 1UL << (d->irq - 16);
 	unsigned long mask = cached_irq_mask &= ~bit;
 
 	/* Disable the interrupt. */
@@ -58,9 +58,9 @@ pyxis_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip pyxis_irq_type = {
 	.name		= "PYXIS",
-	.mask_ack	= pyxis_mask_and_ack_irq,
-	.mask		= pyxis_disable_irq,
-	.unmask		= pyxis_enable_irq,
+	.irq_mask_ack	= pyxis_mask_and_ack_irq,
+	.irq_mask	= pyxis_disable_irq,
+	.irq_unmask	= pyxis_enable_irq,
 };
 
 void
@@ -103,7 +103,7 @@ init_pyxis_irqs(unsigned long ignore_mask)
 		if ((ignore_mask >> i) & 1)
 			continue;
 		set_irq_chip_and_handler(i, &pyxis_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	setup_irq(16+7, &isa_cascade_irqaction);
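A side note on the init loop above: irq_set_status_flags() (and its counterpart
irq_clear_status_flags()) is the genirq helper that replaces poking
irq_to_desc(i)->status directly, as the remaining platform files do in the same
way.  A minimal, illustrative use (the function name is made up):

	#include <linux/irq.h>

	static void __init example_mark_level(unsigned int irq, bool level)
	{
		if (level)
			irq_set_status_flags(irq, IRQ_LEVEL);
		else
			irq_clear_status_flags(irq, IRQ_LEVEL);
	}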
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
index 0e57e828b413..82a47bba41c4 100644
--- a/arch/alpha/kernel/irq_srm.c
+++ b/arch/alpha/kernel/irq_srm.c
@@ -18,27 +18,27 @@
 DEFINE_SPINLOCK(srm_irq_lock);
 
 static inline void
-srm_enable_irq(unsigned int irq)
+srm_enable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_ena(irq - 16);
+	cserve_ena(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 static void
-srm_disable_irq(unsigned int irq)
+srm_disable_irq(struct irq_data *d)
 {
 	spin_lock(&srm_irq_lock);
-	cserve_dis(irq - 16);
+	cserve_dis(d->irq - 16);
 	spin_unlock(&srm_irq_lock);
 }
 
 /* Handle interrupts from the SRM, assuming no additional weirdness. */
 static struct irq_chip srm_irq_type = {
 	.name		= "SRM",
-	.unmask		= srm_enable_irq,
-	.mask		= srm_disable_irq,
-	.mask_ack	= srm_disable_irq,
+	.irq_unmask	= srm_enable_irq,
+	.irq_mask	= srm_disable_irq,
+	.irq_mask_ack	= srm_disable_irq,
 };
 
 void __init
@@ -52,7 +52,7 @@ init_srm_irqs(long max, unsigned long ignore_mask)
 		if (i < 64 && ((ignore_mask >> i) & 1))
 			continue;
 		set_irq_chip_and_handler(i, &srm_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_alcor.c b/arch/alpha/kernel/sys_alcor.c
index 7bef61768236..88d95e872f55 100644
--- a/arch/alpha/kernel/sys_alcor.c
+++ b/arch/alpha/kernel/sys_alcor.c
@@ -44,31 +44,31 @@ alcor_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-alcor_enable_irq(unsigned int irq)
+alcor_enable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	alcor_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-alcor_disable_irq(unsigned int irq)
+alcor_disable_irq(struct irq_data *d)
 {
-	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	alcor_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static void
-alcor_mask_and_ack_irq(unsigned int irq)
+alcor_mask_and_ack_irq(struct irq_data *d)
 {
-	alcor_disable_irq(irq);
+	alcor_disable_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
-	*(vuip)GRU_INT_CLEAR = 1 << (irq - 16); mb();
+	*(vuip)GRU_INT_CLEAR = 1 << (d->irq - 16); mb();
 	*(vuip)GRU_INT_CLEAR = 0; mb();
 }
 
 static void
-alcor_isa_mask_and_ack_irq(unsigned int irq)
+alcor_isa_mask_and_ack_irq(struct irq_data *d)
 {
-	i8259a_mask_and_ack_irq(irq);
+	i8259a_mask_and_ack_irq(d);
 
 	/* On ALCOR/XLT, need to dismiss interrupt via GRU. */
 	*(vuip)GRU_INT_CLEAR = 0x80000000; mb();
@@ -77,9 +77,9 @@ alcor_isa_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip alcor_irq_type = {
 	.name		= "ALCOR",
-	.unmask		= alcor_enable_irq,
-	.mask		= alcor_disable_irq,
-	.mask_ack	= alcor_mask_and_ack_irq,
+	.irq_unmask	= alcor_enable_irq,
+	.irq_mask	= alcor_disable_irq,
+	.irq_mask_ack	= alcor_mask_and_ack_irq,
 };
 
 static void
@@ -126,9 +126,9 @@ alcor_init_irq(void)
 		if (i >= 16+20 && i <= 16+30)
 			continue;
 		set_irq_chip_and_handler(i, &alcor_irq_type, handle_level_irq);
-		irq_to_desc(i)->status |= IRQ_LEVEL;
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
-	i8259a_irq_type.ack = alcor_isa_mask_and_ack_irq;
+	i8259a_irq_type.irq_ack = alcor_isa_mask_and_ack_irq;
 
 	init_i8259a_irqs();
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_cabriolet.c b/arch/alpha/kernel/sys_cabriolet.c
index b0c916493aea..57eb6307bc27 100644
--- a/arch/alpha/kernel/sys_cabriolet.c
+++ b/arch/alpha/kernel/sys_cabriolet.c
@@ -46,22 +46,22 @@ cabriolet_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-cabriolet_enable_irq(unsigned int irq)
+cabriolet_enable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask &= ~(1UL << irq));
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask &= ~(1UL << d->irq));
 }
 
 static void
-cabriolet_disable_irq(unsigned int irq)
+cabriolet_disable_irq(struct irq_data *d)
 {
-	cabriolet_update_irq_hw(irq, cached_irq_mask |= 1UL << irq);
+	cabriolet_update_irq_hw(d->irq, cached_irq_mask |= 1UL << d->irq);
 }
 
 static struct irq_chip cabriolet_irq_type = {
 	.name		= "CABRIOLET",
-	.unmask		= cabriolet_enable_irq,
-	.mask		= cabriolet_disable_irq,
-	.mask_ack	= cabriolet_disable_irq,
+	.irq_unmask	= cabriolet_enable_irq,
+	.irq_mask	= cabriolet_disable_irq,
+	.irq_mask_ack	= cabriolet_disable_irq,
 };
 
 static void
@@ -107,7 +107,7 @@ common_init_irq(void (*srm_dev_int)(unsigned long v))
 		for (i = 16; i < 35; ++i) {
 			set_irq_chip_and_handler(i, &cabriolet_irq_type,
 						 handle_level_irq);
-			irq_to_desc(i)->status |= IRQ_LEVEL;
+			irq_set_status_flags(i, IRQ_LEVEL);
 		}
 	}
 
diff --git a/arch/alpha/kernel/sys_dp264.c b/arch/alpha/kernel/sys_dp264.c
index edad5f759ccd..481df4ecb651 100644
--- a/arch/alpha/kernel/sys_dp264.c
+++ b/arch/alpha/kernel/sys_dp264.c
@@ -98,37 +98,37 @@ tsunami_update_irq_hw(unsigned long mask)
 }
 
 static void
-dp264_enable_irq(unsigned int irq)
+dp264_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << irq;
+	cached_irq_mask |= 1UL << d->irq;
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-dp264_disable_irq(unsigned int irq)
+dp264_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << irq);
+	cached_irq_mask &= ~(1UL << d->irq);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_enable_irq(unsigned int irq)
+clipper_enable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask |= 1UL << (irq - 16);
+	cached_irq_mask |= 1UL << (d->irq - 16);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
 
 static void
-clipper_disable_irq(unsigned int irq)
+clipper_disable_irq(struct irq_data *d)
 {
 	spin_lock(&dp264_irq_lock);
-	cached_irq_mask &= ~(1UL << (irq - 16));
+	cached_irq_mask &= ~(1UL << (d->irq - 16));
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 }
@@ -149,10 +149,11 @@ cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+dp264_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		   bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq, *affinity);
+	cpu_set_irq_affinity(d->irq, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -160,10 +161,11 @@ dp264_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static int
-clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
-{
+clipper_set_affinity(struct irq_data *d, const struct cpumask *affinity,
+		     bool force)
+{
 	spin_lock(&dp264_irq_lock);
-	cpu_set_irq_affinity(irq - 16, *affinity);
+	cpu_set_irq_affinity(d->irq - 16, *affinity);
 	tsunami_update_irq_hw(cached_irq_mask);
 	spin_unlock(&dp264_irq_lock);
 
@@ -171,19 +173,19 @@ clipper_set_affinity(unsigned int irq, const struct cpumask *affinity)
 }
 
 static struct irq_chip dp264_irq_type = {
 	.name			= "DP264",
-	.unmask			= dp264_enable_irq,
-	.mask			= dp264_disable_irq,
-	.mask_ack		= dp264_disable_irq,
-	.set_affinity		= dp264_set_affinity,
+	.irq_unmask		= dp264_enable_irq,
+	.irq_mask		= dp264_disable_irq,
+	.irq_mask_ack		= dp264_disable_irq,
+	.irq_set_affinity	= dp264_set_affinity,
 };
 
 static struct irq_chip clipper_irq_type = {
 	.name			= "CLIPPER",
-	.unmask			= clipper_enable_irq,
-	.mask			= clipper_disable_irq,
-	.mask_ack		= clipper_disable_irq,
-	.set_affinity		= clipper_set_affinity,
+	.irq_unmask		= clipper_enable_irq,
+	.irq_mask		= clipper_disable_irq,
+	.irq_mask_ack		= clipper_disable_irq,
+	.irq_set_affinity	= clipper_set_affinity,
 };
 
 static void
@@ -268,8 +270,8 @@ init_tsunami_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
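The irq_set_affinity callback used for DP264 and CLIPPER above now takes the
irq_data pointer plus a force flag.  As a sketch of the expected shape only
(hypothetical "bar" router, not in the patch):

	static int bar_set_affinity(struct irq_data *d,
				    const struct cpumask *affinity, bool force)
	{
		/* Reprogram the hypothetical interrupt router for d->irq here,
		 * honouring 'affinity'; return 0 to report success to the
		 * genirq core. */
		return 0;
	}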
diff --git a/arch/alpha/kernel/sys_eb64p.c b/arch/alpha/kernel/sys_eb64p.c
index ae5f29d127b0..402e908ffb3e 100644
--- a/arch/alpha/kernel/sys_eb64p.c
+++ b/arch/alpha/kernel/sys_eb64p.c
@@ -44,22 +44,22 @@ eb64p_update_irq_hw(unsigned int irq, unsigned long mask)
 }
 
 static inline void
-eb64p_enable_irq(unsigned int irq)
+eb64p_enable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+	eb64p_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << d->irq));
 }
 
 static void
-eb64p_disable_irq(unsigned int irq)
+eb64p_disable_irq(struct irq_data *d)
 {
-	eb64p_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+	eb64p_update_irq_hw(d->irq, cached_irq_mask |= 1 << d->irq);
 }
 
 static struct irq_chip eb64p_irq_type = {
 	.name		= "EB64P",
-	.unmask		= eb64p_enable_irq,
-	.mask		= eb64p_disable_irq,
-	.mask_ack	= eb64p_disable_irq,
+	.irq_unmask	= eb64p_enable_irq,
+	.irq_mask	= eb64p_disable_irq,
+	.irq_mask_ack	= eb64p_disable_irq,
 };
 
 static void
@@ -118,9 +118,9 @@ eb64p_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eb64p_irq_type, handle_level_irq);
-	}
+		irq_set_status_flags(i, IRQ_LEVEL);
+	}
 
 	common_init_isa_dma();
 	setup_irq(16+5, &isa_cascade_irqaction);
diff --git a/arch/alpha/kernel/sys_eiger.c b/arch/alpha/kernel/sys_eiger.c
index 1121bc5c6c6c..0b44a54c1522 100644
--- a/arch/alpha/kernel/sys_eiger.c
+++ b/arch/alpha/kernel/sys_eiger.c
@@ -51,16 +51,18 @@ eiger_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-eiger_enable_irq(unsigned int irq)
+eiger_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	eiger_update_irq_hw(irq, mask);
 }
 
 static void
-eiger_disable_irq(unsigned int irq)
+eiger_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	eiger_update_irq_hw(irq, mask);
@@ -68,9 +70,9 @@ eiger_disable_irq(unsigned int irq)
 
 static struct irq_chip eiger_irq_type = {
 	.name		= "EIGER",
-	.unmask		= eiger_enable_irq,
-	.mask		= eiger_disable_irq,
-	.mask_ack	= eiger_disable_irq,
+	.irq_unmask	= eiger_enable_irq,
+	.irq_mask	= eiger_disable_irq,
+	.irq_mask_ack	= eiger_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ eiger_init_irq(void)
 	init_i8259a_irqs();
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &eiger_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
diff --git a/arch/alpha/kernel/sys_jensen.c b/arch/alpha/kernel/sys_jensen.c
index 34f55e03d331..00341b75c8b2 100644
--- a/arch/alpha/kernel/sys_jensen.c
+++ b/arch/alpha/kernel/sys_jensen.c
@@ -63,34 +63,34 @@
  */
 
 static void
-jensen_local_enable(unsigned int irq)
+jensen_local_enable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_enable_irq(1);
+	if (d->irq == 7)
+		i8259a_enable_irq(d);
 }
 
 static void
-jensen_local_disable(unsigned int irq)
+jensen_local_disable(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_disable_irq(1);
+	if (d->irq == 7)
+		i8259a_disable_irq(d);
 }
 
 static void
-jensen_local_mask_ack(unsigned int irq)
+jensen_local_mask_ack(struct irq_data *d)
 {
 	/* the parport is really hw IRQ 1, silly Jensen. */
-	if (irq == 7)
-		i8259a_mask_and_ack_irq(1);
+	if (d->irq == 7)
+		i8259a_mask_and_ack_irq(d);
 }
 
 static struct irq_chip jensen_local_irq_type = {
 	.name		= "LOCAL",
-	.unmask		= jensen_local_enable,
-	.mask		= jensen_local_disable,
-	.mask_ack	= jensen_local_mask_ack,
+	.irq_unmask	= jensen_local_enable,
+	.irq_mask	= jensen_local_disable,
+	.irq_mask_ack	= jensen_local_mask_ack,
 };
 
 static void
diff --git a/arch/alpha/kernel/sys_marvel.c b/arch/alpha/kernel/sys_marvel.c
index 2bfc9f1b1ddc..e61910734e41 100644
--- a/arch/alpha/kernel/sys_marvel.c
+++ b/arch/alpha/kernel/sys_marvel.c
@@ -104,9 +104,10 @@ io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
 }
 
 static void
-io7_enable_irq(unsigned int irq)
+io7_enable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -115,7 +116,7 @@ io7_enable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-		
+
 	spin_lock(&io7->irq_lock);
 	*ctl |= 1UL << 24;
 	mb();
@@ -124,9 +125,10 @@ io7_enable_irq(unsigned int irq)
 }
 
 static void
-io7_disable_irq(unsigned int irq)
+io7_disable_irq(struct irq_data *d)
 {
 	volatile unsigned long *ctl;
+	unsigned int irq = d->irq;
 	struct io7 *io7;
 
 	ctl = io7_get_irq_ctl(irq, &io7);
@@ -135,7 +137,7 @@ io7_disable_irq(unsigned int irq)
 		       __func__, irq);
 		return;
 	}
-		
+
 	spin_lock(&io7->irq_lock);
 	*ctl &= ~(1UL << 24);
 	mb();
@@ -144,35 +146,29 @@ io7_disable_irq(unsigned int irq)
 }
 
 static void
-marvel_irq_noop(unsigned int irq)
+marvel_irq_noop(struct irq_data *d)
 {
 	return;
-}
-
-static unsigned int
-marvel_irq_noop_return(unsigned int irq)
-{
-	return 0;
 }
 
 static struct irq_chip marvel_legacy_irq_type = {
 	.name		= "LEGACY",
-	.mask		= marvel_irq_noop,
-	.unmask		= marvel_irq_noop,
+	.irq_mask	= marvel_irq_noop,
+	.irq_unmask	= marvel_irq_noop,
 };
 
 static struct irq_chip io7_lsi_irq_type = {
 	.name		= "LSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.mask_ack	= io7_disable_irq,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_mask_ack	= io7_disable_irq,
 };
 
 static struct irq_chip io7_msi_irq_type = {
 	.name		= "MSI",
-	.unmask		= io7_enable_irq,
-	.mask		= io7_disable_irq,
-	.ack		= marvel_irq_noop,
+	.irq_unmask	= io7_enable_irq,
+	.irq_mask	= io7_disable_irq,
+	.irq_ack	= marvel_irq_noop,
 };
 
 static void
@@ -280,8 +276,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the lsi irqs. */
 	for (i = 0; i < 128; ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, lsi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	/* Disable the implemented irqs in hardware. */
@@ -294,8 +290,8 @@ init_io7_irqs(struct io7 *io7,
 
 	/* Set up the msi irqs. */
 	for (i = 128; i < (128 + 512); ++i) {
-		irq_to_desc(base + i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(base + i, msi_ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	for (i = 0; i < 16; ++i)
diff --git a/arch/alpha/kernel/sys_mikasa.c b/arch/alpha/kernel/sys_mikasa.c
index bcc1639e8efb..cf7f43dd3147 100644
--- a/arch/alpha/kernel/sys_mikasa.c
+++ b/arch/alpha/kernel/sys_mikasa.c
@@ -43,22 +43,22 @@ mikasa_update_irq_hw(int mask)
 }
 
 static inline void
-mikasa_enable_irq(unsigned int irq)
+mikasa_enable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask |= 1 << (irq - 16));
+	mikasa_update_irq_hw(cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-mikasa_disable_irq(unsigned int irq)
+mikasa_disable_irq(struct irq_data *d)
 {
-	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (irq - 16)));
+	mikasa_update_irq_hw(cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip mikasa_irq_type = {
 	.name		= "MIKASA",
-	.unmask		= mikasa_enable_irq,
-	.mask		= mikasa_disable_irq,
-	.mask_ack	= mikasa_disable_irq,
+	.irq_unmask	= mikasa_enable_irq,
+	.irq_mask	= mikasa_disable_irq,
+	.irq_mask_ack	= mikasa_disable_irq,
 };
 
 static void
@@ -98,8 +98,8 @@ mikasa_init_irq(void)
 	mikasa_update_irq_hw(0);
 
 	for (i = 16; i < 32; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &mikasa_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_noritake.c b/arch/alpha/kernel/sys_noritake.c
index e88f4ae1260e..92bc188e94a9 100644
--- a/arch/alpha/kernel/sys_noritake.c
+++ b/arch/alpha/kernel/sys_noritake.c
@@ -48,22 +48,22 @@ noritake_update_irq_hw(int irq, int mask)
 }
 
 static void
-noritake_enable_irq(unsigned int irq)
+noritake_enable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask |= 1 << (irq - 16));
+	noritake_update_irq_hw(d->irq, cached_irq_mask |= 1 << (d->irq - 16));
 }
 
 static void
-noritake_disable_irq(unsigned int irq)
+noritake_disable_irq(struct irq_data *d)
 {
-	noritake_update_irq_hw(irq, cached_irq_mask &= ~(1 << (irq - 16)));
+	noritake_update_irq_hw(d->irq, cached_irq_mask &= ~(1 << (d->irq - 16)));
 }
 
 static struct irq_chip noritake_irq_type = {
 	.name		= "NORITAKE",
-	.unmask		= noritake_enable_irq,
-	.mask		= noritake_disable_irq,
-	.mask_ack	= noritake_disable_irq,
+	.irq_unmask	= noritake_enable_irq,
+	.irq_mask	= noritake_disable_irq,
+	.irq_mask_ack	= noritake_disable_irq,
 };
 
 static void
@@ -127,8 +127,8 @@ noritake_init_irq(void)
 	outw(0, 0x54c);
 
 	for (i = 16; i < 48; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &noritake_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rawhide.c b/arch/alpha/kernel/sys_rawhide.c
index 6a51364dd1cc..936d4140ed5f 100644
--- a/arch/alpha/kernel/sys_rawhide.c
+++ b/arch/alpha/kernel/sys_rawhide.c
@@ -56,9 +56,10 @@ rawhide_update_irq_hw(int hose, int mask)
 	(((h) < MCPCIA_MAX_HOSES) && (cached_irq_masks[(h)] != 0))
 
 static inline void
-rawhide_enable_irq(unsigned int irq)
+rawhide_enable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -76,9 +77,10 @@ rawhide_enable_irq(unsigned int irq)
 }
 
 static void
-rawhide_disable_irq(unsigned int irq)
+rawhide_disable_irq(struct irq_data *d)
 {
 	unsigned int mask, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -96,9 +98,10 @@ rawhide_disable_irq(unsigned int irq)
 }
 
 static void
-rawhide_mask_and_ack_irq(unsigned int irq)
+rawhide_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned int mask, mask1, hose;
+	unsigned int irq = d->irq;
 
 	irq -= 16;
 	hose = irq / 24;
@@ -123,9 +126,9 @@ rawhide_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip rawhide_irq_type = {
 	.name		= "RAWHIDE",
-	.unmask		= rawhide_enable_irq,
-	.mask		= rawhide_disable_irq,
-	.mask_ack	= rawhide_mask_and_ack_irq,
+	.irq_unmask	= rawhide_enable_irq,
+	.irq_mask	= rawhide_disable_irq,
+	.irq_mask_ack	= rawhide_mask_and_ack_irq,
 };
 
 static void
@@ -177,8 +180,8 @@ rawhide_init_irq(void)
 	}
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rawhide_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_rx164.c b/arch/alpha/kernel/sys_rx164.c
index 89e7e37ec84c..cea22a62913b 100644
--- a/arch/alpha/kernel/sys_rx164.c
+++ b/arch/alpha/kernel/sys_rx164.c
@@ -47,22 +47,22 @@ rx164_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-rx164_enable_irq(unsigned int irq)
+rx164_enable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+	rx164_update_irq_hw(cached_irq_mask |= 1UL << (d->irq - 16));
 }
 
 static void
-rx164_disable_irq(unsigned int irq)
+rx164_disable_irq(struct irq_data *d)
 {
-	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+	rx164_update_irq_hw(cached_irq_mask &= ~(1UL << (d->irq - 16)));
 }
 
 static struct irq_chip rx164_irq_type = {
 	.name		= "RX164",
-	.unmask		= rx164_enable_irq,
-	.mask		= rx164_disable_irq,
-	.mask_ack	= rx164_disable_irq,
+	.irq_unmask	= rx164_enable_irq,
+	.irq_mask	= rx164_disable_irq,
+	.irq_mask_ack	= rx164_disable_irq,
 };
 
 static void
@@ -99,8 +99,8 @@ rx164_init_irq(void)
 
 	rx164_update_irq_hw(0);
 	for (i = 16; i < 40; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &rx164_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	init_i8259a_irqs();
diff --git a/arch/alpha/kernel/sys_sable.c b/arch/alpha/kernel/sys_sable.c
index 5c4423d1b06c..a349538aabc9 100644
--- a/arch/alpha/kernel/sys_sable.c
+++ b/arch/alpha/kernel/sys_sable.c
@@ -443,11 +443,11 @@ lynx_swizzle(struct pci_dev *dev, u8 *pinp)
 /* GENERIC irq routines */
 
 static inline void
-sable_lynx_enable_irq(unsigned int irq)
+sable_lynx_enable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask &= ~(1UL << bit);
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -459,11 +459,11 @@ sable_lynx_enable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_disable_irq(unsigned int irq)
+sable_lynx_disable_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -475,11 +475,11 @@ sable_lynx_disable_irq(unsigned int irq)
 }
 
 static void
-sable_lynx_mask_and_ack_irq(unsigned int irq)
+sable_lynx_mask_and_ack_irq(struct irq_data *d)
 {
 	unsigned long bit, mask;
 
-	bit = sable_lynx_irq_swizzle->irq_to_mask[irq];
+	bit = sable_lynx_irq_swizzle->irq_to_mask[d->irq];
 	spin_lock(&sable_lynx_irq_lock);
 	mask = sable_lynx_irq_swizzle->shadow_mask |= 1UL << bit;
 	sable_lynx_irq_swizzle->update_irq_hw(bit, mask);
@@ -489,9 +489,9 @@ sable_lynx_mask_and_ack_irq(unsigned int irq)
 
 static struct irq_chip sable_lynx_irq_type = {
 	.name		= "SABLE/LYNX",
-	.unmask		= sable_lynx_enable_irq,
-	.mask		= sable_lynx_disable_irq,
-	.mask_ack	= sable_lynx_mask_and_ack_irq,
+	.irq_unmask	= sable_lynx_enable_irq,
+	.irq_mask	= sable_lynx_disable_irq,
+	.irq_mask_ack	= sable_lynx_mask_and_ack_irq,
 };
 
 static void
@@ -518,9 +518,9 @@ sable_lynx_init_irq(int nr_of_irqs)
 	long i;
 
 	for (i = 0; i < nr_of_irqs; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &sable_lynx_irq_type,
 					 handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_takara.c b/arch/alpha/kernel/sys_takara.c
index f8a1e8a862fb..42a5331f13c4 100644
--- a/arch/alpha/kernel/sys_takara.c
+++ b/arch/alpha/kernel/sys_takara.c
@@ -45,16 +45,18 @@ takara_update_irq_hw(unsigned long irq, unsigned long mask)
 }
 
 static inline void
-takara_enable_irq(unsigned int irq)
+takara_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] &= ~(1UL << (irq & 63)));
 	takara_update_irq_hw(irq, mask);
 }
 
 static void
-takara_disable_irq(unsigned int irq)
+takara_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	unsigned long mask;
 	mask = (cached_irq_mask[irq >= 64] |= 1UL << (irq & 63));
 	takara_update_irq_hw(irq, mask);
@@ -62,9 +64,9 @@ takara_disable_irq(unsigned int irq)
 
 static struct irq_chip takara_irq_type = {
 	.name		= "TAKARA",
-	.unmask		= takara_enable_irq,
-	.mask		= takara_disable_irq,
-	.mask_ack	= takara_disable_irq,
+	.irq_unmask	= takara_enable_irq,
+	.irq_mask	= takara_disable_irq,
+	.irq_mask_ack	= takara_disable_irq,
 };
 
 static void
@@ -136,8 +138,8 @@ takara_init_irq(void)
 		takara_update_irq_hw(i, -1);
 
 	for (i = 16; i < 128; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, &takara_irq_type, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 
 	common_init_isa_dma();
diff --git a/arch/alpha/kernel/sys_titan.c b/arch/alpha/kernel/sys_titan.c
index e02494bf5ef3..f6c108a3d673 100644
--- a/arch/alpha/kernel/sys_titan.c
+++ b/arch/alpha/kernel/sys_titan.c
@@ -112,8 +112,9 @@ titan_update_irq_hw(unsigned long mask)
 }
 
 static inline void
-titan_enable_irq(unsigned int irq)
+titan_enable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask |= 1UL << (irq - 16);
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -121,8 +122,9 @@ titan_enable_irq(unsigned int irq)
 }
 
 static inline void
-titan_disable_irq(unsigned int irq)
+titan_disable_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	spin_lock(&titan_irq_lock);
 	titan_cached_irq_mask &= ~(1UL << (irq - 16));
 	titan_update_irq_hw(titan_cached_irq_mask);
@@ -144,7 +146,8 @@ titan_cpu_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 }
 
 static int
-titan_set_irq_affinity(unsigned int irq, const struct cpumask *affinity)
+titan_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
+		       bool force)
 {
 	spin_lock(&titan_irq_lock);
 	titan_cpu_set_irq_affinity(irq - 16, *affinity);
@@ -175,17 +178,17 @@ init_titan_irqs(struct irq_chip * ops, int imin, int imax)
 {
 	long i;
 	for (i = imin; i <= imax; ++i) {
-		irq_to_desc(i)->status |= IRQ_LEVEL;
 		set_irq_chip_and_handler(i, ops, handle_level_irq);
+		irq_set_status_flags(i, IRQ_LEVEL);
 	}
 }
 
 static struct irq_chip titan_irq_type = {
 	.name			= "TITAN",
-	.unmask			= titan_enable_irq,
-	.mask			= titan_disable_irq,
-	.mask_ack		= titan_disable_irq,
-	.set_affinity		= titan_set_irq_affinity,
+	.irq_unmask		= titan_enable_irq,
+	.irq_mask		= titan_disable_irq,
+	.irq_mask_ack		= titan_disable_irq,
+	.irq_set_affinity	= titan_set_irq_affinity,
 };
 
 static irqreturn_t
diff --git a/arch/alpha/kernel/sys_wildfire.c b/arch/alpha/kernel/sys_wildfire.c index eec52594d410..ca60a387ef0a 100644 --- a/arch/alpha/kernel/sys_wildfire.c +++ b/arch/alpha/kernel/sys_wildfire.c | |||
| @@ -104,10 +104,12 @@ wildfire_init_irq_hw(void) | |||
| 104 | } | 104 | } |
| 105 | 105 | ||
| 106 | static void | 106 | static void |
| 107 | wildfire_enable_irq(unsigned int irq) | 107 | wildfire_enable_irq(struct irq_data *d) |
| 108 | { | 108 | { |
| 109 | unsigned int irq = d->irq; | ||
| 110 | |||
| 109 | if (irq < 16) | 111 | if (irq < 16) |
| 110 | i8259a_enable_irq(irq); | 112 | i8259a_enable_irq(d); |
| 111 | 113 | ||
| 112 | spin_lock(&wildfire_irq_lock); | 114 | spin_lock(&wildfire_irq_lock); |
| 113 | set_bit(irq, &cached_irq_mask); | 115 | set_bit(irq, &cached_irq_mask); |
| @@ -116,10 +118,12 @@ wildfire_enable_irq(unsigned int irq) | |||
| 116 | } | 118 | } |
| 117 | 119 | ||
| 118 | static void | 120 | static void |
| 119 | wildfire_disable_irq(unsigned int irq) | 121 | wildfire_disable_irq(struct irq_data *d) |
| 120 | { | 122 | { |
| 123 | unsigned int irq = d->irq; | ||
| 124 | |||
| 121 | if (irq < 16) | 125 | if (irq < 16) |
| 122 | i8259a_disable_irq(irq); | 126 | i8259a_disable_irq(d); |
| 123 | 127 | ||
| 124 | spin_lock(&wildfire_irq_lock); | 128 | spin_lock(&wildfire_irq_lock); |
| 125 | clear_bit(irq, &cached_irq_mask); | 129 | clear_bit(irq, &cached_irq_mask); |
| @@ -128,10 +132,12 @@ wildfire_disable_irq(unsigned int irq) | |||
| 128 | } | 132 | } |
| 129 | 133 | ||
| 130 | static void | 134 | static void |
| 131 | wildfire_mask_and_ack_irq(unsigned int irq) | 135 | wildfire_mask_and_ack_irq(struct irq_data *d) |
| 132 | { | 136 | { |
| 137 | unsigned int irq = d->irq; | ||
| 138 | |||
| 133 | if (irq < 16) | 139 | if (irq < 16) |
| 134 | i8259a_mask_and_ack_irq(irq); | 140 | i8259a_mask_and_ack_irq(d); |
| 135 | 141 | ||
| 136 | spin_lock(&wildfire_irq_lock); | 142 | spin_lock(&wildfire_irq_lock); |
| 137 | clear_bit(irq, &cached_irq_mask); | 143 | clear_bit(irq, &cached_irq_mask); |
| @@ -141,9 +147,9 @@ wildfire_mask_and_ack_irq(unsigned int irq) | |||
| 141 | 147 | ||
| 142 | static struct irq_chip wildfire_irq_type = { | 148 | static struct irq_chip wildfire_irq_type = { |
| 143 | .name = "WILDFIRE", | 149 | .name = "WILDFIRE", |
| 144 | .unmask = wildfire_enable_irq, | 150 | .irq_unmask = wildfire_enable_irq, |
| 145 | .mask = wildfire_disable_irq, | 151 | .irq_mask = wildfire_disable_irq, |
| 146 | .mask_ack = wildfire_mask_and_ack_irq, | 152 | .irq_mask_ack = wildfire_mask_and_ack_irq, |
| 147 | }; | 153 | }; |
| 148 | 154 | ||
| 149 | static void __init | 155 | static void __init |
| @@ -177,21 +183,21 @@ wildfire_init_irq_per_pca(int qbbno, int pcano) | |||
| 177 | for (i = 0; i < 16; ++i) { | 183 | for (i = 0; i < 16; ++i) { |
| 178 | if (i == 2) | 184 | if (i == 2) |
| 179 | continue; | 185 | continue; |
| 180 | irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; | ||
| 181 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 186 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, |
| 182 | handle_level_irq); | 187 | handle_level_irq); |
| 188 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
| 183 | } | 189 | } |
| 184 | 190 | ||
| 185 | irq_to_desc(36+irq_bias)->status |= IRQ_LEVEL; | ||
| 186 | set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, | 191 | set_irq_chip_and_handler(36+irq_bias, &wildfire_irq_type, |
| 187 | handle_level_irq); | 192 | handle_level_irq); |
| 193 | irq_set_status_flags(36 + irq_bias, IRQ_LEVEL); | ||
| 188 | for (i = 40; i < 64; ++i) { | 194 | for (i = 40; i < 64; ++i) { |
| 189 | irq_to_desc(i+irq_bias)->status |= IRQ_LEVEL; | ||
| 190 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, | 195 | set_irq_chip_and_handler(i+irq_bias, &wildfire_irq_type, |
| 191 | handle_level_irq); | 196 | handle_level_irq); |
| 197 | irq_set_status_flags(i + irq_bias, IRQ_LEVEL); | ||
| 192 | } | 198 | } |
| 193 | 199 | ||
| 194 | setup_irq(32+irq_bias, &isa_enable); | 200 | setup_irq(32+irq_bias, &isa_enable); |
| 195 | } | 201 | } |
| 196 | 202 | ||
| 197 | static void __init | 203 | static void __init |
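The Alpha hunks above all apply the same genirq conversion: chip callbacks now receive a struct irq_data pointer instead of a bare IRQ number, the irq_chip members gain an irq_ prefix (.irq_mask, .irq_unmask, .irq_mask_ack, .irq_set_affinity), and the IRQ_LEVEL flag is set via irq_set_status_flags() rather than by writing irq_to_desc()->status directly. A minimal sketch of the resulting shape, assuming a hypothetical board helper example_update_irq_hw() in place of the real register writes:

        #include <linux/init.h>
        #include <linux/irq.h>

        /* Hypothetical board-specific helper standing in for
         * takara_update_irq_hw() and friends above. */
        static void example_update_irq_hw(unsigned int irq, int masked)
        {
        }

        static void example_irq_mask(struct irq_data *d)
        {
                example_update_irq_hw(d->irq, 1);       /* IRQ number now comes from irq_data */
        }

        static void example_irq_unmask(struct irq_data *d)
        {
                example_update_irq_hw(d->irq, 0);
        }

        static struct irq_chip example_irq_chip = {
                .name           = "EXAMPLE",
                .irq_unmask     = example_irq_unmask,
                .irq_mask       = example_irq_mask,
                .irq_mask_ack   = example_irq_mask,
        };

        static void __init example_init_irq(void)
        {
                int i;

                for (i = 16; i < 64; ++i) {
                        set_irq_chip_and_handler(i, &example_irq_chip,
                                                 handle_level_irq);
                        irq_set_status_flags(i, IRQ_LEVEL);
                }
        }
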
diff --git a/arch/arm/common/Kconfig b/arch/arm/common/Kconfig index 778655f0257a..ea5ee4d067f3 100644 --- a/arch/arm/common/Kconfig +++ b/arch/arm/common/Kconfig | |||
| @@ -6,6 +6,8 @@ config ARM_VIC | |||
| 6 | 6 | ||
| 7 | config ARM_VIC_NR | 7 | config ARM_VIC_NR |
| 8 | int | 8 | int |
| 9 | default 4 if ARCH_S5PV210 | ||
| 10 | default 3 if ARCH_S5P6442 || ARCH_S5PC100 | ||
| 9 | default 2 | 11 | default 2 |
| 10 | depends on ARM_VIC | 12 | depends on ARM_VIC |
| 11 | help | 13 | help |
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 3a0893a76a3b..bf13b814c1b8 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h | |||
| @@ -15,10 +15,6 @@ struct meminfo; | |||
| 15 | struct sys_timer; | 15 | struct sys_timer; |
| 16 | 16 | ||
| 17 | struct machine_desc { | 17 | struct machine_desc { |
| 18 | /* | ||
| 19 | * Note! The first two elements are used | ||
| 20 | * by assembler code in head.S, head-common.S | ||
| 21 | */ | ||
| 22 | unsigned int nr; /* architecture number */ | 18 | unsigned int nr; /* architecture number */ |
| 23 | const char *name; /* architecture name */ | 19 | const char *name; /* architecture name */ |
| 24 | unsigned long boot_params; /* tagged list */ | 20 | unsigned long boot_params; /* tagged list */ |
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 9763be04f77e..22de005f159c 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h | |||
| @@ -10,6 +10,8 @@ | |||
| 10 | #ifndef _ASMARM_PGALLOC_H | 10 | #ifndef _ASMARM_PGALLOC_H |
| 11 | #define _ASMARM_PGALLOC_H | 11 | #define _ASMARM_PGALLOC_H |
| 12 | 12 | ||
| 13 | #include <linux/pagemap.h> | ||
| 14 | |||
| 13 | #include <asm/domain.h> | 15 | #include <asm/domain.h> |
| 14 | #include <asm/pgtable-hwdef.h> | 16 | #include <asm/pgtable-hwdef.h> |
| 15 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index d600bd350704..44b84fe6e1b0 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c | |||
| @@ -836,9 +836,11 @@ static int hw_breakpoint_pending(unsigned long addr, unsigned int fsr, | |||
| 836 | /* | 836 | /* |
| 837 | * One-time initialisation. | 837 | * One-time initialisation. |
| 838 | */ | 838 | */ |
| 839 | static void reset_ctrl_regs(void *unused) | 839 | static void reset_ctrl_regs(void *info) |
| 840 | { | 840 | { |
| 841 | int i; | 841 | int i, cpu = smp_processor_id(); |
| 842 | u32 dbg_power; | ||
| 843 | cpumask_t *cpumask = info; | ||
| 842 | 844 | ||
| 843 | /* | 845 | /* |
| 844 | * v7 debug contains save and restore registers so that debug state | 846 | * v7 debug contains save and restore registers so that debug state |
| @@ -850,6 +852,17 @@ static void reset_ctrl_regs(void *unused) | |||
| 850 | */ | 852 | */ |
| 851 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { | 853 | if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) { |
| 852 | /* | 854 | /* |
| 855 | * Ensure sticky power-down is clear (i.e. debug logic is | ||
| 856 | * powered up). | ||
| 857 | */ | ||
| 858 | asm volatile("mrc p14, 0, %0, c1, c5, 4" : "=r" (dbg_power)); | ||
| 859 | if ((dbg_power & 0x1) == 0) { | ||
| 860 | pr_warning("CPU %d debug is powered down!\n", cpu); | ||
| 861 | cpumask_or(cpumask, cpumask, cpumask_of(cpu)); | ||
| 862 | return; | ||
| 863 | } | ||
| 864 | |||
| 865 | /* | ||
| 853 | * Unconditionally clear the lock by writing a value | 866 | * Unconditionally clear the lock by writing a value |
| 854 | * other than 0xC5ACCE55 to the access register. | 867 | * other than 0xC5ACCE55 to the access register. |
| 855 | */ | 868 | */ |
| @@ -887,6 +900,7 @@ static struct notifier_block __cpuinitdata dbg_reset_nb = { | |||
| 887 | static int __init arch_hw_breakpoint_init(void) | 900 | static int __init arch_hw_breakpoint_init(void) |
| 888 | { | 901 | { |
| 889 | u32 dscr; | 902 | u32 dscr; |
| 903 | cpumask_t cpumask = { CPU_BITS_NONE }; | ||
| 890 | 904 | ||
| 891 | debug_arch = get_debug_arch(); | 905 | debug_arch = get_debug_arch(); |
| 892 | 906 | ||
| @@ -911,7 +925,13 @@ static int __init arch_hw_breakpoint_init(void) | |||
| 911 | * Reset the breakpoint resources. We assume that a halting | 925 | * Reset the breakpoint resources. We assume that a halting |
| 912 | * debugger will leave the world in a nice state for us. | 926 | * debugger will leave the world in a nice state for us. |
| 913 | */ | 927 | */ |
| 914 | on_each_cpu(reset_ctrl_regs, NULL, 1); | 928 | on_each_cpu(reset_ctrl_regs, &cpumask, 1); |
| 929 | if (!cpumask_empty(&cpumask)) { | ||
| 930 | core_num_brps = 0; | ||
| 931 | core_num_reserved_brps = 0; | ||
| 932 | core_num_wrps = 0; | ||
| 933 | return 0; | ||
| 934 | } | ||
| 915 | 935 | ||
| 916 | ARM_DBG_READ(c1, 0, dscr); | 936 | ARM_DBG_READ(c1, 0, dscr); |
| 917 | if (dscr & ARM_DSCR_HDBGEN) { | 937 | if (dscr & ARM_DSCR_HDBGEN) { |
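The hw_breakpoint.c change reuses the info argument of on_each_cpu() to collect per-CPU results: any CPU that finds its debug logic powered down marks itself in a shared cpumask, and arch_hw_breakpoint_init() then skips breakpoint setup if the mask is non-empty afterwards. A stripped-down sketch of that reporting pattern, where check_this_cpu() is a made-up predicate rather than a kernel API:

        #include <linux/cpumask.h>
        #include <linux/errno.h>
        #include <linux/init.h>
        #include <linux/smp.h>
        #include <linux/types.h>

        /* Made-up per-CPU health check. */
        static bool check_this_cpu(void)
        {
                return true;
        }

        /* Runs on every CPU via IPI; 'info' points at the shared mask. */
        static void probe_cpu(void *info)
        {
                cpumask_t *failed = info;

                if (!check_this_cpu())
                        cpumask_set_cpu(smp_processor_id(), failed);
        }

        static int __init probe_all_cpus(void)
        {
                cpumask_t failed = { CPU_BITS_NONE };

                on_each_cpu(probe_cpu, &failed, 1);

                return cpumask_empty(&failed) ? 0 : -ENODEV;
        }
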
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 19c6816db61e..b13e70f63d71 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c | |||
| @@ -996,10 +996,10 @@ static int ptrace_gethbpregs(struct task_struct *tsk, long num, | |||
| 996 | while (!(arch_ctrl.len & 0x1)) | 996 | while (!(arch_ctrl.len & 0x1)) |
| 997 | arch_ctrl.len >>= 1; | 997 | arch_ctrl.len >>= 1; |
| 998 | 998 | ||
| 999 | if (idx & 0x1) | 999 | if (num & 0x1) |
| 1000 | reg = encode_ctrl_reg(arch_ctrl); | ||
| 1001 | else | ||
| 1002 | reg = bp->attr.bp_addr; | 1000 | reg = bp->attr.bp_addr; |
| 1001 | else | ||
| 1002 | reg = encode_ctrl_reg(arch_ctrl); | ||
| 1003 | } | 1003 | } |
| 1004 | 1004 | ||
| 1005 | put: | 1005 | put: |
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c index 343de73161fa..4a68c2b1ec11 100644 --- a/arch/arm/mach-davinci/cpufreq.c +++ b/arch/arm/mach-davinci/cpufreq.c | |||
| @@ -132,7 +132,7 @@ out: | |||
| 132 | return ret; | 132 | return ret; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | static int __init davinci_cpu_init(struct cpufreq_policy *policy) | 135 | static int davinci_cpu_init(struct cpufreq_policy *policy) |
| 136 | { | 136 | { |
| 137 | int result = 0; | 137 | int result = 0; |
| 138 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; | 138 | struct davinci_cpufreq_config *pdata = cpufreq.dev->platform_data; |
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c index 9eec63070e0c..beda8a4133a0 100644 --- a/arch/arm/mach-davinci/devices-da8xx.c +++ b/arch/arm/mach-davinci/devices-da8xx.c | |||
| @@ -480,8 +480,15 @@ static struct platform_device da850_mcasp_device = { | |||
| 480 | .resource = da850_mcasp_resources, | 480 | .resource = da850_mcasp_resources, |
| 481 | }; | 481 | }; |
| 482 | 482 | ||
| 483 | struct platform_device davinci_pcm_device = { | ||
| 484 | .name = "davinci-pcm-audio", | ||
| 485 | .id = -1, | ||
| 486 | }; | ||
| 487 | |||
| 483 | void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) | 488 | void __init da8xx_register_mcasp(int id, struct snd_platform_data *pdata) |
| 484 | { | 489 | { |
| 490 | platform_device_register(&davinci_pcm_device); | ||
| 491 | |||
| 485 | /* DA830/OMAP-L137 has 3 instances of McASP */ | 492 | /* DA830/OMAP-L137 has 3 instances of McASP */ |
| 486 | if (cpu_is_davinci_da830() && id == 1) { | 493 | if (cpu_is_davinci_da830() && id == 1) { |
| 487 | da830_mcasp1_device.dev.platform_data = pdata; | 494 | da830_mcasp1_device.dev.platform_data = pdata; |
diff --git a/arch/arm/mach-davinci/gpio-tnetv107x.c b/arch/arm/mach-davinci/gpio-tnetv107x.c index d10298620e2c..3fa3e2867e19 100644 --- a/arch/arm/mach-davinci/gpio-tnetv107x.c +++ b/arch/arm/mach-davinci/gpio-tnetv107x.c | |||
| @@ -58,7 +58,7 @@ static int tnetv107x_gpio_request(struct gpio_chip *chip, unsigned offset) | |||
| 58 | 58 | ||
| 59 | spin_lock_irqsave(&ctlr->lock, flags); | 59 | spin_lock_irqsave(&ctlr->lock, flags); |
| 60 | 60 | ||
| 61 | gpio_reg_set_bit(®s->enable, gpio); | 61 | gpio_reg_set_bit(regs->enable, gpio); |
| 62 | 62 | ||
| 63 | spin_unlock_irqrestore(&ctlr->lock, flags); | 63 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 64 | 64 | ||
| @@ -74,7 +74,7 @@ static void tnetv107x_gpio_free(struct gpio_chip *chip, unsigned offset) | |||
| 74 | 74 | ||
| 75 | spin_lock_irqsave(&ctlr->lock, flags); | 75 | spin_lock_irqsave(&ctlr->lock, flags); |
| 76 | 76 | ||
| 77 | gpio_reg_clear_bit(®s->enable, gpio); | 77 | gpio_reg_clear_bit(regs->enable, gpio); |
| 78 | 78 | ||
| 79 | spin_unlock_irqrestore(&ctlr->lock, flags); | 79 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 80 | } | 80 | } |
| @@ -88,7 +88,7 @@ static int tnetv107x_gpio_dir_in(struct gpio_chip *chip, unsigned offset) | |||
| 88 | 88 | ||
| 89 | spin_lock_irqsave(&ctlr->lock, flags); | 89 | spin_lock_irqsave(&ctlr->lock, flags); |
| 90 | 90 | ||
| 91 | gpio_reg_set_bit(®s->direction, gpio); | 91 | gpio_reg_set_bit(regs->direction, gpio); |
| 92 | 92 | ||
| 93 | spin_unlock_irqrestore(&ctlr->lock, flags); | 93 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 94 | 94 | ||
| @@ -106,11 +106,11 @@ static int tnetv107x_gpio_dir_out(struct gpio_chip *chip, | |||
| 106 | spin_lock_irqsave(&ctlr->lock, flags); | 106 | spin_lock_irqsave(&ctlr->lock, flags); |
| 107 | 107 | ||
| 108 | if (value) | 108 | if (value) |
| 109 | gpio_reg_set_bit(®s->data_out, gpio); | 109 | gpio_reg_set_bit(regs->data_out, gpio); |
| 110 | else | 110 | else |
| 111 | gpio_reg_clear_bit(®s->data_out, gpio); | 111 | gpio_reg_clear_bit(regs->data_out, gpio); |
| 112 | 112 | ||
| 113 | gpio_reg_clear_bit(®s->direction, gpio); | 113 | gpio_reg_clear_bit(regs->direction, gpio); |
| 114 | 114 | ||
| 115 | spin_unlock_irqrestore(&ctlr->lock, flags); | 115 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 116 | 116 | ||
| @@ -124,7 +124,7 @@ static int tnetv107x_gpio_get(struct gpio_chip *chip, unsigned offset) | |||
| 124 | unsigned gpio = chip->base + offset; | 124 | unsigned gpio = chip->base + offset; |
| 125 | int ret; | 125 | int ret; |
| 126 | 126 | ||
| 127 | ret = gpio_reg_get_bit(®s->data_in, gpio); | 127 | ret = gpio_reg_get_bit(regs->data_in, gpio); |
| 128 | 128 | ||
| 129 | return ret ? 1 : 0; | 129 | return ret ? 1 : 0; |
| 130 | } | 130 | } |
| @@ -140,9 +140,9 @@ static void tnetv107x_gpio_set(struct gpio_chip *chip, | |||
| 140 | spin_lock_irqsave(&ctlr->lock, flags); | 140 | spin_lock_irqsave(&ctlr->lock, flags); |
| 141 | 141 | ||
| 142 | if (value) | 142 | if (value) |
| 143 | gpio_reg_set_bit(®s->data_out, gpio); | 143 | gpio_reg_set_bit(regs->data_out, gpio); |
| 144 | else | 144 | else |
| 145 | gpio_reg_clear_bit(®s->data_out, gpio); | 145 | gpio_reg_clear_bit(regs->data_out, gpio); |
| 146 | 146 | ||
| 147 | spin_unlock_irqrestore(&ctlr->lock, flags); | 147 | spin_unlock_irqrestore(&ctlr->lock, flags); |
| 148 | } | 148 | } |
diff --git a/arch/arm/mach-davinci/include/mach/clkdev.h b/arch/arm/mach-davinci/include/mach/clkdev.h index 730c49d1ebd8..14a504887189 100644 --- a/arch/arm/mach-davinci/include/mach/clkdev.h +++ b/arch/arm/mach-davinci/include/mach/clkdev.h | |||
| @@ -1,6 +1,8 @@ | |||
| 1 | #ifndef __MACH_CLKDEV_H | 1 | #ifndef __MACH_CLKDEV_H |
| 2 | #define __MACH_CLKDEV_H | 2 | #define __MACH_CLKDEV_H |
| 3 | 3 | ||
| 4 | struct clk; | ||
| 5 | |||
| 4 | static inline int __clk_get(struct clk *clk) | 6 | static inline int __clk_get(struct clk *clk) |
| 5 | { | 7 | { |
| 6 | return 1; | 8 | return 1; |
diff --git a/arch/arm/mach-omap2/mailbox.c b/arch/arm/mach-omap2/mailbox.c index 0a585dfa9874..24b88504df0f 100644 --- a/arch/arm/mach-omap2/mailbox.c +++ b/arch/arm/mach-omap2/mailbox.c | |||
| @@ -193,10 +193,12 @@ static void omap2_mbox_disable_irq(struct omap_mbox *mbox, | |||
| 193 | omap_mbox_type_t irq) | 193 | omap_mbox_type_t irq) |
| 194 | { | 194 | { |
| 195 | struct omap_mbox2_priv *p = mbox->priv; | 195 | struct omap_mbox2_priv *p = mbox->priv; |
| 196 | u32 l, bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; | 196 | u32 bit = (irq == IRQ_TX) ? p->notfull_bit : p->newmsg_bit; |
| 197 | l = mbox_read_reg(p->irqdisable); | 197 | |
| 198 | l &= ~bit; | 198 | if (!cpu_is_omap44xx()) |
| 199 | mbox_write_reg(l, p->irqdisable); | 199 | bit = mbox_read_reg(p->irqdisable) & ~bit; |
| 200 | |||
| 201 | mbox_write_reg(bit, p->irqdisable); | ||
| 200 | } | 202 | } |
| 201 | 203 | ||
| 202 | static void omap2_mbox_ack_irq(struct omap_mbox *mbox, | 204 | static void omap2_mbox_ack_irq(struct omap_mbox *mbox, |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 95ac336fe3f7..1a777e34d0c2 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
| @@ -282,6 +282,7 @@ error: | |||
| 282 | dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" | 282 | dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" |
| 283 | "interrupt handler. Smartreflex will" | 283 | "interrupt handler. Smartreflex will" |
| 284 | "not function as desired\n", __func__); | 284 | "not function as desired\n", __func__); |
| 285 | kfree(name); | ||
| 285 | kfree(sr_info); | 286 | kfree(sr_info); |
| 286 | return ret; | 287 | return ret; |
| 287 | } | 288 | } |
| @@ -879,7 +880,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
| 879 | ret = sr_late_init(sr_info); | 880 | ret = sr_late_init(sr_info); |
| 880 | if (ret) { | 881 | if (ret) { |
| 881 | pr_warning("%s: Error in SR late init\n", __func__); | 882 | pr_warning("%s: Error in SR late init\n", __func__); |
| 882 | return ret; | 883 | goto err_release_region; |
| 883 | } | 884 | } |
| 884 | } | 885 | } |
| 885 | 886 | ||
| @@ -890,14 +891,17 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
| 890 | * not try to create rest of the debugfs entries. | 891 | * not try to create rest of the debugfs entries. |
| 891 | */ | 892 | */ |
| 892 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); | 893 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); |
| 893 | if (!vdd_dbg_dir) | 894 | if (!vdd_dbg_dir) { |
| 894 | return -EINVAL; | 895 | ret = -EINVAL; |
| 896 | goto err_release_region; | ||
| 897 | } | ||
| 895 | 898 | ||
| 896 | dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); | 899 | dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); |
| 897 | if (IS_ERR(dbg_dir)) { | 900 | if (IS_ERR(dbg_dir)) { |
| 898 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", | 901 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", |
| 899 | __func__); | 902 | __func__); |
| 900 | return PTR_ERR(dbg_dir); | 903 | ret = PTR_ERR(dbg_dir); |
| 904 | goto err_release_region; | ||
| 901 | } | 905 | } |
| 902 | 906 | ||
| 903 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, | 907 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, dbg_dir, |
| @@ -913,7 +917,8 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
| 913 | if (IS_ERR(nvalue_dir)) { | 917 | if (IS_ERR(nvalue_dir)) { |
| 914 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" | 918 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" |
| 915 | "for n-values\n", __func__); | 919 | "for n-values\n", __func__); |
| 916 | return PTR_ERR(nvalue_dir); | 920 | ret = PTR_ERR(nvalue_dir); |
| 921 | goto err_release_region; | ||
| 917 | } | 922 | } |
| 918 | 923 | ||
| 919 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); | 924 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); |
| @@ -922,23 +927,15 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
| 922 | " corresponding vdd vdd_%s. Cannot create debugfs" | 927 | " corresponding vdd vdd_%s. Cannot create debugfs" |
| 923 | "entries for n-values\n", | 928 | "entries for n-values\n", |
| 924 | __func__, sr_info->voltdm->name); | 929 | __func__, sr_info->voltdm->name); |
| 925 | return -ENODATA; | 930 | ret = -ENODATA; |
| 931 | goto err_release_region; | ||
| 926 | } | 932 | } |
| 927 | 933 | ||
| 928 | for (i = 0; i < sr_info->nvalue_count; i++) { | 934 | for (i = 0; i < sr_info->nvalue_count; i++) { |
| 929 | char *name; | 935 | char name[NVALUE_NAME_LEN + 1]; |
| 930 | char volt_name[32]; | ||
| 931 | |||
| 932 | name = kzalloc(NVALUE_NAME_LEN + 1, GFP_KERNEL); | ||
| 933 | if (!name) { | ||
| 934 | dev_err(&pdev->dev, "%s: Unable to allocate memory" | ||
| 935 | " for n-value directory name\n", __func__); | ||
| 936 | return -ENOMEM; | ||
| 937 | } | ||
| 938 | 936 | ||
| 939 | strcpy(name, "volt_"); | 937 | snprintf(name, sizeof(name), "volt_%d", |
| 940 | sprintf(volt_name, "%d", volt_data[i].volt_nominal); | 938 | volt_data[i].volt_nominal); |
| 941 | strcat(name, volt_name); | ||
| 942 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, | 939 | (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, |
| 943 | &(sr_info->nvalue_table[i].nvalue)); | 940 | &(sr_info->nvalue_table[i].nvalue)); |
| 944 | } | 941 | } |
diff --git a/arch/arm/mach-pxa/pxa25x.c b/arch/arm/mach-pxa/pxa25x.c index fbc5b775f895..b166b1d845d7 100644 --- a/arch/arm/mach-pxa/pxa25x.c +++ b/arch/arm/mach-pxa/pxa25x.c | |||
| @@ -347,6 +347,7 @@ static struct platform_device *pxa25x_devices[] __initdata = { | |||
| 347 | &pxa25x_device_assp, | 347 | &pxa25x_device_assp, |
| 348 | &pxa25x_device_pwm0, | 348 | &pxa25x_device_pwm0, |
| 349 | &pxa25x_device_pwm1, | 349 | &pxa25x_device_pwm1, |
| 350 | &pxa_device_asoc_platform, | ||
| 350 | }; | 351 | }; |
| 351 | 352 | ||
| 352 | static struct sys_device pxa25x_sysdev[] = { | 353 | static struct sys_device pxa25x_sysdev[] = { |
diff --git a/arch/arm/mach-pxa/tosa-bt.c b/arch/arm/mach-pxa/tosa-bt.c index c31e601eb49c..b9b1e5c2b290 100644 --- a/arch/arm/mach-pxa/tosa-bt.c +++ b/arch/arm/mach-pxa/tosa-bt.c | |||
| @@ -81,8 +81,6 @@ static int tosa_bt_probe(struct platform_device *dev) | |||
| 81 | goto err_rfk_alloc; | 81 | goto err_rfk_alloc; |
| 82 | } | 82 | } |
| 83 | 83 | ||
| 84 | rfkill_set_led_trigger_name(rfk, "tosa-bt"); | ||
| 85 | |||
| 86 | rc = rfkill_register(rfk); | 84 | rc = rfkill_register(rfk); |
| 87 | if (rc) | 85 | if (rc) |
| 88 | goto err_rfkill; | 86 | goto err_rfkill; |
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c index af152e70cfcf..f2582ec300d9 100644 --- a/arch/arm/mach-pxa/tosa.c +++ b/arch/arm/mach-pxa/tosa.c | |||
| @@ -875,6 +875,11 @@ static struct platform_device sharpsl_rom_device = { | |||
| 875 | .dev.platform_data = &sharpsl_rom_data, | 875 | .dev.platform_data = &sharpsl_rom_data, |
| 876 | }; | 876 | }; |
| 877 | 877 | ||
| 878 | static struct platform_device wm9712_device = { | ||
| 879 | .name = "wm9712-codec", | ||
| 880 | .id = -1, | ||
| 881 | }; | ||
| 882 | |||
| 878 | static struct platform_device *devices[] __initdata = { | 883 | static struct platform_device *devices[] __initdata = { |
| 879 | &tosascoop_device, | 884 | &tosascoop_device, |
| 880 | &tosascoop_jc_device, | 885 | &tosascoop_jc_device, |
| @@ -885,6 +890,7 @@ static struct platform_device *devices[] __initdata = { | |||
| 885 | &tosaled_device, | 890 | &tosaled_device, |
| 886 | &tosa_bt_device, | 891 | &tosa_bt_device, |
| 887 | &sharpsl_rom_device, | 892 | &sharpsl_rom_device, |
| 893 | &wm9712_device, | ||
| 888 | }; | 894 | }; |
| 889 | 895 | ||
| 890 | static void tosa_poweroff(void) | 896 | static void tosa_poweroff(void) |
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c index 2123b96b5638..4303a86e6e38 100644 --- a/arch/arm/mach-shmobile/board-ag5evm.c +++ b/arch/arm/mach-shmobile/board-ag5evm.c | |||
| @@ -454,6 +454,7 @@ static void __init ag5evm_init(void) | |||
| 454 | gpio_direction_output(GPIO_PORT217, 0); | 454 | gpio_direction_output(GPIO_PORT217, 0); |
| 455 | mdelay(1); | 455 | mdelay(1); |
| 456 | gpio_set_value(GPIO_PORT217, 1); | 456 | gpio_set_value(GPIO_PORT217, 1); |
| 457 | mdelay(100); | ||
| 457 | 458 | ||
| 458 | /* LCD backlight controller */ | 459 | /* LCD backlight controller */ |
| 459 | gpio_request(GPIO_PORT235, NULL); /* RESET */ | 460 | gpio_request(GPIO_PORT235, NULL); /* RESET */ |
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c index 3cf0951caa2d..81d6536552a9 100644 --- a/arch/arm/mach-shmobile/board-ap4evb.c +++ b/arch/arm/mach-shmobile/board-ap4evb.c | |||
| @@ -1303,7 +1303,7 @@ static void __init ap4evb_init(void) | |||
| 1303 | 1303 | ||
| 1304 | lcdc_info.clock_source = LCDC_CLK_BUS; | 1304 | lcdc_info.clock_source = LCDC_CLK_BUS; |
| 1305 | lcdc_info.ch[0].interface_type = RGB18; | 1305 | lcdc_info.ch[0].interface_type = RGB18; |
| 1306 | lcdc_info.ch[0].clock_divider = 2; | 1306 | lcdc_info.ch[0].clock_divider = 3; |
| 1307 | lcdc_info.ch[0].flags = 0; | 1307 | lcdc_info.ch[0].flags = 0; |
| 1308 | lcdc_info.ch[0].lcd_size_cfg.width = 152; | 1308 | lcdc_info.ch[0].lcd_size_cfg.width = 152; |
| 1309 | lcdc_info.ch[0].lcd_size_cfg.height = 91; | 1309 | lcdc_info.ch[0].lcd_size_cfg.height = 91; |
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c index fb4213a4e15a..1657eac5dde2 100644 --- a/arch/arm/mach-shmobile/board-mackerel.c +++ b/arch/arm/mach-shmobile/board-mackerel.c | |||
| @@ -303,7 +303,7 @@ static struct sh_mobile_lcdc_info lcdc_info = { | |||
| 303 | .lcd_cfg = mackerel_lcdc_modes, | 303 | .lcd_cfg = mackerel_lcdc_modes, |
| 304 | .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), | 304 | .num_cfg = ARRAY_SIZE(mackerel_lcdc_modes), |
| 305 | .interface_type = RGB24, | 305 | .interface_type = RGB24, |
| 306 | .clock_divider = 2, | 306 | .clock_divider = 3, |
| 307 | .flags = 0, | 307 | .flags = 0, |
| 308 | .lcd_size_cfg.width = 152, | 308 | .lcd_size_cfg.width = 152, |
| 309 | .lcd_size_cfg.height = 91, | 309 | .lcd_size_cfg.height = 91, |
diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index ddd4a1b775f0..7e58904c1c8c 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c | |||
| @@ -263,7 +263,7 @@ static struct clk div6_clks[DIV6_NR] = { | |||
| 263 | }; | 263 | }; |
| 264 | 264 | ||
| 265 | enum { MSTP001, | 265 | enum { MSTP001, |
| 266 | MSTP125, MSTP118, MSTP116, MSTP100, | 266 | MSTP129, MSTP128, MSTP127, MSTP126, MSTP125, MSTP118, MSTP116, MSTP100, |
| 267 | MSTP219, | 267 | MSTP219, |
| 268 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, | 268 | MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, |
| 269 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, | 269 | MSTP331, MSTP329, MSTP325, MSTP323, MSTP312, |
| @@ -275,6 +275,10 @@ enum { MSTP001, | |||
| 275 | 275 | ||
| 276 | static struct clk mstp_clks[MSTP_NR] = { | 276 | static struct clk mstp_clks[MSTP_NR] = { |
| 277 | [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ | 277 | [MSTP001] = MSTP(&div4_clks[DIV4_HP], SMSTPCR0, 1, 0), /* IIC2 */ |
| 278 | [MSTP129] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 29, 0), /* CEU1 */ | ||
| 279 | [MSTP128] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 28, 0), /* CSI2-RX1 */ | ||
| 280 | [MSTP127] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 27, 0), /* CEU0 */ | ||
| 281 | [MSTP126] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 26, 0), /* CSI2-RX0 */ | ||
| 278 | [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ | 282 | [MSTP125] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ |
| 279 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ | 283 | [MSTP118] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 18, 0), /* DSITX0 */ |
| 280 | [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ | 284 | [MSTP116] = MSTP(&div4_clks[DIV4_HP], SMSTPCR1, 16, 0), /* IIC0 */ |
| @@ -306,6 +310,9 @@ static struct clk_lookup lookups[] = { | |||
| 306 | CLKDEV_CON_ID("r_clk", &r_clk), | 310 | CLKDEV_CON_ID("r_clk", &r_clk), |
| 307 | 311 | ||
| 308 | /* DIV6 clocks */ | 312 | /* DIV6 clocks */ |
| 313 | CLKDEV_CON_ID("vck1_clk", &div6_clks[DIV6_VCK1]), | ||
| 314 | CLKDEV_CON_ID("vck2_clk", &div6_clks[DIV6_VCK2]), | ||
| 315 | CLKDEV_CON_ID("vck3_clk", &div6_clks[DIV6_VCK3]), | ||
| 309 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), | 316 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSIT]), |
| 310 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), | 317 | CLKDEV_ICK_ID("dsit_clk", "sh-mipi-dsi.1", &div6_clks[DIV6_DSIT]), |
| 311 | CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), | 318 | CLKDEV_ICK_ID("dsi0p_clk", "sh-mipi-dsi.0", &div6_clks[DIV6_DSI0P]), |
| @@ -313,11 +320,15 @@ static struct clk_lookup lookups[] = { | |||
| 313 | 320 | ||
| 314 | /* MSTP32 clocks */ | 321 | /* MSTP32 clocks */ |
| 315 | CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ | 322 | CLKDEV_DEV_ID("i2c-sh_mobile.2", &mstp_clks[MSTP001]), /* I2C2 */ |
| 316 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ | 323 | CLKDEV_DEV_ID("sh_mobile_ceu.1", &mstp_clks[MSTP129]), /* CEU1 */ |
| 324 | CLKDEV_DEV_ID("sh-mobile-csi2.1", &mstp_clks[MSTP128]), /* CSI2-RX1 */ | ||
| 325 | CLKDEV_DEV_ID("sh_mobile_ceu.0", &mstp_clks[MSTP127]), /* CEU0 */ | ||
| 326 | CLKDEV_DEV_ID("sh-mobile-csi2.0", &mstp_clks[MSTP126]), /* CSI2-RX0 */ | ||
| 317 | CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ | 327 | CLKDEV_DEV_ID("sh_tmu.0", &mstp_clks[MSTP125]), /* TMU00 */ |
| 318 | CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ | 328 | CLKDEV_DEV_ID("sh_tmu.1", &mstp_clks[MSTP125]), /* TMU01 */ |
| 319 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ | ||
| 320 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ | 329 | CLKDEV_DEV_ID("sh-mipi-dsi.0", &mstp_clks[MSTP118]), /* DSITX */ |
| 330 | CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[MSTP116]), /* I2C0 */ | ||
| 331 | CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */ | ||
| 321 | CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ | 332 | CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP219]), /* SCIFA7 */ |
| 322 | CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ | 333 | CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ |
| 323 | CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ | 334 | CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ |
diff --git a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt +++ b/arch/arm/mach-shmobile/include/mach/head-ap4evb.txt | |||
| @@ -6,13 +6,10 @@ LIST "RWT Setting" | |||
| 6 | EW 0xE6020004, 0xA500 | 6 | EW 0xE6020004, 0xA500 |
| 7 | EW 0xE6030004, 0xA500 | 7 | EW 0xE6030004, 0xA500 |
| 8 | 8 | ||
| 9 | DD 0x01001000, 0x01001000 | ||
| 10 | |||
| 11 | LIST "GPIO Setting" | 9 | LIST "GPIO Setting" |
| 12 | EB 0xE6051013, 0xA2 | 10 | EB 0xE6051013, 0xA2 |
| 13 | 11 | ||
| 14 | LIST "CPG" | 12 | LIST "CPG" |
| 15 | ED 0xE6150080, 0x00000180 | ||
| 16 | ED 0xE61500C0, 0x00000002 | 13 | ED 0xE61500C0, 0x00000002 |
| 17 | 14 | ||
| 18 | WAIT 1, 0xFE40009C | 15 | WAIT 1, 0xFE40009C |
| @@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 | |||
| 37 | 34 | ||
| 38 | WAIT 1, 0xFE40009C | 35 | WAIT 1, 0xFE40009C |
| 39 | 36 | ||
| 37 | LIST "SUB/USBClk" | ||
| 38 | ED 0xE6150080, 0x00000180 | ||
| 39 | |||
| 40 | LIST "BSC" | 40 | LIST "BSC" |
| 41 | ED 0xFEC10000, 0x00E0001B | 41 | ED 0xFEC10000, 0x00E0001B |
| 42 | 42 | ||
| @@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 | |||
| 53 | ED 0xFE40004C, 0x00110209 | 53 | ED 0xFE40004C, 0x00110209 |
| 54 | ED 0xFE400010, 0x00000087 | 54 | ED 0xFE400010, 0x00000087 |
| 55 | 55 | ||
| 56 | WAIT 10, 0xFE40009C | 56 | WAIT 30, 0xFE40009C |
| 57 | 57 | ||
| 58 | ED 0xFE400084, 0x0000003F | 58 | ED 0xFE400084, 0x0000003F |
| 59 | EB 0xFE500000, 0x00 | 59 | EB 0xFE500000, 0x00 |
| @@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 | |||
| 84 | 84 | ||
| 85 | WAIT 1, 0xFE40009C | 85 | WAIT 1, 0xFE40009C |
| 86 | 86 | ||
| 87 | ED 0xE6150354, 0x00000002 | 87 | ED 0xFE400354, 0x01AD8002 |
| 88 | 88 | ||
| 89 | LIST "SCIF0 - Serial port for earlyprintk" | 89 | LIST "SCIF0 - Serial port for earlyprintk" |
| 90 | EB 0xE6053098, 0x11 | 90 | EB 0xE6053098, 0x11 |
diff --git a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt index efd3687ba190..3029aba38688 100644 --- a/arch/arm/mach-shmobile/include/mach/head-mackerel.txt +++ b/arch/arm/mach-shmobile/include/mach/head-mackerel.txt | |||
| @@ -6,13 +6,10 @@ LIST "RWT Setting" | |||
| 6 | EW 0xE6020004, 0xA500 | 6 | EW 0xE6020004, 0xA500 |
| 7 | EW 0xE6030004, 0xA500 | 7 | EW 0xE6030004, 0xA500 |
| 8 | 8 | ||
| 9 | DD 0x01001000, 0x01001000 | ||
| 10 | |||
| 11 | LIST "GPIO Setting" | 9 | LIST "GPIO Setting" |
| 12 | EB 0xE6051013, 0xA2 | 10 | EB 0xE6051013, 0xA2 |
| 13 | 11 | ||
| 14 | LIST "CPG" | 12 | LIST "CPG" |
| 15 | ED 0xE6150080, 0x00000180 | ||
| 16 | ED 0xE61500C0, 0x00000002 | 13 | ED 0xE61500C0, 0x00000002 |
| 17 | 14 | ||
| 18 | WAIT 1, 0xFE40009C | 15 | WAIT 1, 0xFE40009C |
| @@ -37,6 +34,9 @@ ED 0xE615002C, 0x93000040 | |||
| 37 | 34 | ||
| 38 | WAIT 1, 0xFE40009C | 35 | WAIT 1, 0xFE40009C |
| 39 | 36 | ||
| 37 | LIST "SUB/USBClk" | ||
| 38 | ED 0xE6150080, 0x00000180 | ||
| 39 | |||
| 40 | LIST "BSC" | 40 | LIST "BSC" |
| 41 | ED 0xFEC10000, 0x00E0001B | 41 | ED 0xFEC10000, 0x00E0001B |
| 42 | 42 | ||
| @@ -53,7 +53,7 @@ ED 0xFE400048, 0x20C18505 | |||
| 53 | ED 0xFE40004C, 0x00110209 | 53 | ED 0xFE40004C, 0x00110209 |
| 54 | ED 0xFE400010, 0x00000087 | 54 | ED 0xFE400010, 0x00000087 |
| 55 | 55 | ||
| 56 | WAIT 10, 0xFE40009C | 56 | WAIT 30, 0xFE40009C |
| 57 | 57 | ||
| 58 | ED 0xFE400084, 0x0000003F | 58 | ED 0xFE400084, 0x0000003F |
| 59 | EB 0xFE500000, 0x00 | 59 | EB 0xFE500000, 0x00 |
| @@ -84,7 +84,7 @@ ED 0xE6150004, 0x80331050 | |||
| 84 | 84 | ||
| 85 | WAIT 1, 0xFE40009C | 85 | WAIT 1, 0xFE40009C |
| 86 | 86 | ||
| 87 | ED 0xE6150354, 0x00000002 | 87 | ED 0xFE400354, 0x01AD8002 |
| 88 | 88 | ||
| 89 | LIST "SCIF0 - Serial port for earlyprintk" | 89 | LIST "SCIF0 - Serial port for earlyprintk" |
| 90 | EB 0xE6053098, 0x11 | 90 | EB 0xE6053098, 0x11 |
diff --git a/arch/blackfin/lib/outs.S b/arch/blackfin/lib/outs.S index 250f4d4b9436..06a5e674401f 100644 --- a/arch/blackfin/lib/outs.S +++ b/arch/blackfin/lib/outs.S | |||
| @@ -13,6 +13,8 @@ | |||
| 13 | .align 2 | 13 | .align 2 |
| 14 | 14 | ||
| 15 | ENTRY(_outsl) | 15 | ENTRY(_outsl) |
| 16 | CC = R2 == 0; | ||
| 17 | IF CC JUMP 1f; | ||
| 16 | P0 = R0; /* P0 = port */ | 18 | P0 = R0; /* P0 = port */ |
| 17 | P1 = R1; /* P1 = address */ | 19 | P1 = R1; /* P1 = address */ |
| 18 | P2 = R2; /* P2 = count */ | 20 | P2 = R2; /* P2 = count */ |
| @@ -20,10 +22,12 @@ ENTRY(_outsl) | |||
| 20 | LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; | 22 | LSETUP( .Llong_loop_s, .Llong_loop_e) LC0 = P2; |
| 21 | .Llong_loop_s: R0 = [P1++]; | 23 | .Llong_loop_s: R0 = [P1++]; |
| 22 | .Llong_loop_e: [P0] = R0; | 24 | .Llong_loop_e: [P0] = R0; |
| 23 | RTS; | 25 | 1: RTS; |
| 24 | ENDPROC(_outsl) | 26 | ENDPROC(_outsl) |
| 25 | 27 | ||
| 26 | ENTRY(_outsw) | 28 | ENTRY(_outsw) |
| 29 | CC = R2 == 0; | ||
| 30 | IF CC JUMP 1f; | ||
| 27 | P0 = R0; /* P0 = port */ | 31 | P0 = R0; /* P0 = port */ |
| 28 | P1 = R1; /* P1 = address */ | 32 | P1 = R1; /* P1 = address */ |
| 29 | P2 = R2; /* P2 = count */ | 33 | P2 = R2; /* P2 = count */ |
| @@ -31,10 +35,12 @@ ENTRY(_outsw) | |||
| 31 | LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; | 35 | LSETUP( .Lword_loop_s, .Lword_loop_e) LC0 = P2; |
| 32 | .Lword_loop_s: R0 = W[P1++]; | 36 | .Lword_loop_s: R0 = W[P1++]; |
| 33 | .Lword_loop_e: W[P0] = R0; | 37 | .Lword_loop_e: W[P0] = R0; |
| 34 | RTS; | 38 | 1: RTS; |
| 35 | ENDPROC(_outsw) | 39 | ENDPROC(_outsw) |
| 36 | 40 | ||
| 37 | ENTRY(_outsb) | 41 | ENTRY(_outsb) |
| 42 | CC = R2 == 0; | ||
| 43 | IF CC JUMP 1f; | ||
| 38 | P0 = R0; /* P0 = port */ | 44 | P0 = R0; /* P0 = port */ |
| 39 | P1 = R1; /* P1 = address */ | 45 | P1 = R1; /* P1 = address */ |
| 40 | P2 = R2; /* P2 = count */ | 46 | P2 = R2; /* P2 = count */ |
| @@ -42,10 +48,12 @@ ENTRY(_outsb) | |||
| 42 | LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; | 48 | LSETUP( .Lbyte_loop_s, .Lbyte_loop_e) LC0 = P2; |
| 43 | .Lbyte_loop_s: R0 = B[P1++]; | 49 | .Lbyte_loop_s: R0 = B[P1++]; |
| 44 | .Lbyte_loop_e: B[P0] = R0; | 50 | .Lbyte_loop_e: B[P0] = R0; |
| 45 | RTS; | 51 | 1: RTS; |
| 46 | ENDPROC(_outsb) | 52 | ENDPROC(_outsb) |
| 47 | 53 | ||
| 48 | ENTRY(_outsw_8) | 54 | ENTRY(_outsw_8) |
| 55 | CC = R2 == 0; | ||
| 56 | IF CC JUMP 1f; | ||
| 49 | P0 = R0; /* P0 = port */ | 57 | P0 = R0; /* P0 = port */ |
| 50 | P1 = R1; /* P1 = address */ | 58 | P1 = R1; /* P1 = address */ |
| 51 | P2 = R2; /* P2 = count */ | 59 | P2 = R2; /* P2 = count */ |
| @@ -56,5 +64,5 @@ ENTRY(_outsw_8) | |||
| 56 | R0 = R0 << 8; | 64 | R0 = R0 << 8; |
| 57 | R0 = R0 + R1; | 65 | R0 = R0 + R1; |
| 58 | .Lword8_loop_e: W[P0] = R0; | 66 | .Lword8_loop_e: W[P0] = R0; |
| 59 | RTS; | 67 | 1: RTS; |
| 60 | ENDPROC(_outsw_8) | 68 | ENDPROC(_outsw_8) |
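Each Blackfin string-output routine above gains the same two-instruction guard: if the count in R2 is zero, control jumps straight to the RTS and the hardware loop is never set up. The intended semantics, expressed as a rough C sketch rather than the real assembly:

        #include <linux/io.h>
        #include <linux/types.h>

        /* Behavioural sketch only; the actual implementation is the
         * assembly in outs.S above. */
        static inline void outsl_sketch(void __iomem *port, const u32 *buf,
                                        unsigned long count)
        {
                if (count == 0)         /* the new early-exit guard */
                        return;

                while (count--)
                        __raw_writel(*buf++, port);
        }
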
diff --git a/arch/blackfin/mach-common/cache.S b/arch/blackfin/mach-common/cache.S index 790c767ca95a..ab4a925a443e 100644 --- a/arch/blackfin/mach-common/cache.S +++ b/arch/blackfin/mach-common/cache.S | |||
| @@ -58,6 +58,8 @@ | |||
| 58 | 1: | 58 | 1: |
| 59 | .ifeqs "\flushins", BROK_FLUSH_INST | 59 | .ifeqs "\flushins", BROK_FLUSH_INST |
| 60 | \flushins [P0++]; | 60 | \flushins [P0++]; |
| 61 | nop; | ||
| 62 | nop; | ||
| 61 | 2: nop; | 63 | 2: nop; |
| 62 | .else | 64 | .else |
| 63 | 2: \flushins [P0++]; | 65 | 2: \flushins [P0++]; |
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index 991d5998d6be..fe56a23e1ff0 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
| @@ -240,6 +240,12 @@ struct machdep_calls { | |||
| 240 | * claims to support kexec. | 240 | * claims to support kexec. |
| 241 | */ | 241 | */ |
| 242 | int (*machine_kexec_prepare)(struct kimage *image); | 242 | int (*machine_kexec_prepare)(struct kimage *image); |
| 243 | |||
| 244 | /* Called to perform the _real_ kexec. | ||
| 245 | * Do NOT allocate memory or fail here. We are past the point of | ||
| 246 | * no return. | ||
| 247 | */ | ||
| 248 | void (*machine_kexec)(struct kimage *image); | ||
| 243 | #endif /* CONFIG_KEXEC */ | 249 | #endif /* CONFIG_KEXEC */ |
| 244 | 250 | ||
| 245 | #ifdef CONFIG_SUSPEND | 251 | #ifdef CONFIG_SUSPEND |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index 49a170af8145..a5f8672eeff3 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
| @@ -87,7 +87,10 @@ void machine_kexec(struct kimage *image) | |||
| 87 | 87 | ||
| 88 | save_ftrace_enabled = __ftrace_enabled_save(); | 88 | save_ftrace_enabled = __ftrace_enabled_save(); |
| 89 | 89 | ||
| 90 | default_machine_kexec(image); | 90 | if (ppc_md.machine_kexec) |
| 91 | ppc_md.machine_kexec(image); | ||
| 92 | else | ||
| 93 | default_machine_kexec(image); | ||
| 91 | 94 | ||
| 92 | __ftrace_enabled_restore(save_ftrace_enabled); | 95 | __ftrace_enabled_restore(save_ftrace_enabled); |
| 93 | 96 | ||
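The new ppc_md.machine_kexec hook lets a platform take over the final kexec step; machine_kexec() calls the override when one is installed and falls back to default_machine_kexec() otherwise. The comment added to machdep.h is the key constraint: the hook runs past the point of no return, so it must not allocate memory or fail. A hypothetical override might look like the sketch below, where example_quiesce_hw() is an invented helper and only the relevant machdep_calls member is shown:

        #include <linux/kexec.h>
        #include <asm/machdep.h>

        /* Invented helper: quiesce platform interrupt sources and DMA
         * engines without allocating memory or sleeping. */
        static void example_quiesce_hw(void)
        {
        }

        static void example_machine_kexec(struct kimage *image)
        {
                example_quiesce_hw();
                /* Hand off to the generic code for the actual jump. */
                default_machine_kexec(image);
        }

        define_machine(example_board) {
                .name           = "example-board",
                .machine_kexec  = example_machine_kexec,
                /* ... remaining machdep_calls members omitted ... */
        };
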
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 7a1d5cb76932..8303a6c65ef7 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
| @@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) | |||
| 353 | prime_debug_regs(new_thread); | 353 | prime_debug_regs(new_thread); |
| 354 | } | 354 | } |
| 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
| 356 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
| 356 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
| 357 | { | 358 | { |
| 358 | if (thread->dabr) { | 359 | if (thread->dabr) { |
| @@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
| 360 | set_dabr(0); | 361 | set_dabr(0); |
| 361 | } | 362 | } |
| 362 | } | 363 | } |
| 364 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
| 363 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 365 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
| 364 | 366 | ||
| 365 | int set_dabr(unsigned long dabr) | 367 | int set_dabr(unsigned long dabr) |
| @@ -670,11 +672,11 @@ void flush_thread(void) | |||
| 670 | { | 672 | { |
| 671 | discard_lazy_cpu_state(); | 673 | discard_lazy_cpu_state(); |
| 672 | 674 | ||
| 673 | #ifdef CONFIG_HAVE_HW_BREAKPOINTS | 675 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
| 674 | flush_ptrace_hw_breakpoint(current); | 676 | flush_ptrace_hw_breakpoint(current); |
| 675 | #else /* CONFIG_HAVE_HW_BREAKPOINTS */ | 677 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
| 676 | set_debug_reg_defaults(¤t->thread); | 678 | set_debug_reg_defaults(¤t->thread); |
| 677 | #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ | 679 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
| 678 | } | 680 | } |
| 679 | 681 | ||
| 680 | void | 682 | void |
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 1ec06576f619..c14d09f614f3 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
| @@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | |||
| 38 | * needs to be flushed. This function will either perform the flush | 38 | * needs to be flushed. This function will either perform the flush |

| 39 | * immediately or will batch it up if the current CPU has an active | 39 | * immediately or will batch it up if the current CPU has an active |
| 40 | * batch on it. | 40 | * batch on it. |
| 41 | * | ||
| 42 | * Must be called from within some kind of spinlock/non-preempt region... | ||
| 43 | */ | 41 | */ |
| 44 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | 42 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, |
| 45 | pte_t *ptep, unsigned long pte, int huge) | 43 | pte_t *ptep, unsigned long pte, int huge) |
| 46 | { | 44 | { |
| 47 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 45 | struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); |
| 48 | unsigned long vsid, vaddr; | 46 | unsigned long vsid, vaddr; |
| 49 | unsigned int psize; | 47 | unsigned int psize; |
| 50 | int ssize; | 48 | int ssize; |
| @@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
| 99 | */ | 97 | */ |
| 100 | if (!batch->active) { | 98 | if (!batch->active) { |
| 101 | flush_hash_page(vaddr, rpte, psize, ssize, 0); | 99 | flush_hash_page(vaddr, rpte, psize, ssize, 0); |
| 100 | put_cpu_var(ppc64_tlb_batch); | ||
| 102 | return; | 101 | return; |
| 103 | } | 102 | } |
| 104 | 103 | ||
| @@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
| 127 | batch->index = ++i; | 126 | batch->index = ++i; |
| 128 | if (i >= PPC64_TLB_BATCH_NR) | 127 | if (i >= PPC64_TLB_BATCH_NR) |
| 129 | __flush_tlb_pending(batch); | 128 | __flush_tlb_pending(batch); |
| 129 | put_cpu_var(ppc64_tlb_batch); | ||
| 130 | } | 130 | } |
| 131 | 131 | ||
| 132 | /* | 132 | /* |
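The tlb_hash64.c hunk removes the requirement that callers of hpte_need_flush() disable preemption themselves: get_cpu_var() disables preemption while handing back this CPU's batch, and every exit path, including the early-flush return, now pairs it with put_cpu_var(). The general pattern, sketched with a made-up per-CPU structure:

        #include <linux/percpu.h>

        struct flush_batch {                    /* made-up stand-in for ppc64_tlb_batch */
                int active;
                int index;
        };

        static DEFINE_PER_CPU(struct flush_batch, flush_batch);

        static void example_queue_flush(int entry)
        {
                /* Disables preemption and returns this CPU's instance. */
                struct flush_batch *batch = &get_cpu_var(flush_batch);

                if (!batch->active) {
                        /* ... flush 'entry' immediately ... */
                        put_cpu_var(flush_batch);       /* re-enable preemption on every path */
                        return;
                }

                /* ... queue 'entry' in the batch for a later flush ... */
                batch->index++;
                put_cpu_var(flush_batch);
        }
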
diff --git a/arch/sh/include/asm/sections.h b/arch/sh/include/asm/sections.h index a78701da775b..4a5350037c8f 100644 --- a/arch/sh/include/asm/sections.h +++ b/arch/sh/include/asm/sections.h | |||
| @@ -3,7 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <asm-generic/sections.h> | 4 | #include <asm-generic/sections.h> |
| 5 | 5 | ||
| 6 | extern void __nosave_begin, __nosave_end; | 6 | extern long __nosave_begin, __nosave_end; |
| 7 | extern long __machvec_start, __machvec_end; | 7 | extern long __machvec_start, __machvec_end; |
| 8 | extern char __uncached_start, __uncached_end; | 8 | extern char __uncached_start, __uncached_end; |
| 9 | extern char _ebss[]; | 9 | extern char _ebss[]; |
diff --git a/arch/sh/kernel/cpu/sh4/setup-sh7750.c b/arch/sh/kernel/cpu/sh4/setup-sh7750.c index 672944f5b19c..e53b4b38bd11 100644 --- a/arch/sh/kernel/cpu/sh4/setup-sh7750.c +++ b/arch/sh/kernel/cpu/sh4/setup-sh7750.c | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #include <linux/io.h> | 14 | #include <linux/io.h> |
| 15 | #include <linux/sh_timer.h> | 15 | #include <linux/sh_timer.h> |
| 16 | #include <linux/serial_sci.h> | 16 | #include <linux/serial_sci.h> |
| 17 | #include <asm/machtypes.h> | 17 | #include <generated/machtypes.h> |
| 18 | 18 | ||
| 19 | static struct resource rtc_resources[] = { | 19 | static struct resource rtc_resources[] = { |
| 20 | [0] = { | 20 | [0] = { |
| @@ -255,12 +255,17 @@ static struct platform_device *sh7750_early_devices[] __initdata = { | |||
| 255 | 255 | ||
| 256 | void __init plat_early_device_setup(void) | 256 | void __init plat_early_device_setup(void) |
| 257 | { | 257 | { |
| 258 | struct platform_device *dev[1]; | ||
| 259 | |||
| 258 | if (mach_is_rts7751r2d()) { | 260 | if (mach_is_rts7751r2d()) { |
| 259 | scif_platform_data.scscr |= SCSCR_CKE1; | 261 | scif_platform_data.scscr |= SCSCR_CKE1; |
| 260 | early_platform_add_devices(&scif_device, 1); | 262 | dev[0] = &scif_device; |
| 263 | early_platform_add_devices(dev, 1); | ||
| 261 | } else { | 264 | } else { |
| 262 | early_platform_add_devices(&sci_device, 1); | 265 | dev[0] = &sci_device; |
| 263 | early_platform_add_devices(&scif_device, 1); | 266 | early_platform_add_devices(dev, 1); |
| 267 | dev[0] = &scif_device; | ||
| 268 | early_platform_add_devices(dev, 1); | ||
| 264 | } | 269 | } |
| 265 | 270 | ||
| 266 | early_platform_add_devices(sh7750_early_devices, | 271 | early_platform_add_devices(sh7750_early_devices, |
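The setup-sh7750.c hunk exists because early_platform_add_devices() takes an array of platform_device pointers, not a pointer to a single device, so a lone device has to be wrapped in a one-element array first. In isolation the idiom looks roughly like this (example_device is a placeholder):

        #include <linux/init.h>
        #include <linux/kernel.h>
        #include <linux/platform_device.h>

        static struct platform_device example_device = {
                .name   = "example",
                .id     = -1,
        };

        static void __init example_early_setup(void)
        {
                struct platform_device *dev[1] = { &example_device };

                early_platform_add_devices(dev, ARRAY_SIZE(dev));
        }
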
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c index faa8f86c0db4..0901b2f14e15 100644 --- a/arch/sh/lib/delay.c +++ b/arch/sh/lib/delay.c | |||
| @@ -10,6 +10,16 @@ | |||
| 10 | void __delay(unsigned long loops) | 10 | void __delay(unsigned long loops) |
| 11 | { | 11 | { |
| 12 | __asm__ __volatile__( | 12 | __asm__ __volatile__( |
| 13 | /* | ||
| 14 | * ST40-300 appears to have an issue with this code, | ||
| 15 | * normally taking two cycles each loop, as with all | ||
| 16 | * other SH variants. If however the branch and the | ||
| 17 | * delay slot straddle an 8 byte boundary, this increases | ||
| 18 | * to 3 cycles. | ||
| 19 | * This align directive ensures this doesn't occur. | ||
| 20 | */ | ||
| 21 | ".balign 8\n\t" | ||
| 22 | |||
| 13 | "tst %0, %0\n\t" | 23 | "tst %0, %0\n\t" |
| 14 | "1:\t" | 24 | "1:\t" |
| 15 | "bf/s 1b\n\t" | 25 | "bf/s 1b\n\t" |
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 88d3dc3d30d5..5a580ea04429 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
| @@ -108,7 +108,8 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
| 108 | kunmap_atomic(vfrom, KM_USER0); | 108 | kunmap_atomic(vfrom, KM_USER0); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | 111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || |
| 112 | (vma->vm_flags & VM_EXEC)) | ||
| 112 | __flush_purge_region(vto, PAGE_SIZE); | 113 | __flush_purge_region(vto, PAGE_SIZE); |
| 113 | 114 | ||
| 114 | kunmap_atomic(vto, KM_USER1); | 115 | kunmap_atomic(vto, KM_USER1); |
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index 4d0dfa0d998e..43a18c77676d 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h | |||
| @@ -36,6 +36,11 @@ | |||
| 36 | #define MSR_IA32_PERFCTR1 0x000000c2 | 36 | #define MSR_IA32_PERFCTR1 0x000000c2 |
| 37 | #define MSR_FSB_FREQ 0x000000cd | 37 | #define MSR_FSB_FREQ 0x000000cd |
| 38 | 38 | ||
| 39 | #define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2 | ||
| 40 | #define NHM_C3_AUTO_DEMOTE (1UL << 25) | ||
| 41 | #define NHM_C1_AUTO_DEMOTE (1UL << 26) | ||
| 42 | #define ATM_LNC_C6_AUTO_DEMOTE (1UL << 25) | ||
| 43 | |||
| 39 | #define MSR_MTRRcap 0x000000fe | 44 | #define MSR_MTRRcap 0x000000fe |
| 40 | #define MSR_IA32_BBL_CR_CTL 0x00000119 | 45 | #define MSR_IA32_BBL_CR_CTL 0x00000119 |
| 41 | 46 | ||
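The new MSR definitions describe the Nehalem/Sandy Bridge (and Atom Lincroft) package C-state configuration register and its auto-demotion control bits. A consumer such as an idle driver would typically clear those bits with a read-modify-write on each CPU; a hedged sketch of that use, not taken from this patch set:

        #include <asm/msr.h>

        /* Clear C-state auto-demotion on the current CPU (sketch only). */
        static void example_disable_auto_demotion(void)
        {
                unsigned long long msr_bits;

                rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
                msr_bits &= ~(NHM_C3_AUTO_DEMOTE | NHM_C1_AUTO_DEMOTE);
                wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits);
        }
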
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c index bd1cac747f67..52c93648e492 100644 --- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c +++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | |||
| @@ -158,9 +158,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c) | |||
| 158 | { | 158 | { |
| 159 | if (c->x86 == 0x06) { | 159 | if (c->x86 == 0x06) { |
| 160 | if (cpu_has(c, X86_FEATURE_EST)) | 160 | if (cpu_has(c, X86_FEATURE_EST)) |
| 161 | printk(KERN_WARNING PFX "Warning: EST-capable CPU " | 161 | printk_once(KERN_WARNING PFX "Warning: EST-capable " |
| 162 | "detected. The acpi-cpufreq module offers " | 162 | "CPU detected. The acpi-cpufreq module offers " |
| 163 | "voltage scaling in addition of frequency " | 163 | "voltage scaling in addition to frequency " |
| 164 | "scaling. You should use that instead of " | 164 | "scaling. You should use that instead of " |
| 165 | "p4-clockmod, if possible.\n"); | 165 | "p4-clockmod, if possible.\n"); |
| 166 | switch (c->x86_model) { | 166 | switch (c->x86_model) { |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 35c7e65e59be..c567dec854f6 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -1537,6 +1537,7 @@ static struct notifier_block cpb_nb = { | |||
| 1537 | static int __cpuinit powernowk8_init(void) | 1537 | static int __cpuinit powernowk8_init(void) |
| 1538 | { | 1538 | { |
| 1539 | unsigned int i, supported_cpus = 0, cpu; | 1539 | unsigned int i, supported_cpus = 0, cpu; |
| 1540 | int rv; | ||
| 1540 | 1541 | ||
| 1541 | for_each_online_cpu(i) { | 1542 | for_each_online_cpu(i) { |
| 1542 | int rc; | 1543 | int rc; |
| @@ -1555,14 +1556,14 @@ static int __cpuinit powernowk8_init(void) | |||
| 1555 | 1556 | ||
| 1556 | cpb_capable = true; | 1557 | cpb_capable = true; |
| 1557 | 1558 | ||
| 1558 | register_cpu_notifier(&cpb_nb); | ||
| 1559 | |||
| 1560 | msrs = msrs_alloc(); | 1559 | msrs = msrs_alloc(); |
| 1561 | if (!msrs) { | 1560 | if (!msrs) { |
| 1562 | printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); | 1561 | printk(KERN_ERR "%s: Error allocating msrs!\n", __func__); |
| 1563 | return -ENOMEM; | 1562 | return -ENOMEM; |
| 1564 | } | 1563 | } |
| 1565 | 1564 | ||
| 1565 | register_cpu_notifier(&cpb_nb); | ||
| 1566 | |||
| 1566 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); | 1567 | rdmsr_on_cpus(cpu_online_mask, MSR_K7_HWCR, msrs); |
| 1567 | 1568 | ||
| 1568 | for_each_cpu(cpu, cpu_online_mask) { | 1569 | for_each_cpu(cpu, cpu_online_mask) { |
| @@ -1574,7 +1575,13 @@ static int __cpuinit powernowk8_init(void) | |||
| 1574 | (cpb_enabled ? "on" : "off")); | 1575 | (cpb_enabled ? "on" : "off")); |
| 1575 | } | 1576 | } |
| 1576 | 1577 | ||
| 1577 | return cpufreq_register_driver(&cpufreq_amd64_driver); | 1578 | rv = cpufreq_register_driver(&cpufreq_amd64_driver); |
| 1579 | if (rv < 0 && boot_cpu_has(X86_FEATURE_CPB)) { | ||
| 1580 | unregister_cpu_notifier(&cpb_nb); | ||
| 1581 | msrs_free(msrs); | ||
| 1582 | msrs = NULL; | ||
| 1583 | } | ||
| 1584 | return rv; | ||
| 1578 | } | 1585 | } |
| 1579 | 1586 | ||
| 1580 | /* driver entry point for term */ | 1587 | /* driver entry point for term */ |
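The powernowk8_init() change is an ordering and unwind fix: the CPU notifier is only registered once msrs_alloc() has succeeded, and if cpufreq_register_driver() then fails on a CPB-capable system the notifier is unregistered and the MSR buffer freed, so nothing is left dangling. The shape of that pattern, with invented helper names standing in for the real calls:

        #include <linux/errno.h>
        #include <linux/init.h>

        /* All helpers below are invented stand-ins for msrs_alloc(),
         * register_cpu_notifier(), cpufreq_register_driver() and their
         * inverses. */
        static void *example_alloc_resources(void)      { return (void *)1; }
        static void example_free_resources(void)        { }
        static void example_register_notifier(void)     { }
        static void example_unregister_notifier(void)   { }
        static int example_register_driver(void)        { return 0; }

        static int __init example_init(void)
        {
                int ret;

                if (!example_alloc_resources())
                        return -ENOMEM;

                example_register_notifier();    /* only after allocation succeeded */

                ret = example_register_driver();
                if (ret < 0) {
                        /* unwind in reverse order on failure */
                        example_unregister_notifier();
                        example_free_resources();
                }

                return ret;
        }
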
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c index dab874647530..044bda5b3174 100644 --- a/arch/x86/platform/olpc/olpc_dt.c +++ b/arch/x86/platform/olpc/olpc_dt.c | |||
| @@ -140,8 +140,7 @@ void * __init prom_early_alloc(unsigned long size) | |||
| 140 | * wasted bootmem) and hand off chunks of it to callers. | 140 | * wasted bootmem) and hand off chunks of it to callers. |
| 141 | */ | 141 | */ |
| 142 | res = alloc_bootmem(chunk_size); | 142 | res = alloc_bootmem(chunk_size); |
| 143 | if (!res) | 143 | BUG_ON(!res); |
| 144 | return NULL; | ||
| 145 | prom_early_allocated += chunk_size; | 144 | prom_early_allocated += chunk_size; |
| 146 | memset(res, 0, chunk_size); | 145 | memset(res, 0, chunk_size); |
| 147 | free_mem = chunk_size; | 146 | free_mem = chunk_size; |
diff --git a/block/blk-core.c b/block/blk-core.c index 2f4002f79a24..518dd423a5fe 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
| @@ -352,7 +352,7 @@ void blk_start_queue(struct request_queue *q) | |||
| 352 | WARN_ON(!irqs_disabled()); | 352 | WARN_ON(!irqs_disabled()); |
| 353 | 353 | ||
| 354 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); | 354 | queue_flag_clear(QUEUE_FLAG_STOPPED, q); |
| 355 | __blk_run_queue(q); | 355 | __blk_run_queue(q, false); |
| 356 | } | 356 | } |
| 357 | EXPORT_SYMBOL(blk_start_queue); | 357 | EXPORT_SYMBOL(blk_start_queue); |
| 358 | 358 | ||
| @@ -403,13 +403,14 @@ EXPORT_SYMBOL(blk_sync_queue); | |||
| 403 | /** | 403 | /** |
| 404 | * __blk_run_queue - run a single device queue | 404 | * __blk_run_queue - run a single device queue |
| 405 | * @q: The queue to run | 405 | * @q: The queue to run |
| 406 | * @force_kblockd: Don't run @q->request_fn directly. Use kblockd. | ||
| 406 | * | 407 | * |
| 407 | * Description: | 408 | * Description: |
| 408 | * See @blk_run_queue. This variant must be called with the queue lock | 409 | * See @blk_run_queue. This variant must be called with the queue lock |
| 409 | * held and interrupts disabled. | 410 | * held and interrupts disabled. |
| 410 | * | 411 | * |
| 411 | */ | 412 | */ |
| 412 | void __blk_run_queue(struct request_queue *q) | 413 | void __blk_run_queue(struct request_queue *q, bool force_kblockd) |
| 413 | { | 414 | { |
| 414 | blk_remove_plug(q); | 415 | blk_remove_plug(q); |
| 415 | 416 | ||
| @@ -423,7 +424,7 @@ void __blk_run_queue(struct request_queue *q) | |||
| 423 | * Only recurse once to avoid overrunning the stack, let the unplug | 424 | * Only recurse once to avoid overrunning the stack, let the unplug |
| 424 | * handling reinvoke the handler shortly if we already got there. | 425 | * handling reinvoke the handler shortly if we already got there. |
| 425 | */ | 426 | */ |
| 426 | if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { | 427 | if (!force_kblockd && !queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { |
| 427 | q->request_fn(q); | 428 | q->request_fn(q); |
| 428 | queue_flag_clear(QUEUE_FLAG_REENTER, q); | 429 | queue_flag_clear(QUEUE_FLAG_REENTER, q); |
| 429 | } else { | 430 | } else { |
| @@ -446,7 +447,7 @@ void blk_run_queue(struct request_queue *q) | |||
| 446 | unsigned long flags; | 447 | unsigned long flags; |
| 447 | 448 | ||
| 448 | spin_lock_irqsave(q->queue_lock, flags); | 449 | spin_lock_irqsave(q->queue_lock, flags); |
| 449 | __blk_run_queue(q); | 450 | __blk_run_queue(q, false); |
| 450 | spin_unlock_irqrestore(q->queue_lock, flags); | 451 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 451 | } | 452 | } |
| 452 | EXPORT_SYMBOL(blk_run_queue); | 453 | EXPORT_SYMBOL(blk_run_queue); |
| @@ -1053,7 +1054,7 @@ void blk_insert_request(struct request_queue *q, struct request *rq, | |||
| 1053 | 1054 | ||
| 1054 | drive_stat_acct(rq, 1); | 1055 | drive_stat_acct(rq, 1); |
| 1055 | __elv_add_request(q, rq, where, 0); | 1056 | __elv_add_request(q, rq, where, 0); |
| 1056 | __blk_run_queue(q); | 1057 | __blk_run_queue(q, false); |
| 1057 | spin_unlock_irqrestore(q->queue_lock, flags); | 1058 | spin_unlock_irqrestore(q->queue_lock, flags); |
| 1058 | } | 1059 | } |
| 1059 | EXPORT_SYMBOL(blk_insert_request); | 1060 | EXPORT_SYMBOL(blk_insert_request); |
| @@ -2610,13 +2611,6 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
| 2610 | } | 2611 | } |
| 2611 | EXPORT_SYMBOL(kblockd_schedule_work); | 2612 | EXPORT_SYMBOL(kblockd_schedule_work); |
| 2612 | 2613 | ||
| 2613 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
| 2614 | struct delayed_work *dwork, unsigned long delay) | ||
| 2615 | { | ||
| 2616 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | ||
| 2617 | } | ||
| 2618 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
| 2619 | |||
| 2620 | int __init blk_dev_init(void) | 2614 | int __init blk_dev_init(void) |
| 2621 | { | 2615 | { |
| 2622 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2616 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |
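The blk-core.c changes give __blk_run_queue() a force_kblockd flag: existing callers pass false and keep the old direct-call behaviour, while paths that must not re-enter the driver (such as the flush completion path in the next file) pass true to hand the work to kblockd. A minimal user-space sketch of that calling convention, using invented names (run_queue, worker) rather than block-layer APIs:

#include <stdbool.h>
#include <stdio.h>

static bool work_pending;          /* stands in for queued kblockd work */

static void request_fn(void)
{
    puts("request_fn: dispatching requests");
}

static void run_queue(bool defer)
{
    if (!defer) {
        request_fn();              /* direct call, like force_kblockd=false */
    } else {
        work_pending = true;       /* hand off, like kblockd_schedule_work() */
        puts("run_queue: deferred to worker");
    }
}

static void worker(void)
{
    if (work_pending) {
        work_pending = false;
        request_fn();
    }
}

int main(void)
{
    run_queue(false);              /* e.g. the blk_start_queue() path */
    run_queue(true);               /* e.g. a completion-path caller */
    worker();
    return 0;
}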
diff --git a/block/blk-flush.c b/block/blk-flush.c index 54b123d6563e..b27d0208611b 100644 --- a/block/blk-flush.c +++ b/block/blk-flush.c | |||
| @@ -66,10 +66,12 @@ static void blk_flush_complete_seq_end_io(struct request_queue *q, | |||
| 66 | 66 | ||
| 67 | /* | 67 | /* |
| 68 | * Moving a request silently to empty queue_head may stall the | 68 | * Moving a request silently to empty queue_head may stall the |
| 69 | * queue. Kick the queue in those cases. | 69 | * queue. Kick the queue in those cases. This function is called |
| 70 | * from request completion path and calling directly into | ||
| 71 | * request_fn may confuse the driver. Always use kblockd. | ||
| 70 | */ | 72 | */ |
| 71 | if (was_empty && next_rq) | 73 | if (was_empty && next_rq) |
| 72 | __blk_run_queue(q); | 74 | __blk_run_queue(q, true); |
| 73 | } | 75 | } |
| 74 | 76 | ||
| 75 | static void pre_flush_end_io(struct request *rq, int error) | 77 | static void pre_flush_end_io(struct request *rq, int error) |
| @@ -130,7 +132,7 @@ static struct request *queue_next_fseq(struct request_queue *q) | |||
| 130 | BUG(); | 132 | BUG(); |
| 131 | } | 133 | } |
| 132 | 134 | ||
| 133 | elv_insert(q, rq, ELEVATOR_INSERT_FRONT); | 135 | elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE); |
| 134 | return rq; | 136 | return rq; |
| 135 | } | 137 | } |
| 136 | 138 | ||
diff --git a/block/blk-lib.c b/block/blk-lib.c index 1a320d2406b0..eec78becb355 100644 --- a/block/blk-lib.c +++ b/block/blk-lib.c | |||
| @@ -132,7 +132,7 @@ static void bio_batch_end_io(struct bio *bio, int err) | |||
| 132 | } | 132 | } |
| 133 | 133 | ||
| 134 | /** | 134 | /** |
| 135 | * blkdev_issue_zeroout generate number of zero filled write bios | 135 | * blkdev_issue_zeroout - generate number of zero filled write bios |
| 136 | * @bdev: blockdev to issue | 136 | * @bdev: blockdev to issue |
| 137 | * @sector: start sector | 137 | * @sector: start sector |
| 138 | * @nr_sects: number of sectors to write | 138 | * @nr_sects: number of sectors to write |
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a89043a3caa4..e36cc10a346c 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
| @@ -20,6 +20,11 @@ static int throtl_quantum = 32; | |||
| 20 | /* Throttling is performed over 100ms slice and after that slice is renewed */ | 20 | /* Throttling is performed over 100ms slice and after that slice is renewed */ |
| 21 | static unsigned long throtl_slice = HZ/10; /* 100 ms */ | 21 | static unsigned long throtl_slice = HZ/10; /* 100 ms */ |
| 22 | 22 | ||
| 23 | /* A workqueue to queue throttle related work */ | ||
| 24 | static struct workqueue_struct *kthrotld_workqueue; | ||
| 25 | static void throtl_schedule_delayed_work(struct throtl_data *td, | ||
| 26 | unsigned long delay); | ||
| 27 | |||
| 23 | struct throtl_rb_root { | 28 | struct throtl_rb_root { |
| 24 | struct rb_root rb; | 29 | struct rb_root rb; |
| 25 | struct rb_node *left; | 30 | struct rb_node *left; |
| @@ -345,10 +350,9 @@ static void throtl_schedule_next_dispatch(struct throtl_data *td) | |||
| 345 | update_min_dispatch_time(st); | 350 | update_min_dispatch_time(st); |
| 346 | 351 | ||
| 347 | if (time_before_eq(st->min_disptime, jiffies)) | 352 | if (time_before_eq(st->min_disptime, jiffies)) |
| 348 | throtl_schedule_delayed_work(td->queue, 0); | 353 | throtl_schedule_delayed_work(td, 0); |
| 349 | else | 354 | else |
| 350 | throtl_schedule_delayed_work(td->queue, | 355 | throtl_schedule_delayed_work(td, (st->min_disptime - jiffies)); |
| 351 | (st->min_disptime - jiffies)); | ||
| 352 | } | 356 | } |
| 353 | 357 | ||
| 354 | static inline void | 358 | static inline void |
| @@ -815,10 +819,10 @@ void blk_throtl_work(struct work_struct *work) | |||
| 815 | } | 819 | } |
| 816 | 820 | ||
| 817 | /* Call with queue lock held */ | 821 | /* Call with queue lock held */ |
| 818 | void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) | 822 | static void |
| 823 | throtl_schedule_delayed_work(struct throtl_data *td, unsigned long delay) | ||
| 819 | { | 824 | { |
| 820 | 825 | ||
| 821 | struct throtl_data *td = q->td; | ||
| 822 | struct delayed_work *dwork = &td->throtl_work; | 826 | struct delayed_work *dwork = &td->throtl_work; |
| 823 | 827 | ||
| 824 | if (total_nr_queued(td) > 0) { | 828 | if (total_nr_queued(td) > 0) { |
| @@ -827,12 +831,11 @@ void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) | |||
| 827 | * Cancel that and schedule a new one. | 831 | * Cancel that and schedule a new one. |
| 828 | */ | 832 | */ |
| 829 | __cancel_delayed_work(dwork); | 833 | __cancel_delayed_work(dwork); |
| 830 | kblockd_schedule_delayed_work(q, dwork, delay); | 834 | queue_delayed_work(kthrotld_workqueue, dwork, delay); |
| 831 | throtl_log(td, "schedule work. delay=%lu jiffies=%lu", | 835 | throtl_log(td, "schedule work. delay=%lu jiffies=%lu", |
| 832 | delay, jiffies); | 836 | delay, jiffies); |
| 833 | } | 837 | } |
| 834 | } | 838 | } |
| 835 | EXPORT_SYMBOL(throtl_schedule_delayed_work); | ||
| 836 | 839 | ||
| 837 | static void | 840 | static void |
| 838 | throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) | 841 | throtl_destroy_tg(struct throtl_data *td, struct throtl_grp *tg) |
| @@ -920,7 +923,7 @@ static void throtl_update_blkio_group_read_bps(void *key, | |||
| 920 | smp_mb__after_atomic_inc(); | 923 | smp_mb__after_atomic_inc(); |
| 921 | 924 | ||
| 922 | /* Schedule a work now to process the limit change */ | 925 | /* Schedule a work now to process the limit change */ |
| 923 | throtl_schedule_delayed_work(td->queue, 0); | 926 | throtl_schedule_delayed_work(td, 0); |
| 924 | } | 927 | } |
| 925 | 928 | ||
| 926 | static void throtl_update_blkio_group_write_bps(void *key, | 929 | static void throtl_update_blkio_group_write_bps(void *key, |
| @@ -934,7 +937,7 @@ static void throtl_update_blkio_group_write_bps(void *key, | |||
| 934 | smp_mb__before_atomic_inc(); | 937 | smp_mb__before_atomic_inc(); |
| 935 | atomic_inc(&td->limits_changed); | 938 | atomic_inc(&td->limits_changed); |
| 936 | smp_mb__after_atomic_inc(); | 939 | smp_mb__after_atomic_inc(); |
| 937 | throtl_schedule_delayed_work(td->queue, 0); | 940 | throtl_schedule_delayed_work(td, 0); |
| 938 | } | 941 | } |
| 939 | 942 | ||
| 940 | static void throtl_update_blkio_group_read_iops(void *key, | 943 | static void throtl_update_blkio_group_read_iops(void *key, |
| @@ -948,7 +951,7 @@ static void throtl_update_blkio_group_read_iops(void *key, | |||
| 948 | smp_mb__before_atomic_inc(); | 951 | smp_mb__before_atomic_inc(); |
| 949 | atomic_inc(&td->limits_changed); | 952 | atomic_inc(&td->limits_changed); |
| 950 | smp_mb__after_atomic_inc(); | 953 | smp_mb__after_atomic_inc(); |
| 951 | throtl_schedule_delayed_work(td->queue, 0); | 954 | throtl_schedule_delayed_work(td, 0); |
| 952 | } | 955 | } |
| 953 | 956 | ||
| 954 | static void throtl_update_blkio_group_write_iops(void *key, | 957 | static void throtl_update_blkio_group_write_iops(void *key, |
| @@ -962,7 +965,7 @@ static void throtl_update_blkio_group_write_iops(void *key, | |||
| 962 | smp_mb__before_atomic_inc(); | 965 | smp_mb__before_atomic_inc(); |
| 963 | atomic_inc(&td->limits_changed); | 966 | atomic_inc(&td->limits_changed); |
| 964 | smp_mb__after_atomic_inc(); | 967 | smp_mb__after_atomic_inc(); |
| 965 | throtl_schedule_delayed_work(td->queue, 0); | 968 | throtl_schedule_delayed_work(td, 0); |
| 966 | } | 969 | } |
| 967 | 970 | ||
| 968 | void throtl_shutdown_timer_wq(struct request_queue *q) | 971 | void throtl_shutdown_timer_wq(struct request_queue *q) |
| @@ -1135,6 +1138,10 @@ void blk_throtl_exit(struct request_queue *q) | |||
| 1135 | 1138 | ||
| 1136 | static int __init throtl_init(void) | 1139 | static int __init throtl_init(void) |
| 1137 | { | 1140 | { |
| 1141 | kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0); | ||
| 1142 | if (!kthrotld_workqueue) | ||
| 1143 | panic("Failed to create kthrotld\n"); | ||
| 1144 | |||
| 1138 | blkio_policy_register(&blkio_policy_throtl); | 1145 | blkio_policy_register(&blkio_policy_throtl); |
| 1139 | return 0; | 1146 | return 0; |
| 1140 | } | 1147 | } |
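The throttle code stops piggybacking on kblockd and gets its own kthrotld workqueue, created with WQ_MEM_RECLAIM so it can make forward progress under memory pressure; rescheduling cancels any pending delayed work before queueing it again with the new delay. A kernel-style sketch of that pattern under assumed names (my_wq, my_work_fn, reschedule); it mirrors the hunk rather than the real blk-throttle code and is not a standalone program:

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
    /* dispatch the throttled work here */
}
static DECLARE_DELAYED_WORK(my_dwork, my_work_fn);

static int create_wq(void)
{
    my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM, 0);
    return my_wq ? 0 : -ENOMEM;
}

static void reschedule(unsigned long delay)
{
    /* drop a pending (not yet running) instance, then requeue with the new delay */
    __cancel_delayed_work(&my_dwork);
    queue_delayed_work(my_wq, &my_dwork, delay);
}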
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 7be4c7959625..ea83a4f0c27d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -3355,7 +3355,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
| 3355 | cfqd->busy_queues > 1) { | 3355 | cfqd->busy_queues > 1) { |
| 3356 | cfq_del_timer(cfqd, cfqq); | 3356 | cfq_del_timer(cfqd, cfqq); |
| 3357 | cfq_clear_cfqq_wait_request(cfqq); | 3357 | cfq_clear_cfqq_wait_request(cfqq); |
| 3358 | __blk_run_queue(cfqd->queue); | 3358 | __blk_run_queue(cfqd->queue, false); |
| 3359 | } else { | 3359 | } else { |
| 3360 | cfq_blkiocg_update_idle_time_stats( | 3360 | cfq_blkiocg_update_idle_time_stats( |
| 3361 | &cfqq->cfqg->blkg); | 3361 | &cfqq->cfqg->blkg); |
| @@ -3370,7 +3370,7 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
| 3370 | * this new queue is RT and the current one is BE | 3370 | * this new queue is RT and the current one is BE |
| 3371 | */ | 3371 | */ |
| 3372 | cfq_preempt_queue(cfqd, cfqq); | 3372 | cfq_preempt_queue(cfqd, cfqq); |
| 3373 | __blk_run_queue(cfqd->queue); | 3373 | __blk_run_queue(cfqd->queue, false); |
| 3374 | } | 3374 | } |
| 3375 | } | 3375 | } |
| 3376 | 3376 | ||
| @@ -3731,7 +3731,7 @@ static void cfq_kick_queue(struct work_struct *work) | |||
| 3731 | struct request_queue *q = cfqd->queue; | 3731 | struct request_queue *q = cfqd->queue; |
| 3732 | 3732 | ||
| 3733 | spin_lock_irq(q->queue_lock); | 3733 | spin_lock_irq(q->queue_lock); |
| 3734 | __blk_run_queue(cfqd->queue); | 3734 | __blk_run_queue(cfqd->queue, false); |
| 3735 | spin_unlock_irq(q->queue_lock); | 3735 | spin_unlock_irq(q->queue_lock); |
| 3736 | } | 3736 | } |
| 3737 | 3737 | ||
diff --git a/block/elevator.c b/block/elevator.c index 2569512830d3..236e93c1f46c 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -602,7 +602,7 @@ void elv_quiesce_start(struct request_queue *q) | |||
| 602 | */ | 602 | */ |
| 603 | elv_drain_elevator(q); | 603 | elv_drain_elevator(q); |
| 604 | while (q->rq.elvpriv) { | 604 | while (q->rq.elvpriv) { |
| 605 | __blk_run_queue(q); | 605 | __blk_run_queue(q, false); |
| 606 | spin_unlock_irq(q->queue_lock); | 606 | spin_unlock_irq(q->queue_lock); |
| 607 | msleep(10); | 607 | msleep(10); |
| 608 | spin_lock_irq(q->queue_lock); | 608 | spin_lock_irq(q->queue_lock); |
| @@ -651,7 +651,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where) | |||
| 651 | * with anything. There's no point in delaying queue | 651 | * with anything. There's no point in delaying queue |
| 652 | * processing. | 652 | * processing. |
| 653 | */ | 653 | */ |
| 654 | __blk_run_queue(q); | 654 | __blk_run_queue(q, false); |
| 655 | break; | 655 | break; |
| 656 | 656 | ||
| 657 | case ELEVATOR_INSERT_SORT: | 657 | case ELEVATOR_INSERT_SORT: |
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h index 54784bb42cec..edc25867ad9d 100644 --- a/drivers/acpi/acpica/aclocal.h +++ b/drivers/acpi/acpica/aclocal.h | |||
| @@ -416,10 +416,15 @@ struct acpi_gpe_handler_info { | |||
| 416 | u8 originally_enabled; /* True if GPE was originally enabled */ | 416 | u8 originally_enabled; /* True if GPE was originally enabled */ |
| 417 | }; | 417 | }; |
| 418 | 418 | ||
| 419 | struct acpi_gpe_notify_object { | ||
| 420 | struct acpi_namespace_node *node; | ||
| 421 | struct acpi_gpe_notify_object *next; | ||
| 422 | }; | ||
| 423 | |||
| 419 | union acpi_gpe_dispatch_info { | 424 | union acpi_gpe_dispatch_info { |
| 420 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ | 425 | struct acpi_namespace_node *method_node; /* Method node for this GPE level */ |
| 421 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ | 426 | struct acpi_gpe_handler_info *handler; /* Installed GPE handler */ |
| 422 | struct acpi_namespace_node *device_node; /* Parent _PRW device for implicit notify */ | 427 | struct acpi_gpe_notify_object device; /* List of _PRW devices for implicit notify */ |
| 423 | }; | 428 | }; |
| 424 | 429 | ||
| 425 | /* | 430 | /* |
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c index 14988a86066f..f4725212eb48 100644 --- a/drivers/acpi/acpica/evgpe.c +++ b/drivers/acpi/acpica/evgpe.c | |||
| @@ -457,6 +457,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 457 | acpi_status status; | 457 | acpi_status status; |
| 458 | struct acpi_gpe_event_info *local_gpe_event_info; | 458 | struct acpi_gpe_event_info *local_gpe_event_info; |
| 459 | struct acpi_evaluate_info *info; | 459 | struct acpi_evaluate_info *info; |
| 460 | struct acpi_gpe_notify_object *notify_object; | ||
| 460 | 461 | ||
| 461 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); | 462 | ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method); |
| 462 | 463 | ||
| @@ -508,10 +509,18 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context) | |||
| 508 | * from this thread -- because handlers may in turn run other | 509 | * from this thread -- because handlers may in turn run other |
| 509 | * control methods. | 510 | * control methods. |
| 510 | */ | 511 | */ |
| 511 | status = | 512 | status = acpi_ev_queue_notify_request( |
| 512 | acpi_ev_queue_notify_request(local_gpe_event_info->dispatch. | 513 | local_gpe_event_info->dispatch.device.node, |
| 513 | device_node, | 514 | ACPI_NOTIFY_DEVICE_WAKE); |
| 514 | ACPI_NOTIFY_DEVICE_WAKE); | 515 | |
| 516 | notify_object = local_gpe_event_info->dispatch.device.next; | ||
| 517 | while (ACPI_SUCCESS(status) && notify_object) { | ||
| 518 | status = acpi_ev_queue_notify_request( | ||
| 519 | notify_object->node, | ||
| 520 | ACPI_NOTIFY_DEVICE_WAKE); | ||
| 521 | notify_object = notify_object->next; | ||
| 522 | } | ||
| 523 | |||
| 515 | break; | 524 | break; |
| 516 | 525 | ||
| 517 | case ACPI_GPE_DISPATCH_METHOD: | 526 | case ACPI_GPE_DISPATCH_METHOD: |
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c index 3b20a3401b64..52aaff3df562 100644 --- a/drivers/acpi/acpica/evxfgpe.c +++ b/drivers/acpi/acpica/evxfgpe.c | |||
| @@ -198,7 +198,9 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
| 198 | acpi_status status = AE_BAD_PARAMETER; | 198 | acpi_status status = AE_BAD_PARAMETER; |
| 199 | struct acpi_gpe_event_info *gpe_event_info; | 199 | struct acpi_gpe_event_info *gpe_event_info; |
| 200 | struct acpi_namespace_node *device_node; | 200 | struct acpi_namespace_node *device_node; |
| 201 | struct acpi_gpe_notify_object *notify_object; | ||
| 201 | acpi_cpu_flags flags; | 202 | acpi_cpu_flags flags; |
| 203 | u8 gpe_dispatch_mask; | ||
| 202 | 204 | ||
| 203 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); | 205 | ACPI_FUNCTION_TRACE(acpi_setup_gpe_for_wake); |
| 204 | 206 | ||
| @@ -221,27 +223,49 @@ acpi_setup_gpe_for_wake(acpi_handle wake_device, | |||
| 221 | goto unlock_and_exit; | 223 | goto unlock_and_exit; |
| 222 | } | 224 | } |
| 223 | 225 | ||
| 226 | if (wake_device == ACPI_ROOT_OBJECT) { | ||
| 227 | goto out; | ||
| 228 | } | ||
| 229 | |||
| 224 | /* | 230 | /* |
| 225 | * If there is no method or handler for this GPE, then the | 231 | * If there is no method or handler for this GPE, then the |
| 226 | * wake_device will be notified whenever this GPE fires (aka | 232 | * wake_device will be notified whenever this GPE fires (aka |
| 227 | * "implicit notify") Note: The GPE is assumed to be | 233 | * "implicit notify") Note: The GPE is assumed to be |
| 228 | * level-triggered (for windows compatibility). | 234 | * level-triggered (for windows compatibility). |
| 229 | */ | 235 | */ |
| 230 | if (((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) == | 236 | gpe_dispatch_mask = gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK; |
| 231 | ACPI_GPE_DISPATCH_NONE) && (wake_device != ACPI_ROOT_OBJECT)) { | 237 | if (gpe_dispatch_mask != ACPI_GPE_DISPATCH_NONE |
| 238 | && gpe_dispatch_mask != ACPI_GPE_DISPATCH_NOTIFY) { | ||
| 239 | goto out; | ||
| 240 | } | ||
| 232 | 241 | ||
| 233 | /* Validate wake_device is of type Device */ | 242 | /* Validate wake_device is of type Device */ |
| 234 | 243 | ||
| 235 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, | 244 | device_node = ACPI_CAST_PTR(struct acpi_namespace_node, wake_device); |
| 236 | wake_device); | 245 | if (device_node->type != ACPI_TYPE_DEVICE) { |
| 237 | if (device_node->type != ACPI_TYPE_DEVICE) { | 246 | goto unlock_and_exit; |
| 238 | goto unlock_and_exit; | 247 | } |
| 239 | } | 248 | |
| 249 | if (gpe_dispatch_mask == ACPI_GPE_DISPATCH_NONE) { | ||
| 240 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | | 250 | gpe_event_info->flags = (ACPI_GPE_DISPATCH_NOTIFY | |
| 241 | ACPI_GPE_LEVEL_TRIGGERED); | 251 | ACPI_GPE_LEVEL_TRIGGERED); |
| 242 | gpe_event_info->dispatch.device_node = device_node; | 252 | gpe_event_info->dispatch.device.node = device_node; |
| 253 | gpe_event_info->dispatch.device.next = NULL; | ||
| 254 | } else { | ||
| 255 | /* There are multiple devices to notify implicitly. */ | ||
| 256 | |||
| 257 | notify_object = ACPI_ALLOCATE_ZEROED(sizeof(*notify_object)); | ||
| 258 | if (!notify_object) { | ||
| 259 | status = AE_NO_MEMORY; | ||
| 260 | goto unlock_and_exit; | ||
| 261 | } | ||
| 262 | |||
| 263 | notify_object->node = device_node; | ||
| 264 | notify_object->next = gpe_event_info->dispatch.device.next; | ||
| 265 | gpe_event_info->dispatch.device.next = notify_object; | ||
| 243 | } | 266 | } |
| 244 | 267 | ||
| 268 | out: | ||
| 245 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; | 269 | gpe_event_info->flags |= ACPI_GPE_CAN_WAKE; |
| 246 | status = AE_OK; | 270 | status = AE_OK; |
| 247 | 271 | ||
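Together, the three ACPICA hunks turn the single implicit-notify device pointer into a singly linked list: the first _PRW device lives in the embedded dispatch.device entry, later ones are prepended as acpi_gpe_notify_object nodes, and the wake handler walks the whole chain. A small, runnable user-space sketch of that list shape, using made-up names rather than ACPICA types:

#include <stdio.h>
#include <stdlib.h>

struct notify_object {
    const char *name;              /* stands in for the namespace node */
    struct notify_object *next;
};

static struct notify_object head = { "first-prw-device", NULL };

static int add_wake_device(const char *name)
{
    struct notify_object *obj = calloc(1, sizeof(*obj));
    if (!obj)
        return -1;
    obj->name = name;
    obj->next = head.next;         /* prepend after the embedded head */
    head.next = obj;
    return 0;
}

static void notify_all(void)
{
    struct notify_object *obj;

    printf("notify %s\n", head.name);
    for (obj = head.next; obj; obj = obj->next)
        printf("notify %s\n", obj->name);
}

int main(void)
{
    add_wake_device("second-prw-device");
    add_wake_device("third-prw-device");
    notify_all();
    return 0;
}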
diff --git a/drivers/acpi/debugfs.c b/drivers/acpi/debugfs.c index 5df67f1d6c61..384f7abcff77 100644 --- a/drivers/acpi/debugfs.c +++ b/drivers/acpi/debugfs.c | |||
| @@ -26,7 +26,9 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
| 26 | size_t count, loff_t *ppos) | 26 | size_t count, loff_t *ppos) |
| 27 | { | 27 | { |
| 28 | static char *buf; | 28 | static char *buf; |
| 29 | static int uncopied_bytes; | 29 | static u32 max_size; |
| 30 | static u32 uncopied_bytes; | ||
| 31 | |||
| 30 | struct acpi_table_header table; | 32 | struct acpi_table_header table; |
| 31 | acpi_status status; | 33 | acpi_status status; |
| 32 | 34 | ||
| @@ -37,19 +39,24 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
| 37 | if (copy_from_user(&table, user_buf, | 39 | if (copy_from_user(&table, user_buf, |
| 38 | sizeof(struct acpi_table_header))) | 40 | sizeof(struct acpi_table_header))) |
| 39 | return -EFAULT; | 41 | return -EFAULT; |
| 40 | uncopied_bytes = table.length; | 42 | uncopied_bytes = max_size = table.length; |
| 41 | buf = kzalloc(uncopied_bytes, GFP_KERNEL); | 43 | buf = kzalloc(max_size, GFP_KERNEL); |
| 42 | if (!buf) | 44 | if (!buf) |
| 43 | return -ENOMEM; | 45 | return -ENOMEM; |
| 44 | } | 46 | } |
| 45 | 47 | ||
| 46 | if (uncopied_bytes < count) { | 48 | if (buf == NULL) |
| 47 | kfree(buf); | 49 | return -EINVAL; |
| 50 | |||
| 51 | if ((*ppos > max_size) || | ||
| 52 | (*ppos + count > max_size) || | ||
| 53 | (*ppos + count < count) || | ||
| 54 | (count > uncopied_bytes)) | ||
| 48 | return -EINVAL; | 55 | return -EINVAL; |
| 49 | } | ||
| 50 | 56 | ||
| 51 | if (copy_from_user(buf + (*ppos), user_buf, count)) { | 57 | if (copy_from_user(buf + (*ppos), user_buf, count)) { |
| 52 | kfree(buf); | 58 | kfree(buf); |
| 59 | buf = NULL; | ||
| 53 | return -EFAULT; | 60 | return -EFAULT; |
| 54 | } | 61 | } |
| 55 | 62 | ||
| @@ -59,6 +66,7 @@ static ssize_t cm_write(struct file *file, const char __user * user_buf, | |||
| 59 | if (!uncopied_bytes) { | 66 | if (!uncopied_bytes) { |
| 60 | status = acpi_install_method(buf); | 67 | status = acpi_install_method(buf); |
| 61 | kfree(buf); | 68 | kfree(buf); |
| 69 | buf = NULL; | ||
| 62 | if (ACPI_FAILURE(status)) | 70 | if (ACPI_FAILURE(status)) |
| 63 | return -EINVAL; | 71 | return -EINVAL; |
| 64 | add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); | 72 | add_taint(TAINT_OVERRIDDEN_ACPI_TABLE); |
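The debugfs custom_method fix above tracks the table size separately from the remaining byte count and rejects writes whose file offset or length falls outside the buffer, including the offset+count wrap-around case. A runnable user-space sketch of the same bounds check (check_chunk is an invented helper, not the kernel function):

#include <stddef.h>
#include <stdio.h>

/* A chunked write into a buffer of max_size bytes rejects offsets past the
 * end, offset+count overflow, and chunks larger than what is still expected. */
static int check_chunk(size_t max_size, size_t uncopied,
                       size_t pos, size_t count)
{
    if (pos > max_size ||
        pos + count > max_size ||
        pos + count < count ||      /* wrap-around */
        count > uncopied)
        return -1;                  /* -EINVAL in the kernel code */
    return 0;
}

int main(void)
{
    printf("%d\n", check_chunk(100, 100, 0, 50));          /* ok: 0 */
    printf("%d\n", check_chunk(100, 50, 60, 50));          /* past end: -1 */
    printf("%d\n", check_chunk(100, 100, 10, (size_t)-1)); /* wrap: -1 */
    return 0;
}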
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 49e6a545eb63..dbf31ec9114d 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -78,7 +78,6 @@ | |||
| 78 | 78 | ||
| 79 | #include <asm/uaccess.h> | 79 | #include <asm/uaccess.h> |
| 80 | 80 | ||
| 81 | static DEFINE_MUTEX(loop_mutex); | ||
| 82 | static LIST_HEAD(loop_devices); | 81 | static LIST_HEAD(loop_devices); |
| 83 | static DEFINE_MUTEX(loop_devices_mutex); | 82 | static DEFINE_MUTEX(loop_devices_mutex); |
| 84 | 83 | ||
| @@ -1501,11 +1500,9 @@ static int lo_open(struct block_device *bdev, fmode_t mode) | |||
| 1501 | { | 1500 | { |
| 1502 | struct loop_device *lo = bdev->bd_disk->private_data; | 1501 | struct loop_device *lo = bdev->bd_disk->private_data; |
| 1503 | 1502 | ||
| 1504 | mutex_lock(&loop_mutex); | ||
| 1505 | mutex_lock(&lo->lo_ctl_mutex); | 1503 | mutex_lock(&lo->lo_ctl_mutex); |
| 1506 | lo->lo_refcnt++; | 1504 | lo->lo_refcnt++; |
| 1507 | mutex_unlock(&lo->lo_ctl_mutex); | 1505 | mutex_unlock(&lo->lo_ctl_mutex); |
| 1508 | mutex_unlock(&loop_mutex); | ||
| 1509 | 1506 | ||
| 1510 | return 0; | 1507 | return 0; |
| 1511 | } | 1508 | } |
| @@ -1515,7 +1512,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
| 1515 | struct loop_device *lo = disk->private_data; | 1512 | struct loop_device *lo = disk->private_data; |
| 1516 | int err; | 1513 | int err; |
| 1517 | 1514 | ||
| 1518 | mutex_lock(&loop_mutex); | ||
| 1519 | mutex_lock(&lo->lo_ctl_mutex); | 1515 | mutex_lock(&lo->lo_ctl_mutex); |
| 1520 | 1516 | ||
| 1521 | if (--lo->lo_refcnt) | 1517 | if (--lo->lo_refcnt) |
| @@ -1540,7 +1536,6 @@ static int lo_release(struct gendisk *disk, fmode_t mode) | |||
| 1540 | out: | 1536 | out: |
| 1541 | mutex_unlock(&lo->lo_ctl_mutex); | 1537 | mutex_unlock(&lo->lo_ctl_mutex); |
| 1542 | out_unlocked: | 1538 | out_unlocked: |
| 1543 | mutex_unlock(&loop_mutex); | ||
| 1544 | return 0; | 1539 | return 0; |
| 1545 | } | 1540 | } |
| 1546 | 1541 | ||
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 490393186338..84b164d1eb2b 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c | |||
| @@ -388,6 +388,10 @@ static void discard_port_data(struct port *port) | |||
| 388 | unsigned int len; | 388 | unsigned int len; |
| 389 | int ret; | 389 | int ret; |
| 390 | 390 | ||
| 391 | if (!port->portdev) { | ||
| 392 | /* Device has been unplugged. vqs are already gone. */ | ||
| 393 | return; | ||
| 394 | } | ||
| 391 | vq = port->in_vq; | 395 | vq = port->in_vq; |
| 392 | if (port->inbuf) | 396 | if (port->inbuf) |
| 393 | buf = port->inbuf; | 397 | buf = port->inbuf; |
| @@ -470,6 +474,10 @@ static void reclaim_consumed_buffers(struct port *port) | |||
| 470 | void *buf; | 474 | void *buf; |
| 471 | unsigned int len; | 475 | unsigned int len; |
| 472 | 476 | ||
| 477 | if (!port->portdev) { | ||
| 478 | /* Device has been unplugged. vqs are already gone. */ | ||
| 479 | return; | ||
| 480 | } | ||
| 473 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { | 481 | while ((buf = virtqueue_get_buf(port->out_vq, &len))) { |
| 474 | kfree(buf); | 482 | kfree(buf); |
| 475 | port->outvq_full = false; | 483 | port->outvq_full = false; |
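Both virtio_console helpers now check port->portdev before touching the virtqueues, because a hot-unplugged device has already torn those down. A tiny runnable sketch of the "bail out when the backpointer is gone" pattern, with simplified stand-in structs rather than the real port layout:

#include <stdio.h>

struct portdev { int id; };
struct port { struct portdev *portdev; };

static void discard_port_data(struct port *port)
{
    if (!port->portdev) {
        /* device has been unplugged, vqs are already gone */
        return;
    }
    printf("discarding data for device %d\n", port->portdev->id);
}

int main(void)
{
    struct portdev pd = { 42 };
    struct port p = { &pd };

    discard_port_data(&p);
    p.portdev = NULL;          /* hot-unplug tears down the device */
    discard_port_data(&p);     /* now a safe no-op */
    return 0;
}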
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 1109f6848a43..5cb4d09919d6 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c | |||
| @@ -1919,8 +1919,10 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
| 1919 | 1919 | ||
| 1920 | ret = sysdev_driver_register(&cpu_sysdev_class, | 1920 | ret = sysdev_driver_register(&cpu_sysdev_class, |
| 1921 | &cpufreq_sysdev_driver); | 1921 | &cpufreq_sysdev_driver); |
| 1922 | if (ret) | ||
| 1923 | goto err_null_driver; | ||
| 1922 | 1924 | ||
| 1923 | if ((!ret) && !(cpufreq_driver->flags & CPUFREQ_STICKY)) { | 1925 | if (!(cpufreq_driver->flags & CPUFREQ_STICKY)) { |
| 1924 | int i; | 1926 | int i; |
| 1925 | ret = -ENODEV; | 1927 | ret = -ENODEV; |
| 1926 | 1928 | ||
| @@ -1935,21 +1937,22 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) | |||
| 1935 | if (ret) { | 1937 | if (ret) { |
| 1936 | dprintk("no CPU initialized for driver %s\n", | 1938 | dprintk("no CPU initialized for driver %s\n", |
| 1937 | driver_data->name); | 1939 | driver_data->name); |
| 1938 | sysdev_driver_unregister(&cpu_sysdev_class, | 1940 | goto err_sysdev_unreg; |
| 1939 | &cpufreq_sysdev_driver); | ||
| 1940 | |||
| 1941 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 1942 | cpufreq_driver = NULL; | ||
| 1943 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1944 | } | 1941 | } |
| 1945 | } | 1942 | } |
| 1946 | 1943 | ||
| 1947 | if (!ret) { | 1944 | register_hotcpu_notifier(&cpufreq_cpu_notifier); |
| 1948 | register_hotcpu_notifier(&cpufreq_cpu_notifier); | 1945 | dprintk("driver %s up and running\n", driver_data->name); |
| 1949 | dprintk("driver %s up and running\n", driver_data->name); | 1946 | cpufreq_debug_enable_ratelimit(); |
| 1950 | cpufreq_debug_enable_ratelimit(); | ||
| 1951 | } | ||
| 1952 | 1947 | ||
| 1948 | return 0; | ||
| 1949 | err_sysdev_unreg: | ||
| 1950 | sysdev_driver_unregister(&cpu_sysdev_class, | ||
| 1951 | &cpufreq_sysdev_driver); | ||
| 1952 | err_null_driver: | ||
| 1953 | spin_lock_irqsave(&cpufreq_driver_lock, flags); | ||
| 1954 | cpufreq_driver = NULL; | ||
| 1955 | spin_unlock_irqrestore(&cpufreq_driver_lock, flags); | ||
| 1953 | return ret; | 1956 | return ret; |
| 1954 | } | 1957 | } |
| 1955 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); | 1958 | EXPORT_SYMBOL_GPL(cpufreq_register_driver); |
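cpufreq_register_driver() is reworked into the usual goto-based error unwinding: each failing step jumps to a label that undoes only what has already been set up, so the sysdev unregister and the driver-pointer reset are written once instead of being duplicated inline. A runnable user-space sketch of that structure (register_driver and fail_at are illustrative only):

#include <stdio.h>

static int register_driver(int fail_at)
{
    int ret;

    ret = (fail_at == 1) ? -1 : 0;          /* sysdev_driver_register() */
    if (ret)
        goto err_null_driver;

    ret = (fail_at == 2) ? -1 : 0;          /* "no CPU initialized" check */
    if (ret)
        goto err_sysdev_unreg;

    puts("driver up and running");
    return 0;

err_sysdev_unreg:
    puts("undo: sysdev_driver_unregister()");
err_null_driver:
    puts("undo: clear driver pointer");
    return ret;
}

int main(void)
{
    register_driver(0);
    register_driver(1);
    register_driver(2);
    return 0;
}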
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 6977a1ce9d98..f73ef4390db6 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c | |||
| @@ -672,7 +672,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
| 672 | struct drm_crtc_helper_funcs *crtc_funcs; | 672 | struct drm_crtc_helper_funcs *crtc_funcs; |
| 673 | u16 *red, *green, *blue, *transp; | 673 | u16 *red, *green, *blue, *transp; |
| 674 | struct drm_crtc *crtc; | 674 | struct drm_crtc *crtc; |
| 675 | int i, rc = 0; | 675 | int i, j, rc = 0; |
| 676 | int start; | 676 | int start; |
| 677 | 677 | ||
| 678 | for (i = 0; i < fb_helper->crtc_count; i++) { | 678 | for (i = 0; i < fb_helper->crtc_count; i++) { |
| @@ -685,7 +685,7 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) | |||
| 685 | transp = cmap->transp; | 685 | transp = cmap->transp; |
| 686 | start = cmap->start; | 686 | start = cmap->start; |
| 687 | 687 | ||
| 688 | for (i = 0; i < cmap->len; i++) { | 688 | for (j = 0; j < cmap->len; j++) { |
| 689 | u16 hred, hgreen, hblue, htransp = 0xffff; | 689 | u16 hred, hgreen, hblue, htransp = 0xffff; |
| 690 | 690 | ||
| 691 | hred = *red++; | 691 | hred = *red++; |
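The drm_fb_helper_setcmap() fix is a classic loop-counter reuse bug: the inner per-entry loop was reusing the outer per-CRTC counter i, so the outer loop typically exited after the first CRTC once the palette length exceeded the CRTC count. A runnable sketch showing the corrected shape with a separate inner index:

#include <stdio.h>

int main(void)
{
    int crtc_count = 3, cmap_len = 5;
    int outer_iterations = 0;
    int i, j;

    for (i = 0; i < crtc_count; i++) {
        for (j = 0; j < cmap_len; j++)   /* was "for (i = 0; ...)" */
            ;                            /* per-entry gamma update */
        outer_iterations++;
    }
    printf("outer loop ran %d times (expected %d)\n",
           outer_iterations, crtc_count);
    return 0;
}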
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3601466c5502..4ff9b6cc973f 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
| @@ -865,7 +865,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 865 | int max_freq; | 865 | int max_freq; |
| 866 | 866 | ||
| 867 | /* RPSTAT1 is in the GT power well */ | 867 | /* RPSTAT1 is in the GT power well */ |
| 868 | __gen6_force_wake_get(dev_priv); | 868 | __gen6_gt_force_wake_get(dev_priv); |
| 869 | 869 | ||
| 870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); | 870 | seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); |
| 871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); | 871 | seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1)); |
| @@ -888,7 +888,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused) | |||
| 888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", | 888 | seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", |
| 889 | max_freq * 100); | 889 | max_freq * 100); |
| 890 | 890 | ||
| 891 | __gen6_force_wake_put(dev_priv); | 891 | __gen6_gt_force_wake_put(dev_priv); |
| 892 | } else { | 892 | } else { |
| 893 | seq_printf(m, "no P-state info available\n"); | 893 | seq_printf(m, "no P-state info available\n"); |
| 894 | } | 894 | } |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 17bd766f2081..e33d9be7df3b 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
| @@ -1895,6 +1895,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
| 1895 | if (IS_GEN2(dev)) | 1895 | if (IS_GEN2(dev)) |
| 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); | 1896 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30)); |
| 1897 | 1897 | ||
| 1898 | /* 965GM sometimes incorrectly writes to hardware status page (HWS) | ||
| 1899 | * using 32bit addressing, overwriting memory if HWS is located | ||
| 1900 | * above 4GB. | ||
| 1901 | * | ||
| 1902 | * The documentation also mentions an issue with undefined | ||
| 1903 | * behaviour if any general state is accessed within a page above 4GB, | ||
| 1904 | * which also needs to be handled carefully. | ||
| 1905 | */ | ||
| 1906 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) | ||
| 1907 | dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32)); | ||
| 1908 | |||
| 1898 | mmio_bar = IS_GEN2(dev) ? 1 : 0; | 1909 | mmio_bar = IS_GEN2(dev) ? 1 : 0; |
| 1899 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); | 1910 | dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0); |
| 1900 | if (!dev_priv->regs) { | 1911 | if (!dev_priv->regs) { |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0ad533f06af9..22ec066adae6 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -46,6 +46,9 @@ module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); | |||
| 46 | unsigned int i915_powersave = 1; | 46 | unsigned int i915_powersave = 1; |
| 47 | module_param_named(powersave, i915_powersave, int, 0600); | 47 | module_param_named(powersave, i915_powersave, int, 0600); |
| 48 | 48 | ||
| 49 | unsigned int i915_semaphores = 0; | ||
| 50 | module_param_named(semaphores, i915_semaphores, int, 0600); | ||
| 51 | |||
| 49 | unsigned int i915_enable_rc6 = 0; | 52 | unsigned int i915_enable_rc6 = 0; |
| 50 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | 53 | module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); |
| 51 | 54 | ||
| @@ -254,7 +257,7 @@ void intel_detect_pch (struct drm_device *dev) | |||
| 254 | } | 257 | } |
| 255 | } | 258 | } |
| 256 | 259 | ||
| 257 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | 260 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) |
| 258 | { | 261 | { |
| 259 | int count; | 262 | int count; |
| 260 | 263 | ||
| @@ -270,12 +273,22 @@ void __gen6_force_wake_get(struct drm_i915_private *dev_priv) | |||
| 270 | udelay(10); | 273 | udelay(10); |
| 271 | } | 274 | } |
| 272 | 275 | ||
| 273 | void __gen6_force_wake_put(struct drm_i915_private *dev_priv) | 276 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv) |
| 274 | { | 277 | { |
| 275 | I915_WRITE_NOTRACE(FORCEWAKE, 0); | 278 | I915_WRITE_NOTRACE(FORCEWAKE, 0); |
| 276 | POSTING_READ(FORCEWAKE); | 279 | POSTING_READ(FORCEWAKE); |
| 277 | } | 280 | } |
| 278 | 281 | ||
| 282 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv) | ||
| 283 | { | ||
| 284 | int loop = 500; | ||
| 285 | u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
| 286 | while (fifo < 20 && loop--) { | ||
| 287 | udelay(10); | ||
| 288 | fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES); | ||
| 289 | } | ||
| 290 | } | ||
| 291 | |||
| 279 | static int i915_drm_freeze(struct drm_device *dev) | 292 | static int i915_drm_freeze(struct drm_device *dev) |
| 280 | { | 293 | { |
| 281 | struct drm_i915_private *dev_priv = dev->dev_private; | 294 | struct drm_i915_private *dev_priv = dev->dev_private; |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 65dfe81d0035..456f40484838 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -956,6 +956,7 @@ extern struct drm_ioctl_desc i915_ioctls[]; | |||
| 956 | extern int i915_max_ioctl; | 956 | extern int i915_max_ioctl; |
| 957 | extern unsigned int i915_fbpercrtc; | 957 | extern unsigned int i915_fbpercrtc; |
| 958 | extern unsigned int i915_powersave; | 958 | extern unsigned int i915_powersave; |
| 959 | extern unsigned int i915_semaphores; | ||
| 959 | extern unsigned int i915_lvds_downclock; | 960 | extern unsigned int i915_lvds_downclock; |
| 960 | extern unsigned int i915_panel_use_ssc; | 961 | extern unsigned int i915_panel_use_ssc; |
| 961 | extern unsigned int i915_enable_rc6; | 962 | extern unsigned int i915_enable_rc6; |
| @@ -1177,6 +1178,9 @@ void i915_gem_detach_phys_object(struct drm_device *dev, | |||
| 1177 | void i915_gem_free_all_phys_object(struct drm_device *dev); | 1178 | void i915_gem_free_all_phys_object(struct drm_device *dev); |
| 1178 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); | 1179 | void i915_gem_release(struct drm_device *dev, struct drm_file *file); |
| 1179 | 1180 | ||
| 1181 | uint32_t | ||
| 1182 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj); | ||
| 1183 | |||
| 1180 | /* i915_gem_gtt.c */ | 1184 | /* i915_gem_gtt.c */ |
| 1181 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); | 1185 | void i915_gem_restore_gtt_mappings(struct drm_device *dev); |
| 1182 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); | 1186 | int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj); |
| @@ -1353,22 +1357,32 @@ __i915_write(64, q) | |||
| 1353 | * must be set to prevent GT core from power down and stale values being | 1357 | * must be set to prevent GT core from power down and stale values being |
| 1354 | * returned. | 1358 | * returned. |
| 1355 | */ | 1359 | */ |
| 1356 | void __gen6_force_wake_get(struct drm_i915_private *dev_priv); | 1360 | void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv); |
| 1357 | void __gen6_force_wake_put (struct drm_i915_private *dev_priv); | 1361 | void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv); |
| 1358 | static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg) | 1362 | void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); |
| 1363 | |||
| 1364 | static inline u32 i915_gt_read(struct drm_i915_private *dev_priv, u32 reg) | ||
| 1359 | { | 1365 | { |
| 1360 | u32 val; | 1366 | u32 val; |
| 1361 | 1367 | ||
| 1362 | if (dev_priv->info->gen >= 6) { | 1368 | if (dev_priv->info->gen >= 6) { |
| 1363 | __gen6_force_wake_get(dev_priv); | 1369 | __gen6_gt_force_wake_get(dev_priv); |
| 1364 | val = I915_READ(reg); | 1370 | val = I915_READ(reg); |
| 1365 | __gen6_force_wake_put(dev_priv); | 1371 | __gen6_gt_force_wake_put(dev_priv); |
| 1366 | } else | 1372 | } else |
| 1367 | val = I915_READ(reg); | 1373 | val = I915_READ(reg); |
| 1368 | 1374 | ||
| 1369 | return val; | 1375 | return val; |
| 1370 | } | 1376 | } |
| 1371 | 1377 | ||
| 1378 | static inline void i915_gt_write(struct drm_i915_private *dev_priv, | ||
| 1379 | u32 reg, u32 val) | ||
| 1380 | { | ||
| 1381 | if (dev_priv->info->gen >= 6) | ||
| 1382 | __gen6_gt_wait_for_fifo(dev_priv); | ||
| 1383 | I915_WRITE(reg, val); | ||
| 1384 | } | ||
| 1385 | |||
| 1372 | static inline void | 1386 | static inline void |
| 1373 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) | 1387 | i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len) |
| 1374 | { | 1388 | { |
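The renamed helpers make the GT access rules explicit: i915_gt_read() brackets the read with __gen6_gt_force_wake_get()/__gen6_gt_force_wake_put() so the GT power well stays up, and i915_gt_write() first waits for free GT FIFO entries. A toy user-space analogue of those two wrappers; the "hardware" side is simulated and every name other than the quoted kernel functions is invented:

#include <stdio.h>

static int wake_count;
static int fifo_free = 2;

static unsigned int gt_read(unsigned int fake_reg_value)
{
    wake_count++;                       /* __gen6_gt_force_wake_get() */
    unsigned int val = fake_reg_value;  /* I915_READ(reg) */
    wake_count--;                       /* __gen6_gt_force_wake_put() */
    return val;
}

static void gt_write(unsigned int val)
{
    while (fifo_free < 1)               /* __gen6_gt_wait_for_fifo() */
        fifo_free = 2;                  /* pretend slots drained */
    fifo_free--;
    printf("wrote 0x%x\n", val);        /* I915_WRITE(reg, val) */
}

int main(void)
{
    printf("read 0x%x\n", gt_read(0xdeadbeef));
    gt_write(0x1);
    gt_write(0x2);
    gt_write(0x3);
    return 0;
}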
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index cf4f74c7c6fb..36e66cc5225e 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1398,7 +1398,7 @@ i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj) | |||
| 1398 | * Return the required GTT alignment for an object, only taking into account | 1398 | * Return the required GTT alignment for an object, only taking into account |
| 1399 | * unfenced tiled surface requirements. | 1399 | * unfenced tiled surface requirements. |
| 1400 | */ | 1400 | */ |
| 1401 | static uint32_t | 1401 | uint32_t |
| 1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) | 1402 | i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj) |
| 1403 | { | 1403 | { |
| 1404 | struct drm_device *dev = obj->base.dev; | 1404 | struct drm_device *dev = obj->base.dev; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index d2f445e825f2..50ab1614571c 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
| @@ -772,8 +772,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj, | |||
| 772 | if (from == NULL || to == from) | 772 | if (from == NULL || to == from) |
| 773 | return 0; | 773 | return 0; |
| 774 | 774 | ||
| 775 | /* XXX gpu semaphores are currently causing hard hangs on SNB mobile */ | 775 | /* XXX gpu semaphores are implicated in various hard hangs on SNB */ |
| 776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || IS_MOBILE(obj->base.dev)) | 776 | if (INTEL_INFO(obj->base.dev)->gen < 6 || !i915_semaphores) |
| 777 | return i915_gem_object_wait_rendering(obj, true); | 777 | return i915_gem_object_wait_rendering(obj, true); |
| 778 | 778 | ||
| 779 | idx = intel_ring_sync_index(from, to); | 779 | idx = intel_ring_sync_index(from, to); |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 79a04fde69b5..d64843e18df2 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
| @@ -184,7 +184,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
| 184 | static bool | 184 | static bool |
| 185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | 185 | i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) |
| 186 | { | 186 | { |
| 187 | int tile_width, tile_height; | 187 | int tile_width; |
| 188 | 188 | ||
| 189 | /* Linear is always fine */ | 189 | /* Linear is always fine */ |
| 190 | if (tiling_mode == I915_TILING_NONE) | 190 | if (tiling_mode == I915_TILING_NONE) |
| @@ -215,20 +215,6 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode) | |||
| 215 | } | 215 | } |
| 216 | } | 216 | } |
| 217 | 217 | ||
| 218 | if (IS_GEN2(dev) || | ||
| 219 | (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))) | ||
| 220 | tile_height = 32; | ||
| 221 | else | ||
| 222 | tile_height = 8; | ||
| 223 | /* i8xx is strange: It has 2 interleaved rows of tiles, so needs an even | ||
| 224 | * number of tile rows. */ | ||
| 225 | if (IS_GEN2(dev)) | ||
| 226 | tile_height *= 2; | ||
| 227 | |||
| 228 | /* Size needs to be aligned to a full tile row */ | ||
| 229 | if (size & (tile_height * stride - 1)) | ||
| 230 | return false; | ||
| 231 | |||
| 232 | /* 965+ just needs multiples of tile width */ | 218 | /* 965+ just needs multiples of tile width */ |
| 233 | if (INTEL_INFO(dev)->gen >= 4) { | 219 | if (INTEL_INFO(dev)->gen >= 4) { |
| 234 | if (stride & (tile_width - 1)) | 220 | if (stride & (tile_width - 1)) |
| @@ -363,14 +349,27 @@ i915_gem_set_tiling(struct drm_device *dev, void *data, | |||
| 363 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && | 349 | (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end && |
| 364 | i915_gem_object_fence_ok(obj, args->tiling_mode)); | 350 | i915_gem_object_fence_ok(obj, args->tiling_mode)); |
| 365 | 351 | ||
| 366 | obj->tiling_changed = true; | 352 | /* Rebind if we need a change of alignment */ |
| 367 | obj->tiling_mode = args->tiling_mode; | 353 | if (!obj->map_and_fenceable) { |
| 368 | obj->stride = args->stride; | 354 | u32 unfenced_alignment = |
| 355 | i915_gem_get_unfenced_gtt_alignment(obj); | ||
| 356 | if (obj->gtt_offset & (unfenced_alignment - 1)) | ||
| 357 | ret = i915_gem_object_unbind(obj); | ||
| 358 | } | ||
| 359 | |||
| 360 | if (ret == 0) { | ||
| 361 | obj->tiling_changed = true; | ||
| 362 | obj->tiling_mode = args->tiling_mode; | ||
| 363 | obj->stride = args->stride; | ||
| 364 | } | ||
| 369 | } | 365 | } |
| 366 | /* we have to maintain this existing ABI... */ | ||
| 367 | args->stride = obj->stride; | ||
| 368 | args->tiling_mode = obj->tiling_mode; | ||
| 370 | drm_gem_object_unreference(&obj->base); | 369 | drm_gem_object_unreference(&obj->base); |
| 371 | mutex_unlock(&dev->struct_mutex); | 370 | mutex_unlock(&dev->struct_mutex); |
| 372 | 371 | ||
| 373 | return 0; | 372 | return ret; |
| 374 | } | 373 | } |
| 375 | 374 | ||
| 376 | /** | 375 | /** |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 729d4233b763..3e6f486f4605 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -3261,6 +3261,8 @@ | |||
| 3261 | #define FORCEWAKE 0xA18C | 3261 | #define FORCEWAKE 0xA18C |
| 3262 | #define FORCEWAKE_ACK 0x130090 | 3262 | #define FORCEWAKE_ACK 0x130090 |
| 3263 | 3263 | ||
| 3264 | #define GT_FIFO_FREE_ENTRIES 0x120008 | ||
| 3265 | |||
| 3264 | #define GEN6_RPNSWREQ 0xA008 | 3266 | #define GEN6_RPNSWREQ 0xA008 |
| 3265 | #define GEN6_TURBO_DISABLE (1<<31) | 3267 | #define GEN6_TURBO_DISABLE (1<<31) |
| 3266 | #define GEN6_FREQUENCY(x) ((x)<<25) | 3268 | #define GEN6_FREQUENCY(x) ((x)<<25) |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index e79b25bbee6c..49fb54fd9a18 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -1219,7 +1219,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
| 1219 | u32 blt_ecoskpd; | 1219 | u32 blt_ecoskpd; |
| 1220 | 1220 | ||
| 1221 | /* Make sure blitter notifies FBC of writes */ | 1221 | /* Make sure blitter notifies FBC of writes */ |
| 1222 | __gen6_force_wake_get(dev_priv); | 1222 | __gen6_gt_force_wake_get(dev_priv); |
| 1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); | 1223 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
| 1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | 1224 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << |
| 1225 | GEN6_BLITTER_LOCK_SHIFT; | 1225 | GEN6_BLITTER_LOCK_SHIFT; |
| @@ -1230,7 +1230,7 @@ static void sandybridge_blit_fbc_update(struct drm_device *dev) | |||
| 1230 | GEN6_BLITTER_LOCK_SHIFT); | 1230 | GEN6_BLITTER_LOCK_SHIFT); |
| 1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | 1231 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); |
| 1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | 1232 | POSTING_READ(GEN6_BLITTER_ECOSKPD); |
| 1233 | __gen6_force_wake_put(dev_priv); | 1233 | __gen6_gt_force_wake_put(dev_priv); |
| 1234 | } | 1234 | } |
| 1235 | 1235 | ||
| 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) | 1236 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
| @@ -6282,7 +6282,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
| 6282 | * userspace... | 6282 | * userspace... |
| 6283 | */ | 6283 | */ |
| 6284 | I915_WRITE(GEN6_RC_STATE, 0); | 6284 | I915_WRITE(GEN6_RC_STATE, 0); |
| 6285 | __gen6_force_wake_get(dev_priv); | 6285 | __gen6_gt_force_wake_get(dev_priv); |
| 6286 | 6286 | ||
| 6287 | /* disable the counters and set deterministic thresholds */ | 6287 | /* disable the counters and set deterministic thresholds */ |
| 6288 | I915_WRITE(GEN6_RC_CONTROL, 0); | 6288 | I915_WRITE(GEN6_RC_CONTROL, 0); |
| @@ -6380,7 +6380,7 @@ void gen6_enable_rps(struct drm_i915_private *dev_priv) | |||
| 6380 | /* enable all PM interrupts */ | 6380 | /* enable all PM interrupts */ |
| 6381 | I915_WRITE(GEN6_PMINTRMSK, 0); | 6381 | I915_WRITE(GEN6_PMINTRMSK, 0); |
| 6382 | 6382 | ||
| 6383 | __gen6_force_wake_put(dev_priv); | 6383 | __gen6_gt_force_wake_put(dev_priv); |
| 6384 | } | 6384 | } |
| 6385 | 6385 | ||
| 6386 | void intel_enable_clock_gating(struct drm_device *dev) | 6386 | void intel_enable_clock_gating(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 6d6fde85a636..34306865a5df 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
| @@ -14,22 +14,23 @@ struct intel_hw_status_page { | |||
| 14 | struct drm_i915_gem_object *obj; | 14 | struct drm_i915_gem_object *obj; |
| 15 | }; | 15 | }; |
| 16 | 16 | ||
| 17 | #define I915_RING_READ(reg) i915_safe_read(dev_priv, reg) | 17 | #define I915_RING_READ(reg) i915_gt_read(dev_priv, reg) |
| 18 | #define I915_RING_WRITE(reg, val) i915_gt_write(dev_priv, reg, val) | ||
| 18 | 19 | ||
| 19 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) | 20 | #define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL((ring)->mmio_base)) |
| 20 | #define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val) | 21 | #define I915_WRITE_TAIL(ring, val) I915_RING_WRITE(RING_TAIL((ring)->mmio_base), val) |
| 21 | 22 | ||
| 22 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) | 23 | #define I915_READ_START(ring) I915_RING_READ(RING_START((ring)->mmio_base)) |
| 23 | #define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val) | 24 | #define I915_WRITE_START(ring, val) I915_RING_WRITE(RING_START((ring)->mmio_base), val) |
| 24 | 25 | ||
| 25 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) | 26 | #define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD((ring)->mmio_base)) |
| 26 | #define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val) | 27 | #define I915_WRITE_HEAD(ring, val) I915_RING_WRITE(RING_HEAD((ring)->mmio_base), val) |
| 27 | 28 | ||
| 28 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) | 29 | #define I915_READ_CTL(ring) I915_RING_READ(RING_CTL((ring)->mmio_base)) |
| 29 | #define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val) | 30 | #define I915_WRITE_CTL(ring, val) I915_RING_WRITE(RING_CTL((ring)->mmio_base), val) |
| 30 | 31 | ||
| 31 | #define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val) | ||
| 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) | 32 | #define I915_READ_IMR(ring) I915_RING_READ(RING_IMR((ring)->mmio_base)) |
| 33 | #define I915_WRITE_IMR(ring, val) I915_RING_WRITE(RING_IMR((ring)->mmio_base), val) | ||
| 33 | 34 | ||
| 34 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) | 35 | #define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID((ring)->mmio_base)) |
| 35 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) | 36 | #define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0((ring)->mmio_base)) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c index 65699bfaaaea..b368ed74aad7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dma.c +++ b/drivers/gpu/drm/nouveau/nouveau_dma.c | |||
| @@ -83,7 +83,8 @@ nouveau_dma_init(struct nouveau_channel *chan) | |||
| 83 | return ret; | 83 | return ret; |
| 84 | 84 | ||
| 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ | 85 | /* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */ |
| 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy); | 86 | ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfd0, 0x1000, |
| 87 | &chan->m2mf_ntfy); | ||
| 87 | if (ret) | 88 | if (ret) |
| 88 | return ret; | 89 | return ret; |
| 89 | 90 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h index 9821fcacc3d2..982d70b12722 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drv.h +++ b/drivers/gpu/drm/nouveau/nouveau_drv.h | |||
| @@ -852,7 +852,8 @@ extern const struct ttm_mem_type_manager_func nouveau_vram_manager; | |||
| 852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); | 852 | extern int nouveau_notifier_init_channel(struct nouveau_channel *); |
| 853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); | 853 | extern void nouveau_notifier_takedown_channel(struct nouveau_channel *); |
| 854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, | 854 | extern int nouveau_notifier_alloc(struct nouveau_channel *, uint32_t handle, |
| 855 | int cout, uint32_t *offset); | 855 | int cout, uint32_t start, uint32_t end, |
| 856 | uint32_t *offset); | ||
| 856 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); | 857 | extern int nouveau_notifier_offset(struct nouveau_gpuobj *, uint32_t *); |
| 857 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, | 858 | extern int nouveau_ioctl_notifier_alloc(struct drm_device *, void *data, |
| 858 | struct drm_file *); | 859 | struct drm_file *); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 26347b7cd872..b0fb9bdcddb7 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c | |||
| @@ -725,8 +725,10 @@ nouveau_vram_manager_new(struct ttm_mem_type_manager *man, | |||
| 725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, | 725 | ret = vram->get(dev, mem->num_pages << PAGE_SHIFT, |
| 726 | mem->page_alignment << PAGE_SHIFT, size_nc, | 726 | mem->page_alignment << PAGE_SHIFT, size_nc, |
| 727 | (nvbo->tile_flags >> 8) & 0xff, &node); | 727 | (nvbo->tile_flags >> 8) & 0xff, &node); |
| 728 | if (ret) | 728 | if (ret) { |
| 729 | return ret; | 729 | mem->mm_node = NULL; |
| 730 | return (ret == -ENOSPC) ? 0 : ret; | ||
| 731 | } | ||
| 730 | 732 | ||
| 731 | node->page_shift = 12; | 733 | node->page_shift = 12; |
| 732 | if (nvbo->vma.node) | 734 | if (nvbo->vma.node) |
diff --git a/drivers/gpu/drm/nouveau/nouveau_mm.c b/drivers/gpu/drm/nouveau/nouveau_mm.c index 8844b50c3e54..7609756b6faf 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mm.c +++ b/drivers/gpu/drm/nouveau/nouveau_mm.c | |||
| @@ -123,7 +123,7 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc, | |||
| 123 | return 0; | 123 | return 0; |
| 124 | } | 124 | } |
| 125 | 125 | ||
| 126 | return -ENOMEM; | 126 | return -ENOSPC; |
| 127 | } | 127 | } |
| 128 | 128 | ||
| 129 | int | 129 | int |
diff --git a/drivers/gpu/drm/nouveau/nouveau_notifier.c b/drivers/gpu/drm/nouveau/nouveau_notifier.c index fe29d604b820..5ea167623a82 100644 --- a/drivers/gpu/drm/nouveau/nouveau_notifier.c +++ b/drivers/gpu/drm/nouveau/nouveau_notifier.c | |||
| @@ -96,7 +96,8 @@ nouveau_notifier_gpuobj_dtor(struct drm_device *dev, | |||
| 96 | 96 | ||
| 97 | int | 97 | int |
| 98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | 98 | nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, |
| 99 | int size, uint32_t *b_offset) | 99 | int size, uint32_t start, uint32_t end, |
| 100 | uint32_t *b_offset) | ||
| 100 | { | 101 | { |
| 101 | struct drm_device *dev = chan->dev; | 102 | struct drm_device *dev = chan->dev; |
| 102 | struct nouveau_gpuobj *nobj = NULL; | 103 | struct nouveau_gpuobj *nobj = NULL; |
| @@ -104,9 +105,10 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle, | |||
| 104 | uint32_t offset; | 105 | uint32_t offset; |
| 105 | int target, ret; | 106 | int target, ret; |
| 106 | 107 | ||
| 107 | mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0); | 108 | mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0, |
| 109 | start, end, 0); | ||
| 108 | if (mem) | 110 | if (mem) |
| 109 | mem = drm_mm_get_block(mem, size, 0); | 111 | mem = drm_mm_get_block_range(mem, size, 0, start, end); |
| 110 | if (!mem) { | 112 | if (!mem) { |
| 111 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); | 113 | NV_ERROR(dev, "Channel %d notifier block full\n", chan->id); |
| 112 | return -ENOMEM; | 114 | return -ENOMEM; |
| @@ -177,7 +179,8 @@ nouveau_ioctl_notifier_alloc(struct drm_device *dev, void *data, | |||
| 177 | if (IS_ERR(chan)) | 179 | if (IS_ERR(chan)) |
| 178 | return PTR_ERR(chan); | 180 | return PTR_ERR(chan); |
| 179 | 181 | ||
| 180 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, &na->offset); | 182 | ret = nouveau_notifier_alloc(chan, na->handle, na->size, 0, 0x1000, |
| 183 | &na->offset); | ||
| 181 | nouveau_channel_put(&chan); | 184 | nouveau_channel_put(&chan); |
| 182 | return ret; | 185 | return ret; |
| 183 | } | 186 | } |
diff --git a/drivers/gpu/drm/nouveau/nv50_instmem.c b/drivers/gpu/drm/nouveau/nv50_instmem.c index ea0041810ae3..e57caa2a00e3 100644 --- a/drivers/gpu/drm/nouveau/nv50_instmem.c +++ b/drivers/gpu/drm/nouveau/nv50_instmem.c | |||
| @@ -403,16 +403,24 @@ nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) | |||
| 403 | void | 403 | void |
| 404 | nv50_instmem_flush(struct drm_device *dev) | 404 | nv50_instmem_flush(struct drm_device *dev) |
| 405 | { | 405 | { |
| 406 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 407 | |||
| 408 | spin_lock(&dev_priv->ramin_lock); | ||
| 406 | nv_wr32(dev, 0x00330c, 0x00000001); | 409 | nv_wr32(dev, 0x00330c, 0x00000001); |
| 407 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) | 410 | if (!nv_wait(dev, 0x00330c, 0x00000002, 0x00000000)) |
| 408 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 411 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
| 412 | spin_unlock(&dev_priv->ramin_lock); | ||
| 409 | } | 413 | } |
| 410 | 414 | ||
| 411 | void | 415 | void |
| 412 | nv84_instmem_flush(struct drm_device *dev) | 416 | nv84_instmem_flush(struct drm_device *dev) |
| 413 | { | 417 | { |
| 418 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 419 | |||
| 420 | spin_lock(&dev_priv->ramin_lock); | ||
| 414 | nv_wr32(dev, 0x070000, 0x00000001); | 421 | nv_wr32(dev, 0x070000, 0x00000001); |
| 415 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) | 422 | if (!nv_wait(dev, 0x070000, 0x00000002, 0x00000000)) |
| 416 | NV_ERROR(dev, "PRAMIN flush timeout\n"); | 423 | NV_ERROR(dev, "PRAMIN flush timeout\n"); |
| 424 | spin_unlock(&dev_priv->ramin_lock); | ||
| 417 | } | 425 | } |
| 418 | 426 | ||
diff --git a/drivers/gpu/drm/nouveau/nv50_vm.c b/drivers/gpu/drm/nouveau/nv50_vm.c index 459ff08241e5..6144156f255a 100644 --- a/drivers/gpu/drm/nouveau/nv50_vm.c +++ b/drivers/gpu/drm/nouveau/nv50_vm.c | |||
| @@ -169,7 +169,11 @@ nv50_vm_flush(struct nouveau_vm *vm) | |||
| 169 | void | 169 | void |
| 170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) | 170 | nv50_vm_flush_engine(struct drm_device *dev, int engine) |
| 171 | { | 171 | { |
| 172 | struct drm_nouveau_private *dev_priv = dev->dev_private; | ||
| 173 | |||
| 174 | spin_lock(&dev_priv->ramin_lock); | ||
| 172 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); | 175 | nv_wr32(dev, 0x100c80, (engine << 16) | 1); |
| 173 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) | 176 | if (!nv_wait(dev, 0x100c80, 0x00000001, 0x00000000)) |
| 174 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); | 177 | NV_ERROR(dev, "vm flush timeout: engine %d\n", engine); |
| 178 | spin_unlock(&dev_priv->ramin_lock); | ||
| 175 | } | 179 | } |
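The three nouveau hunks above apply the same fix: the trigger-then-poll sequence on a shared flush register (PRAMIN flush, VM engine flush) is serialized with the existing ramin_lock so concurrent callers can no longer interleave their trigger writes and observe each other's completion, or a spurious timeout. A minimal sketch of that pattern, reusing the helpers from the hunks and a hypothetical EXAMPLE_FLUSH_REG offset:

    /* Sketch only: serialize a trigger-then-poll flush on a shared register.
     * nv_wr32/nv_wait/NV_ERROR and dev_priv->ramin_lock are the helpers used
     * above; EXAMPLE_FLUSH_REG stands in for the real register offset. */
    static void example_flush(struct drm_device *dev)
    {
            struct drm_nouveau_private *dev_priv = dev->dev_private;

            spin_lock(&dev_priv->ramin_lock);
            nv_wr32(dev, EXAMPLE_FLUSH_REG, 0x00000001);    /* kick the flush */
            if (!nv_wait(dev, EXAMPLE_FLUSH_REG, 0x00000002, 0x00000000))
                    NV_ERROR(dev, "flush timeout\n");       /* busy bit never cleared */
            spin_unlock(&dev_priv->ramin_lock);             /* poll finished under the lock */
    }

Plain spin_lock() (rather than spin_lock_irqsave()) matches the hunks and assumes these flushes are never issued from hard-IRQ context.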
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 1fa091e05690..4a5c4a44ffb1 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -62,6 +62,7 @@ | |||
| 62 | #include <linux/notifier.h> | 62 | #include <linux/notifier.h> |
| 63 | #include <linux/cpu.h> | 63 | #include <linux/cpu.h> |
| 64 | #include <asm/mwait.h> | 64 | #include <asm/mwait.h> |
| 65 | #include <asm/msr.h> | ||
| 65 | 66 | ||
| 66 | #define INTEL_IDLE_VERSION "0.4" | 67 | #define INTEL_IDLE_VERSION "0.4" |
| 67 | #define PREFIX "intel_idle: " | 68 | #define PREFIX "intel_idle: " |
| @@ -85,6 +86,12 @@ static int intel_idle(struct cpuidle_device *dev, struct cpuidle_state *state); | |||
| 85 | static struct cpuidle_state *cpuidle_state_table; | 86 | static struct cpuidle_state *cpuidle_state_table; |
| 86 | 87 | ||
| 87 | /* | 88 | /* |
| 89 | * Hardware C-state auto-demotion may not always be optimal. | ||
| 90 | * Indicate which enable bits to clear here. | ||
| 91 | */ | ||
| 92 | static unsigned long long auto_demotion_disable_flags; | ||
| 93 | |||
| 94 | /* | ||
| 88 | * Set this flag for states where the HW flushes the TLB for us | 95 | * Set this flag for states where the HW flushes the TLB for us |
| 89 | * and so we don't need cross-calls to keep it consistent. | 96 | * and so we don't need cross-calls to keep it consistent. |
| 90 | * If this flag is set, SW flushes the TLB, so even if the | 97 | * If this flag is set, SW flushes the TLB, so even if the |
| @@ -281,6 +288,15 @@ static struct notifier_block setup_broadcast_notifier = { | |||
| 281 | .notifier_call = setup_broadcast_cpuhp_notify, | 288 | .notifier_call = setup_broadcast_cpuhp_notify, |
| 282 | }; | 289 | }; |
| 283 | 290 | ||
| 291 | static void auto_demotion_disable(void *dummy) | ||
| 292 | { | ||
| 293 | unsigned long long msr_bits; | ||
| 294 | |||
| 295 | rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | ||
| 296 | msr_bits &= ~auto_demotion_disable_flags; | ||
| 297 | wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, msr_bits); | ||
| 298 | } | ||
| 299 | |||
| 284 | /* | 300 | /* |
| 285 | * intel_idle_probe() | 301 | * intel_idle_probe() |
| 286 | */ | 302 | */ |
| @@ -324,11 +340,17 @@ static int intel_idle_probe(void) | |||
| 324 | case 0x25: /* Westmere */ | 340 | case 0x25: /* Westmere */ |
| 325 | case 0x2C: /* Westmere */ | 341 | case 0x2C: /* Westmere */ |
| 326 | cpuidle_state_table = nehalem_cstates; | 342 | cpuidle_state_table = nehalem_cstates; |
| 343 | auto_demotion_disable_flags = | ||
| 344 | (NHM_C1_AUTO_DEMOTE | NHM_C3_AUTO_DEMOTE); | ||
| 327 | break; | 345 | break; |
| 328 | 346 | ||
| 329 | case 0x1C: /* 28 - Atom Processor */ | 347 | case 0x1C: /* 28 - Atom Processor */ |
| 348 | cpuidle_state_table = atom_cstates; | ||
| 349 | break; | ||
| 350 | |||
| 330 | case 0x26: /* 38 - Lincroft Atom Processor */ | 351 | case 0x26: /* 38 - Lincroft Atom Processor */ |
| 331 | cpuidle_state_table = atom_cstates; | 352 | cpuidle_state_table = atom_cstates; |
| 353 | auto_demotion_disable_flags = ATM_LNC_C6_AUTO_DEMOTE; | ||
| 332 | break; | 354 | break; |
| 333 | 355 | ||
| 334 | case 0x2A: /* SNB */ | 356 | case 0x2A: /* SNB */ |
| @@ -436,6 +458,8 @@ static int intel_idle_cpuidle_devices_init(void) | |||
| 436 | return -EIO; | 458 | return -EIO; |
| 437 | } | 459 | } |
| 438 | } | 460 | } |
| 461 | if (auto_demotion_disable_flags) | ||
| 462 | smp_call_function(auto_demotion_disable, NULL, 1); | ||
| 439 | 463 | ||
| 440 | return 0; | 464 | return 0; |
| 441 | } | 465 | } |
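The intel_idle change is a per-CPU read-modify-write of MSR_NHM_SNB_PKG_CST_CFG_CTL: probe selects which auto-demotion enable bits to clear for the detected CPU model, and device init pushes the change out with smp_call_function(). A hedged sketch of that pattern (the MSR and mask handling mirror the hunks; the function names here are illustrative):

    #include <linux/smp.h>
    #include <asm/msr.h>

    static unsigned long long demotion_clear_mask;  /* chosen per CPU model at probe time */

    /* Runs on each CPU that receives the IPI: clear the selected enable bits. */
    static void example_demotion_disable(void *unused)
    {
            unsigned long long val;

            rdmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, val);
            val &= ~demotion_clear_mask;
            wrmsrl(MSR_NHM_SNB_PKG_CST_CFG_CTL, val);
    }

    static void example_demotion_disable_all(void)
    {
            if (demotion_clear_mask)
                    smp_call_function(example_demotion_disable, NULL, 1);   /* wait=1 */
    }

Note that smp_call_function() targets the other online CPUs rather than the caller, so a standalone version might prefer on_each_cpu() to cover the local CPU as well.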
diff --git a/drivers/isdn/hardware/eicon/istream.c b/drivers/isdn/hardware/eicon/istream.c index 18f8798442fa..7bd5baa547be 100644 --- a/drivers/isdn/hardware/eicon/istream.c +++ b/drivers/isdn/hardware/eicon/istream.c | |||
| @@ -62,7 +62,7 @@ void diva_xdi_provide_istream_info (ADAPTER* a, | |||
| 62 | stream interface. | 62 | stream interface. |
| 63 | If synchronous service was requested, then function | 63 | If synchronous service was requested, then function |
| 64 | does return amount of data written to stream. | 64 | does return amount of data written to stream. |
| 65 | 'final' does indicate that pice of data to be written is | 65 | 'final' does indicate that piece of data to be written is |
| 66 | final part of frame (necessary only by structured datatransfer) | 66 | final part of frame (necessary only by structured datatransfer) |
| 67 | return 0 if zero length packet was written | 67 | return 0 if zero length packet was written |
| 68 | return -1 if stream is full | 68 | return -1 if stream is full |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 6a1f94042612..c45e6305b26f 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
| @@ -143,9 +143,9 @@ static void asic3_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
| 143 | unsigned long flags; | 143 | unsigned long flags; |
| 144 | struct asic3 *asic; | 144 | struct asic3 *asic; |
| 145 | 145 | ||
| 146 | desc->chip->ack(irq); | 146 | desc->irq_data.chip->irq_ack(&desc->irq_data); |
| 147 | 147 | ||
| 148 | asic = desc->handler_data; | 148 | asic = get_irq_data(irq); |
| 149 | 149 | ||
| 150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { | 150 | for (iter = 0 ; iter < MAX_ASIC_ISR_LOOPS; iter++) { |
| 151 | u32 status; | 151 | u32 status; |
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index 33c923d215c7..fdd8a1b8bc67 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c | |||
| @@ -118,12 +118,12 @@ static int __init davinci_vc_probe(struct platform_device *pdev) | |||
| 118 | 118 | ||
| 119 | /* Voice codec interface client */ | 119 | /* Voice codec interface client */ |
| 120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; | 120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; |
| 121 | cell->name = "davinci_vcif"; | 121 | cell->name = "davinci-vcif"; |
| 122 | cell->driver_data = davinci_vc; | 122 | cell->driver_data = davinci_vc; |
| 123 | 123 | ||
| 124 | /* Voice codec CQ93VC client */ | 124 | /* Voice codec CQ93VC client */ |
| 125 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; | 125 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; |
| 126 | cell->name = "cq93vc"; | 126 | cell->name = "cq93vc-codec"; |
| 127 | cell->driver_data = davinci_vc; | 127 | cell->driver_data = davinci_vc; |
| 128 | 128 | ||
| 129 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, | 129 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, |
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index 627cf577b16d..e9018d1394ee 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
| @@ -150,12 +150,12 @@ static inline int __tps6586x_write(struct i2c_client *client, | |||
| 150 | static inline int __tps6586x_writes(struct i2c_client *client, int reg, | 150 | static inline int __tps6586x_writes(struct i2c_client *client, int reg, |
| 151 | int len, uint8_t *val) | 151 | int len, uint8_t *val) |
| 152 | { | 152 | { |
| 153 | int ret; | 153 | int ret, i; |
| 154 | 154 | ||
| 155 | ret = i2c_smbus_write_i2c_block_data(client, reg, len, val); | 155 | for (i = 0; i < len; i++) { |
| 156 | if (ret < 0) { | 156 | ret = __tps6586x_write(client, reg + i, *(val + i)); |
| 157 | dev_err(&client->dev, "failed writings to 0x%02x\n", reg); | 157 | if (ret < 0) |
| 158 | return ret; | 158 | return ret; |
| 159 | } | 159 | } |
| 160 | 160 | ||
| 161 | return 0; | 161 | return 0; |
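The tps6586x change trades one i2c_smbus_write_i2c_block_data() call for a loop of single-register writes, presumably because the block transfer is not reliable for this device. A sketch of that fallback shape, with a hypothetical example_write_reg() standing in for the driver's __tps6586x_write():

    /* Sketch: emulate a block write with len single-register writes,
     * propagating the first error.  example_write_reg() is a stand-in
     * for the driver's one-register write helper. */
    static int example_write_block(struct i2c_client *client, int reg,
                                   int len, const uint8_t *val)
    {
            int ret, i;

            for (i = 0; i < len; i++) {
                    ret = example_write_reg(client, reg + i, val[i]);
                    if (ret < 0)
                            return ret;
            }

            return 0;
    }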
diff --git a/drivers/mfd/ucb1x00-ts.c b/drivers/mfd/ucb1x00-ts.c index 000cb414a78a..92b85e28a15e 100644 --- a/drivers/mfd/ucb1x00-ts.c +++ b/drivers/mfd/ucb1x00-ts.c | |||
| @@ -385,12 +385,18 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) | |||
| 385 | idev->close = ucb1x00_ts_close; | 385 | idev->close = ucb1x00_ts_close; |
| 386 | 386 | ||
| 387 | __set_bit(EV_ABS, idev->evbit); | 387 | __set_bit(EV_ABS, idev->evbit); |
| 388 | __set_bit(ABS_X, idev->absbit); | ||
| 389 | __set_bit(ABS_Y, idev->absbit); | ||
| 390 | __set_bit(ABS_PRESSURE, idev->absbit); | ||
| 391 | 388 | ||
| 392 | input_set_drvdata(idev, ts); | 389 | input_set_drvdata(idev, ts); |
| 393 | 390 | ||
| 391 | ucb1x00_adc_enable(ts->ucb); | ||
| 392 | ts->x_res = ucb1x00_ts_read_xres(ts); | ||
| 393 | ts->y_res = ucb1x00_ts_read_yres(ts); | ||
| 394 | ucb1x00_adc_disable(ts->ucb); | ||
| 395 | |||
| 396 | input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); | ||
| 397 | input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); | ||
| 398 | input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); | ||
| 399 | |||
| 394 | err = input_register_device(idev); | 400 | err = input_register_device(idev); |
| 395 | if (err) | 401 | if (err) |
| 396 | goto fail; | 402 | goto fail; |
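The ucb1x00-ts change probes the panel at add time instead of advertising unbounded axes: the ADC is powered up briefly, the X/Y plate resistance is sampled to obtain the axis maxima, and input_set_abs_params() registers each axis with those ranges. A sketch of that probe-time sequence, using the same helpers as the hunk:

    /* Sketch: derive input axis ranges from a one-off ADC measurement. */
    ucb1x00_adc_enable(ts->ucb);                    /* power up the ADC */
    ts->x_res = ucb1x00_ts_read_xres(ts);           /* sample plate resistance */
    ts->y_res = ucb1x00_ts_read_yres(ts);
    ucb1x00_adc_disable(ts->ucb);                   /* idle again until open() */

    /* arguments: dev, axis, min, max, fuzz, flat - fuzz/flat left at 0 */
    input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0);
    input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0);
    input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0);

input_set_abs_params() also sets the corresponding absbit bit, which is presumably why the explicit __set_bit(ABS_*) calls could be dropped.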
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c index 41233c7fa581..f4016a075fd6 100644 --- a/drivers/mfd/wm8994-core.c +++ b/drivers/mfd/wm8994-core.c | |||
| @@ -246,6 +246,16 @@ static int wm8994_suspend(struct device *dev) | |||
| 246 | struct wm8994 *wm8994 = dev_get_drvdata(dev); | 246 | struct wm8994 *wm8994 = dev_get_drvdata(dev); |
| 247 | int ret; | 247 | int ret; |
| 248 | 248 | ||
| 249 | /* Don't actually go through with the suspend if the CODEC is | ||
| 250 | * still active (eg, for audio passthrough from CP). */ | ||
| 251 | ret = wm8994_reg_read(wm8994, WM8994_POWER_MANAGEMENT_1); | ||
| 252 | if (ret < 0) { | ||
| 253 | dev_err(dev, "Failed to read power status: %d\n", ret); | ||
| 254 | } else if (ret & WM8994_VMID_SEL_MASK) { | ||
| 255 | dev_dbg(dev, "CODEC still active, ignoring suspend\n"); | ||
| 256 | return 0; | ||
| 257 | } | ||
| 258 | |||
| 249 | /* GPIO configuration state is saved here since we may be configuring | 259 | /* GPIO configuration state is saved here since we may be configuring |
| 250 | * the GPIO alternate functions even if we're not using the gpiolib | 260 | * the GPIO alternate functions even if we're not using the gpiolib |
| 251 | * driver for them. | 261 | * driver for them. |
| @@ -261,6 +271,8 @@ static int wm8994_suspend(struct device *dev) | |||
| 261 | if (ret < 0) | 271 | if (ret < 0) |
| 262 | dev_err(dev, "Failed to save LDO registers: %d\n", ret); | 272 | dev_err(dev, "Failed to save LDO registers: %d\n", ret); |
| 263 | 273 | ||
| 274 | wm8994->suspended = true; | ||
| 275 | |||
| 264 | ret = regulator_bulk_disable(wm8994->num_supplies, | 276 | ret = regulator_bulk_disable(wm8994->num_supplies, |
| 265 | wm8994->supplies); | 277 | wm8994->supplies); |
| 266 | if (ret != 0) { | 278 | if (ret != 0) { |
| @@ -276,6 +288,10 @@ static int wm8994_resume(struct device *dev) | |||
| 276 | struct wm8994 *wm8994 = dev_get_drvdata(dev); | 288 | struct wm8994 *wm8994 = dev_get_drvdata(dev); |
| 277 | int ret; | 289 | int ret; |
| 278 | 290 | ||
| 291 | /* We may have lied to the PM core about suspending */ | ||
| 292 | if (!wm8994->suspended) | ||
| 293 | return 0; | ||
| 294 | |||
| 279 | ret = regulator_bulk_enable(wm8994->num_supplies, | 295 | ret = regulator_bulk_enable(wm8994->num_supplies, |
| 280 | wm8994->supplies); | 296 | wm8994->supplies); |
| 281 | if (ret != 0) { | 297 | if (ret != 0) { |
| @@ -298,6 +314,8 @@ static int wm8994_resume(struct device *dev) | |||
| 298 | if (ret < 0) | 314 | if (ret < 0) |
| 299 | dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); | 315 | dev_err(dev, "Failed to restore GPIO registers: %d\n", ret); |
| 300 | 316 | ||
| 317 | wm8994->suspended = false; | ||
| 318 | |||
| 301 | return 0; | 319 | return 0; |
| 302 | } | 320 | } |
| 303 | #endif | 321 | #endif |
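The wm8994 suspend path now peeks at WM8994_POWER_MANAGEMENT_1 and, if VMID is still selected (the CODEC is in use, e.g. audio passthrough from the baseband), returns 0 without powering anything down; a wm8994->suspended flag records whether suspend really ran so resume can bail out symmetrically. A condensed sketch of that pairing (the register, mask and flag come from the hunks; the save/restore steps are elided):

    /* Sketch: skip suspend while the device is actively in use, and make
     * resume a no-op whenever suspend was skipped. */
    static int example_suspend(struct device *dev)
    {
            struct wm8994 *chip = dev_get_drvdata(dev);
            int ret = wm8994_reg_read(chip, WM8994_POWER_MANAGEMENT_1);

            if (ret >= 0 && (ret & WM8994_VMID_SEL_MASK))
                    return 0;               /* CODEC still active: pretend to suspend */

            /* ... save register state, disable supplies ... */
            chip->suspended = true;
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            struct wm8994 *chip = dev_get_drvdata(dev);

            if (!chip->suspended)
                    return 0;               /* we never really went down */

            /* ... re-enable supplies, restore register state ... */
            chip->suspended = false;
            return 0;
    }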
diff --git a/drivers/misc/bmp085.c b/drivers/misc/bmp085.c index 63ee4c1a5315..b6e1c9a6679e 100644 --- a/drivers/misc/bmp085.c +++ b/drivers/misc/bmp085.c | |||
| @@ -449,6 +449,7 @@ static const struct i2c_device_id bmp085_id[] = { | |||
| 449 | { "bmp085", 0 }, | 449 | { "bmp085", 0 }, |
| 450 | { } | 450 | { } |
| 451 | }; | 451 | }; |
| 452 | MODULE_DEVICE_TABLE(i2c, bmp085_id); | ||
| 452 | 453 | ||
| 453 | static struct i2c_driver bmp085_driver = { | 454 | static struct i2c_driver bmp085_driver = { |
| 454 | .driver = { | 455 | .driver = { |
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 5c4a54d9b6a4..ebc62ad4cc56 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c | |||
| @@ -792,7 +792,6 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
| 792 | */ | 792 | */ |
| 793 | mmc_release_host(host); | 793 | mmc_release_host(host); |
| 794 | err = mmc_add_card(host->card); | 794 | err = mmc_add_card(host->card); |
| 795 | mmc_claim_host(host); | ||
| 796 | if (err) | 795 | if (err) |
| 797 | goto remove_added; | 796 | goto remove_added; |
| 798 | 797 | ||
| @@ -805,12 +804,12 @@ int mmc_attach_sdio(struct mmc_host *host) | |||
| 805 | goto remove_added; | 804 | goto remove_added; |
| 806 | } | 805 | } |
| 807 | 806 | ||
| 807 | mmc_claim_host(host); | ||
| 808 | return 0; | 808 | return 0; |
| 809 | 809 | ||
| 810 | 810 | ||
| 811 | remove_added: | 811 | remove_added: |
| 812 | /* Remove without lock if the device has been added. */ | 812 | /* Remove without lock if the device has been added. */ |
| 813 | mmc_release_host(host); | ||
| 814 | mmc_sdio_remove(host); | 813 | mmc_sdio_remove(host); |
| 815 | mmc_claim_host(host); | 814 | mmc_claim_host(host); |
| 816 | remove: | 815 | remove: |
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 653c62475cb6..7897d114b290 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | * (you will need to reboot afterwards) */ | 22 | * (you will need to reboot afterwards) */ |
| 23 | /* #define BNX2X_STOP_ON_ERROR */ | 23 | /* #define BNX2X_STOP_ON_ERROR */ |
| 24 | 24 | ||
| 25 | #define DRV_MODULE_VERSION "1.62.00-5" | 25 | #define DRV_MODULE_VERSION "1.62.00-6" |
| 26 | #define DRV_MODULE_RELDATE "2011/01/30" | 26 | #define DRV_MODULE_RELDATE "2011/01/30" |
| 27 | #define BNX2X_BC_VER 0x040200 | 27 | #define BNX2X_BC_VER 0x040200 |
| 28 | 28 | ||
| @@ -1613,19 +1613,23 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms, | |||
| 1613 | #define BNX2X_BTR 4 | 1613 | #define BNX2X_BTR 4 |
| 1614 | #define MAX_SPQ_PENDING 8 | 1614 | #define MAX_SPQ_PENDING 8 |
| 1615 | 1615 | ||
| 1616 | 1616 | /* CMNG constants, as derived from system spec calculations */ | |
| 1617 | /* CMNG constants | 1617 | /* default MIN rate in case VNIC min rate is configured to zero - 100Mbps */ |
| 1618 | derived from lab experiments, and not from system spec calculations !!! */ | 1618 | #define DEF_MIN_RATE 100 |
| 1619 | #define DEF_MIN_RATE 100 | ||
| 1620 | /* resolution of the rate shaping timer - 100 usec */ | 1619 | /* resolution of the rate shaping timer - 100 usec */ |
| 1621 | #define RS_PERIODIC_TIMEOUT_USEC 100 | 1620 | #define RS_PERIODIC_TIMEOUT_USEC 100 |
| 1622 | /* resolution of fairness algorithm in usecs - | ||
| 1623 | coefficient for calculating the actual t fair */ | ||
| 1624 | #define T_FAIR_COEF 10000000 | ||
| 1625 | /* number of bytes in single QM arbitration cycle - | 1621 | /* number of bytes in single QM arbitration cycle - |
| 1626 | coefficient for calculating the fairness timer */ | 1622 | * coefficient for calculating the fairness timer */ |
| 1627 | #define QM_ARB_BYTES 40000 | 1623 | #define QM_ARB_BYTES 160000 |
| 1628 | #define FAIR_MEM 2 | 1624 | /* resolution of Min algorithm 1:100 */ |
| 1625 | #define MIN_RES 100 | ||
| 1626 | /* how many bytes above threshold for the minimal credit of Min algorithm*/ | ||
| 1627 | #define MIN_ABOVE_THRESH 32768 | ||
| 1628 | /* Fairness algorithm integration time coefficient - | ||
| 1629 | * for calculating the actual Tfair */ | ||
| 1630 | #define T_FAIR_COEF ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES) | ||
| 1631 | /* Memory of fairness algorithm . 2 cycles */ | ||
| 1632 | #define FAIR_MEM 2 | ||
| 1629 | 1633 | ||
| 1630 | 1634 | ||
| 1631 | #define ATTN_NIG_FOR_FUNC (1L << 8) | 1635 | #define ATTN_NIG_FOR_FUNC (1L << 8) |
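The reworked CMNG constants make T_FAIR_COEF a derived value rather than the old magic 10000000: with QM_ARB_BYTES raised to 160000, MIN_ABOVE_THRESH at 32768 and MIN_RES at 100, (MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES works out to 154,214,400. A tiny standalone check of that arithmetic (constants copied from the hunk):

    #include <stdio.h>

    #define QM_ARB_BYTES      160000    /* bytes per QM arbitration cycle */
    #define MIN_RES           100       /* Min algorithm resolution, 1:100 */
    #define MIN_ABOVE_THRESH  32768     /* bytes above threshold for the minimal credit */
    #define T_FAIR_COEF       ((MIN_ABOVE_THRESH + QM_ARB_BYTES) * 8 * MIN_RES)

    int main(void)
    {
            printf("T_FAIR_COEF = %d\n", T_FAIR_COEF);      /* prints 154214400 */
            return 0;
    }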
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c index 710ce5d04c53..93798129061b 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.c +++ b/drivers/net/bnx2x/bnx2x_cmn.c | |||
| @@ -259,10 +259,44 @@ static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue, | |||
| 259 | #endif | 259 | #endif |
| 260 | } | 260 | } |
| 261 | 261 | ||
| 262 | /* Timestamp option length allowed for TPA aggregation: | ||
| 263 | * | ||
| 264 | * nop nop kind length echo val | ||
| 265 | */ | ||
| 266 | #define TPA_TSTAMP_OPT_LEN 12 | ||
| 267 | /** | ||
| 268 | * Calculate the approximate value of the MSS for this | ||
| 269 | * aggregation using the first packet of it. | ||
| 270 | * | ||
| 271 | * @param bp | ||
| 272 | * @param parsing_flags Parsing flags from the START CQE | ||
| 273 | * @param len_on_bd Total length of the first packet for the | ||
| 274 | * aggregation. | ||
| 275 | */ | ||
| 276 | static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags, | ||
| 277 | u16 len_on_bd) | ||
| 278 | { | ||
| 279 | /* TPA aggregation won't have IP options or TCP options | ||
| 280 | * other than timestamp. | ||
| 281 | */ | ||
| 282 | u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr); | ||
| 283 | |||
| 284 | |||
| 285 | /* Check if there was a TCP timestamp; if there is, it will | ||
| 286 | * always be 12 bytes long: nop nop kind length echo val. | ||
| 287 | * | ||
| 288 | * Otherwise FW would close the aggregation. | ||
| 289 | */ | ||
| 290 | if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG) | ||
| 291 | hdrs_len += TPA_TSTAMP_OPT_LEN; | ||
| 292 | |||
| 293 | return len_on_bd - hdrs_len; | ||
| 294 | } | ||
| 295 | |||
| 262 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | 296 | static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, |
| 263 | struct sk_buff *skb, | 297 | struct sk_buff *skb, |
| 264 | struct eth_fast_path_rx_cqe *fp_cqe, | 298 | struct eth_fast_path_rx_cqe *fp_cqe, |
| 265 | u16 cqe_idx) | 299 | u16 cqe_idx, u16 parsing_flags) |
| 266 | { | 300 | { |
| 267 | struct sw_rx_page *rx_pg, old_rx_pg; | 301 | struct sw_rx_page *rx_pg, old_rx_pg; |
| 268 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); | 302 | u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd); |
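bnx2x_set_lro_mss() above approximates the MSS of a TPA aggregation from its first packet: the length on the BD minus a fixed Ethernet + IPv4 + TCP header, plus 12 more bytes when the firmware reports a TCP timestamp option (the only TCP option it will aggregate across). A standalone illustration of the arithmetic, assuming option-less 14/20/20-byte headers and a 1514-byte first frame:

    #include <stdio.h>
    #include <stdint.h>

    #define ETH_HLEN            14
    #define IP_HDR_LEN          20      /* sizeof(struct iphdr), no options */
    #define TCP_HDR_LEN         20      /* sizeof(struct tcphdr), no options */
    #define TPA_TSTAMP_OPT_LEN  12      /* nop nop kind length echo val */

    static uint16_t example_lro_mss(int has_timestamp, uint16_t len_on_bd)
    {
            uint16_t hdrs_len = ETH_HLEN + IP_HDR_LEN + TCP_HDR_LEN;

            if (has_timestamp)
                    hdrs_len += TPA_TSTAMP_OPT_LEN;

            return len_on_bd - hdrs_len;
    }

    int main(void)
    {
            /* 1514-byte frame: MSS 1460 without timestamps, 1448 with them */
            printf("%u %u\n", (unsigned)example_lro_mss(0, 1514),
                   (unsigned)example_lro_mss(1, 1514));
            return 0;
    }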
| @@ -275,8 +309,8 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
| 275 | 309 | ||
| 276 | /* This is needed in order to enable forwarding support */ | 310 | /* This is needed in order to enable forwarding support */ |
| 277 | if (frag_size) | 311 | if (frag_size) |
| 278 | skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE, | 312 | skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags, |
| 279 | max(frag_size, (u32)len_on_bd)); | 313 | len_on_bd); |
| 280 | 314 | ||
| 281 | #ifdef BNX2X_STOP_ON_ERROR | 315 | #ifdef BNX2X_STOP_ON_ERROR |
| 282 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { | 316 | if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) { |
| @@ -344,6 +378,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
| 344 | if (likely(new_skb)) { | 378 | if (likely(new_skb)) { |
| 345 | /* fix ip xsum and give it to the stack */ | 379 | /* fix ip xsum and give it to the stack */ |
| 346 | /* (no need to map the new skb) */ | 380 | /* (no need to map the new skb) */ |
| 381 | u16 parsing_flags = | ||
| 382 | le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags); | ||
| 347 | 383 | ||
| 348 | prefetch(skb); | 384 | prefetch(skb); |
| 349 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); | 385 | prefetch(((char *)(skb)) + L1_CACHE_BYTES); |
| @@ -373,9 +409,9 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp, | |||
| 373 | } | 409 | } |
| 374 | 410 | ||
| 375 | if (!bnx2x_fill_frag_skb(bp, fp, skb, | 411 | if (!bnx2x_fill_frag_skb(bp, fp, skb, |
| 376 | &cqe->fast_path_cqe, cqe_idx)) { | 412 | &cqe->fast_path_cqe, cqe_idx, |
| 377 | if ((le16_to_cpu(cqe->fast_path_cqe. | 413 | parsing_flags)) { |
| 378 | pars_flags.flags) & PARSING_FLAGS_VLAN)) | 414 | if (parsing_flags & PARSING_FLAGS_VLAN) |
| 379 | __vlan_hwaccel_put_tag(skb, | 415 | __vlan_hwaccel_put_tag(skb, |
| 380 | le16_to_cpu(cqe->fast_path_cqe. | 416 | le16_to_cpu(cqe->fast_path_cqe. |
| 381 | vlan_tag)); | 417 | vlan_tag)); |
| @@ -703,19 +739,20 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp) | |||
| 703 | { | 739 | { |
| 704 | u16 line_speed = bp->link_vars.line_speed; | 740 | u16 line_speed = bp->link_vars.line_speed; |
| 705 | if (IS_MF(bp)) { | 741 | if (IS_MF(bp)) { |
| 706 | u16 maxCfg = (bp->mf_config[BP_VN(bp)] & | 742 | u16 maxCfg = bnx2x_extract_max_cfg(bp, |
| 707 | FUNC_MF_CFG_MAX_BW_MASK) >> | 743 | bp->mf_config[BP_VN(bp)]); |
| 708 | FUNC_MF_CFG_MAX_BW_SHIFT; | 744 | |
| 709 | /* Calculate the current MAX line speed limit for the DCC | 745 | /* Calculate the current MAX line speed limit for the MF |
| 710 | * capable devices | 746 | * devices |
| 711 | */ | 747 | */ |
| 712 | if (IS_MF_SD(bp)) { | 748 | if (IS_MF_SI(bp)) |
| 749 | line_speed = (line_speed * maxCfg) / 100; | ||
| 750 | else { /* SD mode */ | ||
| 713 | u16 vn_max_rate = maxCfg * 100; | 751 | u16 vn_max_rate = maxCfg * 100; |
| 714 | 752 | ||
| 715 | if (vn_max_rate < line_speed) | 753 | if (vn_max_rate < line_speed) |
| 716 | line_speed = vn_max_rate; | 754 | line_speed = vn_max_rate; |
| 717 | } else /* IS_MF_SI(bp)) */ | 755 | } |
| 718 | line_speed = (line_speed * maxCfg) / 100; | ||
| 719 | } | 756 | } |
| 720 | 757 | ||
| 721 | return line_speed; | 758 | return line_speed; |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 03eb4d68e6bb..326ba44b3ded 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
| @@ -1044,4 +1044,24 @@ static inline void storm_memset_cmng(struct bnx2x *bp, | |||
| 1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); | 1044 | void bnx2x_acquire_phy_lock(struct bnx2x *bp); |
| 1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); | 1045 | void bnx2x_release_phy_lock(struct bnx2x *bp); |
| 1046 | 1046 | ||
| 1047 | /** | ||
| 1048 | * Extracts MAX BW part from MF configuration. | ||
| 1049 | * | ||
| 1050 | * @param bp | ||
| 1051 | * @param mf_cfg | ||
| 1052 | * | ||
| 1053 | * @return u16 | ||
| 1054 | */ | ||
| 1055 | static inline u16 bnx2x_extract_max_cfg(struct bnx2x *bp, u32 mf_cfg) | ||
| 1056 | { | ||
| 1057 | u16 max_cfg = (mf_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | ||
| 1058 | FUNC_MF_CFG_MAX_BW_SHIFT; | ||
| 1059 | if (!max_cfg) { | ||
| 1060 | BNX2X_ERR("Illegal configuration detected for Max BW - " | ||
| 1061 | "using 100 instead\n"); | ||
| 1062 | max_cfg = 100; | ||
| 1063 | } | ||
| 1064 | return max_cfg; | ||
| 1065 | } | ||
| 1066 | |||
| 1047 | #endif /* BNX2X_CMN_H */ | 1067 | #endif /* BNX2X_CMN_H */ |
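bnx2x_extract_max_cfg() pulls the MAX BW field out of the multi-function configuration word and substitutes 100 when the field is zero, so later rate calculations never divide by or scale with a zero limit. A standalone sketch of the mask/shift plus fallback (the field layout here is illustrative, not the real FUNC_MF_CFG_* values):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative layout: a 16-bit MAX BW field in bits 16..31. */
    #define EX_MAX_BW_MASK   0xffff0000u
    #define EX_MAX_BW_SHIFT  16

    static uint16_t example_extract_max_cfg(uint32_t mf_cfg)
    {
            uint16_t max_cfg = (mf_cfg & EX_MAX_BW_MASK) >> EX_MAX_BW_SHIFT;

            if (!max_cfg) {
                    fprintf(stderr, "illegal Max BW of 0, using 100 instead\n");
                    max_cfg = 100;
            }
            return max_cfg;
    }

    int main(void)
    {
            printf("%u\n", (unsigned)example_extract_max_cfg(50u << EX_MAX_BW_SHIFT)); /* 50 */
            printf("%u\n", (unsigned)example_extract_max_cfg(0));   /* falls back to 100 */
            return 0;
    }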
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c index 5b44a8b48509..ef2919987a10 100644 --- a/drivers/net/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/bnx2x/bnx2x_ethtool.c | |||
| @@ -238,7 +238,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
| 238 | speed |= (cmd->speed_hi << 16); | 238 | speed |= (cmd->speed_hi << 16); |
| 239 | 239 | ||
| 240 | if (IS_MF_SI(bp)) { | 240 | if (IS_MF_SI(bp)) { |
| 241 | u32 param = 0; | 241 | u32 param = 0, part; |
| 242 | u32 line_speed = bp->link_vars.line_speed; | 242 | u32 line_speed = bp->link_vars.line_speed; |
| 243 | 243 | ||
| 244 | /* use 10G if no link detected */ | 244 | /* use 10G if no link detected */ |
| @@ -251,9 +251,11 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
| 251 | REQ_BC_VER_4_SET_MF_BW); | 251 | REQ_BC_VER_4_SET_MF_BW); |
| 252 | return -EINVAL; | 252 | return -EINVAL; |
| 253 | } | 253 | } |
| 254 | if (line_speed < speed) { | 254 | part = (speed * 100) / line_speed; |
| 255 | BNX2X_DEV_INFO("New speed should be less or equal " | 255 | if (line_speed < speed || !part) { |
| 256 | "to actual line speed\n"); | 256 | BNX2X_DEV_INFO("Speed setting should be in a range " |
| 257 | "from 1%% to 100%% " | ||
| 258 | "of actual line speed\n"); | ||
| 257 | return -EINVAL; | 259 | return -EINVAL; |
| 258 | } | 260 | } |
| 259 | /* load old values */ | 261 | /* load old values */ |
| @@ -263,8 +265,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) | |||
| 263 | param &= FUNC_MF_CFG_MIN_BW_MASK; | 265 | param &= FUNC_MF_CFG_MIN_BW_MASK; |
| 264 | 266 | ||
| 265 | /* set new MAX value */ | 267 | /* set new MAX value */ |
| 266 | param |= (((speed * 100) / line_speed) | 268 | param |= (part << FUNC_MF_CFG_MAX_BW_SHIFT) |
| 267 | << FUNC_MF_CFG_MAX_BW_SHIFT) | ||
| 268 | & FUNC_MF_CFG_MAX_BW_MASK; | 269 | & FUNC_MF_CFG_MAX_BW_MASK; |
| 269 | 270 | ||
| 270 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); | 271 | bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param); |
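The set_settings path now converts the requested speed into a percentage of the current line speed up front (part = speed * 100 / line_speed) and rejects anything outside 1-100% before packing it into the MAX BW field, rather than only checking speed against the line speed. A standalone check of that validation:

    #include <stdio.h>

    /* Returns the percentage to program, or -1 if the request is invalid. */
    static int example_speed_to_part(unsigned int speed, unsigned int line_speed)
    {
            unsigned int part = (speed * 100) / line_speed;

            if (line_speed < speed || !part)
                    return -1;      /* must be 1..100 percent of the line speed */
            return (int)part;
    }

    int main(void)
    {
            printf("%d\n", example_speed_to_part(2500, 10000));   /* 25 */
            printf("%d\n", example_speed_to_part(50, 10000));     /* rounds to 0 -> -1 */
            printf("%d\n", example_speed_to_part(20000, 10000));  /* above line speed -> -1 */
            return 0;
    }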
| @@ -1781,9 +1782,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) | |||
| 1781 | { 0x100, 0x350 }, /* manuf_info */ | 1782 | { 0x100, 0x350 }, /* manuf_info */ |
| 1782 | { 0x450, 0xf0 }, /* feature_info */ | 1783 | { 0x450, 0xf0 }, /* feature_info */ |
| 1783 | { 0x640, 0x64 }, /* upgrade_key_info */ | 1784 | { 0x640, 0x64 }, /* upgrade_key_info */ |
| 1784 | { 0x6a4, 0x64 }, | ||
| 1785 | { 0x708, 0x70 }, /* manuf_key_info */ | 1785 | { 0x708, 0x70 }, /* manuf_key_info */ |
| 1786 | { 0x778, 0x70 }, | ||
| 1787 | { 0, 0 } | 1786 | { 0, 0 } |
| 1788 | }; | 1787 | }; |
| 1789 | __be32 buf[0x350 / 4]; | 1788 | __be32 buf[0x350 / 4]; |
| @@ -1933,11 +1932,11 @@ static void bnx2x_self_test(struct net_device *dev, | |||
| 1933 | buf[4] = 1; | 1932 | buf[4] = 1; |
| 1934 | etest->flags |= ETH_TEST_FL_FAILED; | 1933 | etest->flags |= ETH_TEST_FL_FAILED; |
| 1935 | } | 1934 | } |
| 1936 | if (bp->port.pmf) | 1935 | |
| 1937 | if (bnx2x_link_test(bp, is_serdes) != 0) { | 1936 | if (bnx2x_link_test(bp, is_serdes) != 0) { |
| 1938 | buf[5] = 1; | 1937 | buf[5] = 1; |
| 1939 | etest->flags |= ETH_TEST_FL_FAILED; | 1938 | etest->flags |= ETH_TEST_FL_FAILED; |
| 1940 | } | 1939 | } |
| 1941 | 1940 | ||
| 1942 | #ifdef BNX2X_EXTRA_DEBUG | 1941 | #ifdef BNX2X_EXTRA_DEBUG |
| 1943 | bnx2x_panic_dump(bp); | 1942 | bnx2x_panic_dump(bp); |
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h index 5a268e9a0895..fa6dbe3f2058 100644 --- a/drivers/net/bnx2x/bnx2x_init.h +++ b/drivers/net/bnx2x/bnx2x_init.h | |||
| @@ -241,7 +241,7 @@ static const struct { | |||
| 241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't | 241 | /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't |
| 242 | * want to handle "system kill" flow at the moment. | 242 | * want to handle "system kill" flow at the moment. |
| 243 | */ | 243 | */ |
| 244 | BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff), | 244 | BLOCK_PRTY_INFO(PXP, 0x7ffffff, 0x3ffffff, 0x3ffffff, 0x7ffffff), |
| 245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), | 245 | BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff), |
| 246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), | 246 | BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff), |
| 247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), | 247 | BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0), |
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index d584d32c747d..032ae184b605 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
| @@ -1974,13 +1974,22 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
| 1974 | vn_max_rate = 0; | 1974 | vn_max_rate = 0; |
| 1975 | 1975 | ||
| 1976 | } else { | 1976 | } else { |
| 1977 | u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg); | ||
| 1978 | |||
| 1977 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> | 1979 | vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> |
| 1978 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; | 1980 | FUNC_MF_CFG_MIN_BW_SHIFT) * 100; |
| 1979 | /* If min rate is zero - set it to 1 */ | 1981 | /* If fairness is enabled (not all min rates are zeroes) and |
| 1982 | if current min rate is zero - set it to 1. | ||
| 1983 | This is a requirement of the algorithm. */ | ||
| 1980 | if (bp->vn_weight_sum && (vn_min_rate == 0)) | 1984 | if (bp->vn_weight_sum && (vn_min_rate == 0)) |
| 1981 | vn_min_rate = DEF_MIN_RATE; | 1985 | vn_min_rate = DEF_MIN_RATE; |
| 1982 | vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> | 1986 | |
| 1983 | FUNC_MF_CFG_MAX_BW_SHIFT) * 100; | 1987 | if (IS_MF_SI(bp)) |
| 1988 | /* maxCfg in percents of linkspeed */ | ||
| 1989 | vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100; | ||
| 1990 | else | ||
| 1991 | /* maxCfg is absolute in 100Mb units */ | ||
| 1992 | vn_max_rate = maxCfg * 100; | ||
| 1984 | } | 1993 | } |
| 1985 | 1994 | ||
| 1986 | DP(NETIF_MSG_IFUP, | 1995 | DP(NETIF_MSG_IFUP, |
| @@ -2006,7 +2015,8 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn) | |||
| 2006 | m_fair_vn.vn_credit_delta = | 2015 | m_fair_vn.vn_credit_delta = |
| 2007 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / | 2016 | max_t(u32, (vn_min_rate * (T_FAIR_COEF / |
| 2008 | (8 * bp->vn_weight_sum))), | 2017 | (8 * bp->vn_weight_sum))), |
| 2009 | (bp->cmng.fair_vars.fair_threshold * 2)); | 2018 | (bp->cmng.fair_vars.fair_threshold + |
| 2019 | MIN_ABOVE_THRESH)); | ||
| 2010 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", | 2020 | DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n", |
| 2011 | m_fair_vn.vn_credit_delta); | 2021 | m_fair_vn.vn_credit_delta); |
| 2012 | } | 2022 | } |
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c index bda60d590fa8..3445ded6674f 100644 --- a/drivers/net/bnx2x/bnx2x_stats.c +++ b/drivers/net/bnx2x/bnx2x_stats.c | |||
| @@ -1239,14 +1239,14 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) | |||
| 1239 | if (unlikely(bp->panic)) | 1239 | if (unlikely(bp->panic)) |
| 1240 | return; | 1240 | return; |
| 1241 | 1241 | ||
| 1242 | bnx2x_stats_stm[bp->stats_state][event].action(bp); | ||
| 1243 | |||
| 1242 | /* Protect a state change flow */ | 1244 | /* Protect a state change flow */ |
| 1243 | spin_lock_bh(&bp->stats_lock); | 1245 | spin_lock_bh(&bp->stats_lock); |
| 1244 | state = bp->stats_state; | 1246 | state = bp->stats_state; |
| 1245 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; | 1247 | bp->stats_state = bnx2x_stats_stm[state][event].next_state; |
| 1246 | spin_unlock_bh(&bp->stats_lock); | 1248 | spin_unlock_bh(&bp->stats_lock); |
| 1247 | 1249 | ||
| 1248 | bnx2x_stats_stm[state][event].action(bp); | ||
| 1249 | |||
| 1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) | 1250 | if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) |
| 1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", | 1251 | DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", |
| 1252 | state, event, bp->stats_state); | 1252 | state, event, bp->stats_state); |
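The stats fix moves the action dispatch after the locked state transition: the handler takes stats_lock, captures the current state, advances to next_state, drops the lock, and only then runs the action that belongs to the captured state, closing the window where two events could act on a stale state. A sketch of that lock-then-transition-then-act shape (the state-table layout and type names are assumed to match the driver's bnx2x_stats_stm):

    /* Sketch: transition under the lock, then run the action for the state
     * that was current when the lock was taken. */
    static void example_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
    {
            enum bnx2x_stats_state state;

            spin_lock_bh(&bp->stats_lock);
            state = bp->stats_state;
            bp->stats_state = bnx2x_stats_stm[state][event].next_state;
            spin_unlock_bh(&bp->stats_lock);

            bnx2x_stats_stm[state][event].action(bp);   /* matches 'state', not the new state */
    }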
diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index 5157e15e96eb..aeea9f9ff6e8 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c | |||
| @@ -633,6 +633,7 @@ static const struct net_device_ops softing_netdev_ops = { | |||
| 633 | }; | 633 | }; |
| 634 | 634 | ||
| 635 | static const struct can_bittiming_const softing_btr_const = { | 635 | static const struct can_bittiming_const softing_btr_const = { |
| 636 | .name = "softing", | ||
| 636 | .tseg1_min = 1, | 637 | .tseg1_min = 1, |
| 637 | .tseg1_max = 16, | 638 | .tseg1_max = 16, |
| 638 | .tseg2_min = 1, | 639 | .tseg2_min = 1, |
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c index 7ff170cbc7dc..302be4aa69d6 100644 --- a/drivers/net/cnic.c +++ b/drivers/net/cnic.c | |||
| @@ -2760,6 +2760,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
| 2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2760 | u32 status_idx = (u16) *cp->kcq1.status_idx_ptr; |
| 2761 | int kcqe_cnt; | 2761 | int kcqe_cnt; |
| 2762 | 2762 | ||
| 2763 | /* status block index must be read before reading other fields */ | ||
| 2764 | rmb(); | ||
| 2763 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2765 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
| 2764 | 2766 | ||
| 2765 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { | 2767 | while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) { |
| @@ -2770,6 +2772,8 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev) | |||
| 2770 | barrier(); | 2772 | barrier(); |
| 2771 | if (status_idx != *cp->kcq1.status_idx_ptr) { | 2773 | if (status_idx != *cp->kcq1.status_idx_ptr) { |
| 2772 | status_idx = (u16) *cp->kcq1.status_idx_ptr; | 2774 | status_idx = (u16) *cp->kcq1.status_idx_ptr; |
| 2775 | /* status block index must be read first */ | ||
| 2776 | rmb(); | ||
| 2773 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; | 2777 | cp->kwq_con_idx = *cp->kwq_con_idx_ptr; |
| 2774 | } else | 2778 | } else |
| 2775 | break; | 2779 | break; |
| @@ -2888,6 +2892,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
| 2888 | u32 last_status = *info->status_idx_ptr; | 2892 | u32 last_status = *info->status_idx_ptr; |
| 2889 | int kcqe_cnt; | 2893 | int kcqe_cnt; |
| 2890 | 2894 | ||
| 2895 | /* status block index must be read before reading the KCQ */ | ||
| 2896 | rmb(); | ||
| 2891 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { | 2897 | while ((kcqe_cnt = cnic_get_kcqes(dev, info))) { |
| 2892 | 2898 | ||
| 2893 | service_kcqes(dev, kcqe_cnt); | 2899 | service_kcqes(dev, kcqe_cnt); |
| @@ -2898,6 +2904,8 @@ static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info) | |||
| 2898 | break; | 2904 | break; |
| 2899 | 2905 | ||
| 2900 | last_status = *info->status_idx_ptr; | 2906 | last_status = *info->status_idx_ptr; |
| 2907 | /* status block index must be read before reading the KCQ */ | ||
| 2908 | rmb(); | ||
| 2901 | } | 2909 | } |
| 2902 | return last_status; | 2910 | return last_status; |
| 2903 | } | 2911 | } |
| @@ -2906,26 +2914,35 @@ static void cnic_service_bnx2x_bh(unsigned long data) | |||
| 2906 | { | 2914 | { |
| 2907 | struct cnic_dev *dev = (struct cnic_dev *) data; | 2915 | struct cnic_dev *dev = (struct cnic_dev *) data; |
| 2908 | struct cnic_local *cp = dev->cnic_priv; | 2916 | struct cnic_local *cp = dev->cnic_priv; |
| 2909 | u32 status_idx; | 2917 | u32 status_idx, new_status_idx; |
| 2910 | 2918 | ||
| 2911 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) | 2919 | if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) |
| 2912 | return; | 2920 | return; |
| 2913 | 2921 | ||
| 2914 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | 2922 | while (1) { |
| 2923 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); | ||
| 2915 | 2924 | ||
| 2916 | CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | 2925 | CNIC_WR16(dev, cp->kcq1.io_addr, |
| 2926 | cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); | ||
| 2917 | 2927 | ||
| 2918 | if (BNX2X_CHIP_IS_E2(cp->chip_id)) { | 2928 | if (!BNX2X_CHIP_IS_E2(cp->chip_id)) { |
| 2919 | status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | 2929 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, |
| 2930 | status_idx, IGU_INT_ENABLE, 1); | ||
| 2931 | break; | ||
| 2932 | } | ||
| 2933 | |||
| 2934 | new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2); | ||
| 2935 | |||
| 2936 | if (new_status_idx != status_idx) | ||
| 2937 | continue; | ||
| 2920 | 2938 | ||
| 2921 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + | 2939 | CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx + |
| 2922 | MAX_KCQ_IDX); | 2940 | MAX_KCQ_IDX); |
| 2923 | 2941 | ||
| 2924 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, | 2942 | cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, |
| 2925 | status_idx, IGU_INT_ENABLE, 1); | 2943 | status_idx, IGU_INT_ENABLE, 1); |
| 2926 | } else { | 2944 | |
| 2927 | cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, | 2945 | break; |
| 2928 | status_idx, IGU_INT_ENABLE, 1); | ||
| 2929 | } | 2946 | } |
| 2930 | } | 2947 | } |
| 2931 | 2948 | ||
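The cnic hunks are all about read ordering against a status block the hardware updates by DMA: the driver samples the status index first, issues rmb(), and only then reads the consumer indices and KCQ entries, so it never consumes entries newer than the index it is about to acknowledge; the bnx2x bottom half additionally loops until kcq2's index stops moving before the final ack. A condensed sketch of the barrier part (field names follow the hunks, the surrounding structures are assumed):

    /* Sketch: read the DMA'd status index, fence, then read the fields the
     * index guards; re-sample and fence again before each further pass. */
    static u32 example_poll_status_block(struct cnic_local *cp)
    {
            u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;

            rmb();          /* index must be read before the fields it guards */

            cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
            /* ... process the KCQ entries implied by status_idx ... */
            return status_idx;
    }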
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 2a628d17d178..7018bfe408a4 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
| @@ -1008,7 +1008,7 @@ static void emac_rx_handler(void *token, int len, int status) | |||
| 1008 | int ret; | 1008 | int ret; |
| 1009 | 1009 | ||
| 1010 | /* free and bail if we are shutting down */ | 1010 | /* free and bail if we are shutting down */ |
| 1011 | if (unlikely(!netif_running(ndev))) { | 1011 | if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { |
| 1012 | dev_kfree_skb_any(skb); | 1012 | dev_kfree_skb_any(skb); |
| 1013 | return; | 1013 | return; |
| 1014 | } | 1014 | } |
diff --git a/drivers/net/dnet.c b/drivers/net/dnet.c index 9d8a20b72fa9..8318ea06cb6d 100644 --- a/drivers/net/dnet.c +++ b/drivers/net/dnet.c | |||
| @@ -337,8 +337,6 @@ static int dnet_mii_init(struct dnet *bp) | |||
| 337 | for (i = 0; i < PHY_MAX_ADDR; i++) | 337 | for (i = 0; i < PHY_MAX_ADDR; i++) |
| 338 | bp->mii_bus->irq[i] = PHY_POLL; | 338 | bp->mii_bus->irq[i] = PHY_POLL; |
| 339 | 339 | ||
| 340 | platform_set_drvdata(bp->dev, bp->mii_bus); | ||
| 341 | |||
| 342 | if (mdiobus_register(bp->mii_bus)) { | 340 | if (mdiobus_register(bp->mii_bus)) { |
| 343 | err = -ENXIO; | 341 | err = -ENXIO; |
| 344 | goto err_out_free_mdio_irq; | 342 | goto err_out_free_mdio_irq; |
| @@ -863,6 +861,7 @@ static int __devinit dnet_probe(struct platform_device *pdev) | |||
| 863 | bp = netdev_priv(dev); | 861 | bp = netdev_priv(dev); |
| 864 | bp->dev = dev; | 862 | bp->dev = dev; |
| 865 | 863 | ||
| 864 | platform_set_drvdata(pdev, dev); | ||
| 866 | SET_NETDEV_DEV(dev, &pdev->dev); | 865 | SET_NETDEV_DEV(dev, &pdev->dev); |
| 867 | 866 | ||
| 868 | spin_lock_init(&bp->lock); | 867 | spin_lock_init(&bp->lock); |
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h index 55c1711f1688..33e7c45a4fe4 100644 --- a/drivers/net/e1000/e1000_osdep.h +++ b/drivers/net/e1000/e1000_osdep.h | |||
| @@ -42,7 +42,8 @@ | |||
| 42 | #define GBE_CONFIG_RAM_BASE \ | 42 | #define GBE_CONFIG_RAM_BASE \ |
| 43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) | 43 | ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET)) |
| 44 | 44 | ||
| 45 | #define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE) | 45 | #define GBE_CONFIG_BASE_VIRT \ |
| 46 | ((void __iomem *)phys_to_virt(GBE_CONFIG_RAM_BASE)) | ||
| 46 | 47 | ||
| 47 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ | 48 | #define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \ |
| 48 | (iowrite16_rep(base + offset, data, count)) | 49 | (iowrite16_rep(base + offset, data, count)) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3fa110ddb041..2e5022849f18 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
| @@ -5967,7 +5967,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev, | |||
| 5967 | /* APME bit in EEPROM is mapped to WUC.APME */ | 5967 | /* APME bit in EEPROM is mapped to WUC.APME */ |
| 5968 | eeprom_data = er32(WUC); | 5968 | eeprom_data = er32(WUC); |
| 5969 | eeprom_apme_mask = E1000_WUC_APME; | 5969 | eeprom_apme_mask = E1000_WUC_APME; |
| 5970 | if (eeprom_data & E1000_WUC_PHY_WAKE) | 5970 | if ((hw->mac.type > e1000_ich10lan) && |
| 5971 | (eeprom_data & E1000_WUC_PHY_WAKE)) | ||
| 5971 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; | 5972 | adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP; |
| 5972 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { | 5973 | } else if (adapter->flags & FLAG_APME_IN_CTRL3) { |
| 5973 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && | 5974 | if (adapter->flags & FLAG_APME_CHECK_PORT_B && |
diff --git a/drivers/net/fec.c b/drivers/net/fec.c index 2a71373719ae..cd0282d5d40f 100644 --- a/drivers/net/fec.c +++ b/drivers/net/fec.c | |||
| @@ -74,7 +74,8 @@ static struct platform_device_id fec_devtype[] = { | |||
| 74 | }, { | 74 | }, { |
| 75 | .name = "imx28-fec", | 75 | .name = "imx28-fec", |
| 76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, | 76 | .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, |
| 77 | } | 77 | }, |
| 78 | { } | ||
| 78 | }; | 79 | }; |
| 79 | 80 | ||
| 80 | static unsigned char macaddr[ETH_ALEN]; | 81 | static unsigned char macaddr[ETH_ALEN]; |
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c index 74486a8b009a..af3822f9ea9a 100644 --- a/drivers/net/igbvf/vf.c +++ b/drivers/net/igbvf/vf.c | |||
| @@ -220,7 +220,7 @@ static u32 e1000_hash_mc_addr_vf(struct e1000_hw *hw, u8 *mc_addr) | |||
| 220 | * The parameter rar_count will usually be hw->mac.rar_entry_count | 220 | * The parameter rar_count will usually be hw->mac.rar_entry_count |
| 221 | * unless there are workarounds that change this. | 221 | * unless there are workarounds that change this. |
| 222 | **/ | 222 | **/ |
| 223 | void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, | 223 | static void e1000_update_mc_addr_list_vf(struct e1000_hw *hw, |
| 224 | u8 *mc_addr_list, u32 mc_addr_count, | 224 | u8 *mc_addr_list, u32 mc_addr_count, |
| 225 | u32 rar_used_count, u32 rar_count) | 225 | u32 rar_used_count, u32 rar_count) |
| 226 | { | 226 | { |
diff --git a/drivers/net/macb.c b/drivers/net/macb.c index f69e73e2191e..79ccb54ab00c 100644 --- a/drivers/net/macb.c +++ b/drivers/net/macb.c | |||
| @@ -260,7 +260,7 @@ static int macb_mii_init(struct macb *bp) | |||
| 260 | for (i = 0; i < PHY_MAX_ADDR; i++) | 260 | for (i = 0; i < PHY_MAX_ADDR; i++) |
| 261 | bp->mii_bus->irq[i] = PHY_POLL; | 261 | bp->mii_bus->irq[i] = PHY_POLL; |
| 262 | 262 | ||
| 263 | platform_set_drvdata(bp->dev, bp->mii_bus); | 263 | dev_set_drvdata(&bp->dev->dev, bp->mii_bus); |
| 264 | 264 | ||
| 265 | if (mdiobus_register(bp->mii_bus)) | 265 | if (mdiobus_register(bp->mii_bus)) |
| 266 | goto err_out_free_mdio_irq; | 266 | goto err_out_free_mdio_irq; |
diff --git a/drivers/net/pcmcia/fmvj18x_cs.c b/drivers/net/pcmcia/fmvj18x_cs.c index 9226cda4d054..530ab5a10bd3 100644 --- a/drivers/net/pcmcia/fmvj18x_cs.c +++ b/drivers/net/pcmcia/fmvj18x_cs.c | |||
| @@ -691,6 +691,7 @@ static struct pcmcia_device_id fmvj18x_ids[] = { | |||
| 691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), | 691 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0105, 0x0e0a), |
| 692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), | 692 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0e01), |
| 693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), | 693 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0a05), |
| 694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x0b05), | ||
| 694 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), | 695 | PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x0032, 0x1101), |
| 695 | PCMCIA_DEVICE_NULL, | 696 | PCMCIA_DEVICE_NULL, |
| 696 | }; | 697 | }; |
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index ef2133b16f8c..7ffdb80adf40 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #include <linux/dma-mapping.h> | 25 | #include <linux/dma-mapping.h> |
| 26 | #include <linux/pm_runtime.h> | 26 | #include <linux/pm_runtime.h> |
| 27 | #include <linux/firmware.h> | 27 | #include <linux/firmware.h> |
| 28 | #include <linux/pci-aspm.h> | ||
| 28 | 29 | ||
| 29 | #include <asm/system.h> | 30 | #include <asm/system.h> |
| 30 | #include <asm/io.h> | 31 | #include <asm/io.h> |
| @@ -3020,6 +3021,11 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 3020 | mii->reg_num_mask = 0x1f; | 3021 | mii->reg_num_mask = 0x1f; |
| 3021 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); | 3022 | mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII); |
| 3022 | 3023 | ||
| 3024 | /* disable ASPM completely as it causes random devices to stop working, | ||
| 3025 | * as well as full system hangs, for some PCIe device users */ | ||
| 3026 | pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 | | ||
| 3027 | PCIE_LINK_STATE_CLKPM); | ||
| 3028 | |||
| 3023 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ | 3029 | /* enable device (incl. PCI PM wakeup and hotplug setup) */ |
| 3024 | rc = pci_enable_device(pdev); | 3030 | rc = pci_enable_device(pdev); |
| 3025 | if (rc < 0) { | 3031 | if (rc < 0) { |
diff --git a/drivers/net/skge.c b/drivers/net/skge.c index 42daf98ba736..35b28f42d208 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c | |||
| @@ -3856,9 +3856,6 @@ static struct net_device *skge_devinit(struct skge_hw *hw, int port, | |||
| 3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); | 3856 | memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port*8, ETH_ALEN); |
| 3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); | 3857 | memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); |
| 3858 | 3858 | ||
| 3859 | /* device is off until link detection */ | ||
| 3860 | netif_carrier_off(dev); | ||
| 3861 | |||
| 3862 | return dev; | 3859 | return dev; |
| 3863 | } | 3860 | } |
| 3864 | 3861 | ||
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c index 5ab3084eb9cb..07b1633b7f3f 100644 --- a/drivers/net/wireless/ath/ath9k/hif_usb.c +++ b/drivers/net/wireless/ath/ath9k/hif_usb.c | |||
| @@ -219,8 +219,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
| 219 | struct tx_buf *tx_buf = NULL; | 219 | struct tx_buf *tx_buf = NULL; |
| 220 | struct sk_buff *nskb = NULL; | 220 | struct sk_buff *nskb = NULL; |
| 221 | int ret = 0, i; | 221 | int ret = 0, i; |
| 222 | u16 *hdr, tx_skb_cnt = 0; | 222 | u16 tx_skb_cnt = 0; |
| 223 | u8 *buf; | 223 | u8 *buf; |
| 224 | __le16 *hdr; | ||
| 224 | 225 | ||
| 225 | if (hif_dev->tx.tx_skb_cnt == 0) | 226 | if (hif_dev->tx.tx_skb_cnt == 0) |
| 226 | return 0; | 227 | return 0; |
| @@ -245,9 +246,9 @@ static int __hif_usb_tx(struct hif_device_usb *hif_dev) | |||
| 245 | 246 | ||
| 246 | buf = tx_buf->buf; | 247 | buf = tx_buf->buf; |
| 247 | buf += tx_buf->offset; | 248 | buf += tx_buf->offset; |
| 248 | hdr = (u16 *)buf; | 249 | hdr = (__le16 *)buf; |
| 249 | *hdr++ = nskb->len; | 250 | *hdr++ = cpu_to_le16(nskb->len); |
| 250 | *hdr++ = ATH_USB_TX_STREAM_MODE_TAG; | 251 | *hdr++ = cpu_to_le16(ATH_USB_TX_STREAM_MODE_TAG); |
| 251 | buf += 4; | 252 | buf += 4; |
| 252 | memcpy(buf, nskb->data, nskb->len); | 253 | memcpy(buf, nskb->data, nskb->len); |
| 253 | tx_buf->len = nskb->len + 4; | 254 | tx_buf->len = nskb->len + 4; |
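The hif_usb fix makes the 4-byte stream header explicitly little-endian: the header pointer becomes __le16 * and both the length and the stream-mode tag go through cpu_to_le16(), so the wire layout is the same on big-endian hosts. A small sketch of emitting such a header (the buffer handling is illustrative):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    /* Sketch: write a little-endian { length, tag } header ahead of a payload. */
    static void example_put_stream_hdr(u8 *buf, u16 payload_len, u16 tag)
    {
            __le16 *hdr = (__le16 *)buf;

            *hdr++ = cpu_to_le16(payload_len);      /* stored LSB-first on any host */
            *hdr = cpu_to_le16(tag);
    }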
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c index 180170d3ce25..2915b11edefb 100644 --- a/drivers/net/wireless/ath/ath9k/mac.c +++ b/drivers/net/wireless/ath/ath9k/mac.c | |||
| @@ -885,7 +885,7 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
| 885 | struct ath_common *common = ath9k_hw_common(ah); | 885 | struct ath_common *common = ath9k_hw_common(ah); |
| 886 | 886 | ||
| 887 | if (!(ints & ATH9K_INT_GLOBAL)) | 887 | if (!(ints & ATH9K_INT_GLOBAL)) |
| 888 | ath9k_hw_enable_interrupts(ah); | 888 | ath9k_hw_disable_interrupts(ah); |
| 889 | 889 | ||
| 890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); | 890 | ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); |
| 891 | 891 | ||
| @@ -963,7 +963,8 @@ void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) | |||
| 963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); | 963 | REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); |
| 964 | } | 964 | } |
| 965 | 965 | ||
| 966 | ath9k_hw_enable_interrupts(ah); | 966 | if (ints & ATH9K_INT_GLOBAL) |
| 967 | ath9k_hw_enable_interrupts(ah); | ||
| 967 | 968 | ||
| 968 | return; | 969 | return; |
| 969 | } | 970 | } |
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c index 537732e5964f..f82c400be288 100644 --- a/drivers/net/wireless/ath/carl9170/usb.c +++ b/drivers/net/wireless/ath/carl9170/usb.c | |||
| @@ -118,6 +118,8 @@ static struct usb_device_id carl9170_usb_ids[] = { | |||
| 118 | { USB_DEVICE(0x057c, 0x8402) }, | 118 | { USB_DEVICE(0x057c, 0x8402) }, |
| 119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ | 119 | /* Qwest/Actiontec 802AIN Wireless N USB Network Adapter */ |
| 120 | { USB_DEVICE(0x1668, 0x1200) }, | 120 | { USB_DEVICE(0x1668, 0x1200) }, |
| 121 | /* Airlive X.USB a/b/g/n */ | ||
| 122 | { USB_DEVICE(0x1b75, 0x9170) }, | ||
| 121 | 123 | ||
| 122 | /* terminate */ | 124 | /* terminate */ |
| 123 | {} | 125 | {} |
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c index 79ab0a6b1386..537fb8c84e3a 100644 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c | |||
| @@ -51,7 +51,7 @@ | |||
| 51 | #include "iwl-agn-debugfs.h" | 51 | #include "iwl-agn-debugfs.h" |
| 52 | 52 | ||
| 53 | /* Highest firmware API version supported */ | 53 | /* Highest firmware API version supported */ |
| 54 | #define IWL5000_UCODE_API_MAX 2 | 54 | #define IWL5000_UCODE_API_MAX 5 |
| 55 | #define IWL5150_UCODE_API_MAX 2 | 55 | #define IWL5150_UCODE_API_MAX 2 |
| 56 | 56 | ||
| 57 | /* Lowest firmware API version supported */ | 57 | /* Lowest firmware API version supported */ |
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 21713a7638c4..9b344a921e74 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c | |||
| @@ -98,6 +98,7 @@ static struct usb_device_id p54u_table[] __devinitdata = { | |||
| 98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ | 98 | {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ |
| 99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ | 99 | {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ |
| 100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ | 100 | {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ |
| 101 | {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ | ||
| 101 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ | 102 | {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ |
| 102 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ | 103 | {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ |
| 103 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ | 104 | {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ |
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c index 848cc2cce247..518542b4bf9e 100644 --- a/drivers/net/wireless/rndis_wlan.c +++ b/drivers/net/wireless/rndis_wlan.c | |||
| @@ -2597,6 +2597,9 @@ static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, | |||
| 2597 | __le32 mode; | 2597 | __le32 mode; |
| 2598 | int ret; | 2598 | int ret; |
| 2599 | 2599 | ||
| 2600 | if (priv->device_type != RNDIS_BCM4320B) | ||
| 2601 | return -ENOTSUPP; | ||
| 2602 | |||
| 2600 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, | 2603 | netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__, |
| 2601 | enabled ? "enabled" : "disabled", | 2604 | enabled ? "enabled" : "disabled", |
| 2602 | timeout); | 2605 | timeout); |
diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c index 28295d0a50f6..4d87b5dc9284 100644 --- a/drivers/of/pdt.c +++ b/drivers/of/pdt.c | |||
| @@ -36,19 +36,55 @@ unsigned int of_pdt_unique_id __initdata; | |||
| 36 | (p)->unique_id = of_pdt_unique_id++; \ | 36 | (p)->unique_id = of_pdt_unique_id++; \ |
| 37 | } while (0) | 37 | } while (0) |
| 38 | 38 | ||
| 39 | static inline const char *of_pdt_node_name(struct device_node *dp) | 39 | static char * __init of_pdt_build_full_name(struct device_node *dp) |
| 40 | { | 40 | { |
| 41 | return dp->path_component_name; | 41 | int len, ourlen, plen; |
| 42 | char *n; | ||
| 43 | |||
| 44 | dp->path_component_name = build_path_component(dp); | ||
| 45 | |||
| 46 | plen = strlen(dp->parent->full_name); | ||
| 47 | ourlen = strlen(dp->path_component_name); | ||
| 48 | len = ourlen + plen + 2; | ||
| 49 | |||
| 50 | n = prom_early_alloc(len); | ||
| 51 | strcpy(n, dp->parent->full_name); | ||
| 52 | if (!of_node_is_root(dp->parent)) { | ||
| 53 | strcpy(n + plen, "/"); | ||
| 54 | plen++; | ||
| 55 | } | ||
| 56 | strcpy(n + plen, dp->path_component_name); | ||
| 57 | |||
| 58 | return n; | ||
| 42 | } | 59 | } |
| 43 | 60 | ||
| 44 | #else | 61 | #else /* CONFIG_SPARC */ |
| 45 | 62 | ||
| 46 | static inline void of_pdt_incr_unique_id(void *p) { } | 63 | static inline void of_pdt_incr_unique_id(void *p) { } |
| 47 | static inline void irq_trans_init(struct device_node *dp) { } | 64 | static inline void irq_trans_init(struct device_node *dp) { } |
| 48 | 65 | ||
| 49 | static inline const char *of_pdt_node_name(struct device_node *dp) | 66 | static char * __init of_pdt_build_full_name(struct device_node *dp) |
| 50 | { | 67 | { |
| 51 | return dp->name; | 68 | static int failsafe_id = 0; /* for generating unique names on failure */ |
| 69 | char *buf; | ||
| 70 | int len; | ||
| 71 | |||
| 72 | if (of_pdt_prom_ops->pkg2path(dp->phandle, NULL, 0, &len)) | ||
| 73 | goto failsafe; | ||
| 74 | |||
| 75 | buf = prom_early_alloc(len + 1); | ||
| 76 | if (of_pdt_prom_ops->pkg2path(dp->phandle, buf, len, &len)) | ||
| 77 | goto failsafe; | ||
| 78 | return buf; | ||
| 79 | |||
| 80 | failsafe: | ||
| 81 | buf = prom_early_alloc(strlen(dp->parent->full_name) + | ||
| 82 | strlen(dp->name) + 16); | ||
| 83 | sprintf(buf, "%s/%s@unknown%i", | ||
| 84 | of_node_is_root(dp->parent) ? "" : dp->parent->full_name, | ||
| 85 | dp->name, failsafe_id++); | ||
| 86 | pr_err("%s: pkg2path failed; assigning %s\n", __func__, buf); | ||
| 87 | return buf; | ||
| 52 | } | 88 | } |
| 53 | 89 | ||
| 54 | #endif /* !CONFIG_SPARC */ | 90 | #endif /* !CONFIG_SPARC */ |
| @@ -132,47 +168,6 @@ static char * __init of_pdt_get_one_property(phandle node, const char *name) | |||
| 132 | return buf; | 168 | return buf; |
| 133 | } | 169 | } |
| 134 | 170 | ||
| 135 | static char * __init of_pdt_try_pkg2path(phandle node) | ||
| 136 | { | ||
| 137 | char *res, *buf = NULL; | ||
| 138 | int len; | ||
| 139 | |||
| 140 | if (!of_pdt_prom_ops->pkg2path) | ||
| 141 | return NULL; | ||
| 142 | |||
| 143 | if (of_pdt_prom_ops->pkg2path(node, buf, 0, &len)) | ||
| 144 | return NULL; | ||
| 145 | buf = prom_early_alloc(len + 1); | ||
| 146 | if (of_pdt_prom_ops->pkg2path(node, buf, len, &len)) { | ||
| 147 | pr_err("%s: package-to-path failed\n", __func__); | ||
| 148 | return NULL; | ||
| 149 | } | ||
| 150 | |||
| 151 | res = strrchr(buf, '/'); | ||
| 152 | if (!res) { | ||
| 153 | pr_err("%s: couldn't find / in %s\n", __func__, buf); | ||
| 154 | return NULL; | ||
| 155 | } | ||
| 156 | return res+1; | ||
| 157 | } | ||
| 158 | |||
| 159 | /* | ||
| 160 | * When fetching the node's name, first try using package-to-path; if | ||
| 161 | * that fails (either because the arch hasn't supplied a PROM callback, | ||
| 162 | * or some other random failure), fall back to just looking at the node's | ||
| 163 | * 'name' property. | ||
| 164 | */ | ||
| 165 | static char * __init of_pdt_build_name(phandle node) | ||
| 166 | { | ||
| 167 | char *buf; | ||
| 168 | |||
| 169 | buf = of_pdt_try_pkg2path(node); | ||
| 170 | if (!buf) | ||
| 171 | buf = of_pdt_get_one_property(node, "name"); | ||
| 172 | |||
| 173 | return buf; | ||
| 174 | } | ||
| 175 | |||
| 176 | static struct device_node * __init of_pdt_create_node(phandle node, | 171 | static struct device_node * __init of_pdt_create_node(phandle node, |
| 177 | struct device_node *parent) | 172 | struct device_node *parent) |
| 178 | { | 173 | { |
| @@ -187,7 +182,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
| 187 | 182 | ||
| 188 | kref_init(&dp->kref); | 183 | kref_init(&dp->kref); |
| 189 | 184 | ||
| 190 | dp->name = of_pdt_build_name(node); | 185 | dp->name = of_pdt_get_one_property(node, "name"); |
| 191 | dp->type = of_pdt_get_one_property(node, "device_type"); | 186 | dp->type = of_pdt_get_one_property(node, "device_type"); |
| 192 | dp->phandle = node; | 187 | dp->phandle = node; |
| 193 | 188 | ||
| @@ -198,26 +193,6 @@ static struct device_node * __init of_pdt_create_node(phandle node, | |||
| 198 | return dp; | 193 | return dp; |
| 199 | } | 194 | } |
| 200 | 195 | ||
| 201 | static char * __init of_pdt_build_full_name(struct device_node *dp) | ||
| 202 | { | ||
| 203 | int len, ourlen, plen; | ||
| 204 | char *n; | ||
| 205 | |||
| 206 | plen = strlen(dp->parent->full_name); | ||
| 207 | ourlen = strlen(of_pdt_node_name(dp)); | ||
| 208 | len = ourlen + plen + 2; | ||
| 209 | |||
| 210 | n = prom_early_alloc(len); | ||
| 211 | strcpy(n, dp->parent->full_name); | ||
| 212 | if (!of_node_is_root(dp->parent)) { | ||
| 213 | strcpy(n + plen, "/"); | ||
| 214 | plen++; | ||
| 215 | } | ||
| 216 | strcpy(n + plen, of_pdt_node_name(dp)); | ||
| 217 | |||
| 218 | return n; | ||
| 219 | } | ||
| 220 | |||
| 221 | static struct device_node * __init of_pdt_build_tree(struct device_node *parent, | 196 | static struct device_node * __init of_pdt_build_tree(struct device_node *parent, |
| 222 | phandle node, | 197 | phandle node, |
| 223 | struct device_node ***nextp) | 198 | struct device_node ***nextp) |
| @@ -240,9 +215,6 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, | |||
| 240 | *(*nextp) = dp; | 215 | *(*nextp) = dp; |
| 241 | *nextp = &dp->allnext; | 216 | *nextp = &dp->allnext; |
| 242 | 217 | ||
| 243 | #if defined(CONFIG_SPARC) | ||
| 244 | dp->path_component_name = build_path_component(dp); | ||
| 245 | #endif | ||
| 246 | dp->full_name = of_pdt_build_full_name(dp); | 218 | dp->full_name = of_pdt_build_full_name(dp); |
| 247 | 219 | ||
| 248 | dp->child = of_pdt_build_tree(dp, | 220 | dp->child = of_pdt_build_tree(dp, |
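For reference, the failsafe branch added above synthesizes a unique path component whenever the firmware's pkg2path callback fails. An illustrative worked example of what that sprintf() produces, with the node and parent names invented here:

    /*
     * Illustrative only, not part of the patch: with a parent full_name of
     * "/pci@1f,0", a node name of "ethernet" and failsafe_id == 0, the
     * failsafe branch builds
     *
     *         sprintf(buf, "%s/%s@unknown%i", "/pci@1f,0", "ethernet", 0);
     *         --> buf == "/pci@1f,0/ethernet@unknown0"
     *
     * The extra 16 bytes in the prom_early_alloc() size cover the '/', the
     * "@unknown" suffix, the counter digits and the terminating NUL.
     */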
diff --git a/drivers/pcmcia/pxa2xx_colibri.c b/drivers/pcmcia/pxa2xx_colibri.c index c3f72192af66..a52039564e74 100644 --- a/drivers/pcmcia/pxa2xx_colibri.c +++ b/drivers/pcmcia/pxa2xx_colibri.c | |||
| @@ -181,6 +181,9 @@ static int __init colibri_pcmcia_init(void) | |||
| 181 | { | 181 | { |
| 182 | int ret; | 182 | int ret; |
| 183 | 183 | ||
| 184 | if (!machine_is_colibri() && !machine_is_colibri320()) | ||
| 185 | return -ENODEV; | ||
| 186 | |||
| 184 | colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); | 187 | colibri_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1); |
| 185 | if (!colibri_pcmcia_device) | 188 | if (!colibri_pcmcia_device) |
| 186 | return -ENOMEM; | 189 | return -ENOMEM; |
diff --git a/drivers/pps/generators/Kconfig b/drivers/pps/generators/Kconfig index f3a73dd77660..e4c4f3dc0728 100644 --- a/drivers/pps/generators/Kconfig +++ b/drivers/pps/generators/Kconfig | |||
| @@ -6,7 +6,7 @@ comment "PPS generators support" | |||
| 6 | 6 | ||
| 7 | config PPS_GENERATOR_PARPORT | 7 | config PPS_GENERATOR_PARPORT |
| 8 | tristate "Parallel port PPS signal generator" | 8 | tristate "Parallel port PPS signal generator" |
| 9 | depends on PARPORT | 9 | depends on PARPORT && BROKEN |
| 10 | help | 10 | help |
| 11 | If you say yes here you get support for a PPS signal generator which | 11 | If you say yes here you get support for a PPS signal generator which |
| 12 | utilizes STROBE pin of a parallel port to send PPS signals. It uses | 12 | utilizes STROBE pin of a parallel port to send PPS signals. It uses |
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index cf953ecbfca9..b80fa2882408 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c | |||
| @@ -77,18 +77,20 @@ static irqreturn_t s3c_rtc_tickirq(int irq, void *id) | |||
| 77 | } | 77 | } |
| 78 | 78 | ||
| 79 | /* Update control registers */ | 79 | /* Update control registers */ |
| 80 | static void s3c_rtc_setaie(int to) | 80 | static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) |
| 81 | { | 81 | { |
| 82 | unsigned int tmp; | 82 | unsigned int tmp; |
| 83 | 83 | ||
| 84 | pr_debug("%s: aie=%d\n", __func__, to); | 84 | pr_debug("%s: aie=%d\n", __func__, enabled); |
| 85 | 85 | ||
| 86 | tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; | 86 | tmp = readb(s3c_rtc_base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; |
| 87 | 87 | ||
| 88 | if (to) | 88 | if (enabled) |
| 89 | tmp |= S3C2410_RTCALM_ALMEN; | 89 | tmp |= S3C2410_RTCALM_ALMEN; |
| 90 | 90 | ||
| 91 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); | 91 | writeb(tmp, s3c_rtc_base + S3C2410_RTCALM); |
| 92 | |||
| 93 | return 0; | ||
| 92 | } | 94 | } |
| 93 | 95 | ||
| 94 | static int s3c_rtc_setpie(struct device *dev, int enabled) | 96 | static int s3c_rtc_setpie(struct device *dev, int enabled) |
| @@ -308,7 +310,7 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) | |||
| 308 | 310 | ||
| 309 | writeb(alrm_en, base + S3C2410_RTCALM); | 311 | writeb(alrm_en, base + S3C2410_RTCALM); |
| 310 | 312 | ||
| 311 | s3c_rtc_setaie(alrm->enabled); | 313 | s3c_rtc_setaie(dev, alrm->enabled); |
| 312 | 314 | ||
| 313 | return 0; | 315 | return 0; |
| 314 | } | 316 | } |
| @@ -440,7 +442,7 @@ static int __devexit s3c_rtc_remove(struct platform_device *dev) | |||
| 440 | rtc_device_unregister(rtc); | 442 | rtc_device_unregister(rtc); |
| 441 | 443 | ||
| 442 | s3c_rtc_setpie(&dev->dev, 0); | 444 | s3c_rtc_setpie(&dev->dev, 0); |
| 443 | s3c_rtc_setaie(0); | 445 | s3c_rtc_setaie(&dev->dev, 0); |
| 444 | 446 | ||
| 445 | clk_disable(rtc_clk); | 447 | clk_disable(rtc_clk); |
| 446 | clk_put(rtc_clk); | 448 | clk_put(rtc_clk); |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index c881a14fa5dd..1f6a4d894e73 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
| @@ -62,8 +62,8 @@ static int xpram_devs; | |||
| 62 | /* | 62 | /* |
| 63 | * Parameter parsing functions. | 63 | * Parameter parsing functions. |
| 64 | */ | 64 | */ |
| 65 | static int __initdata devs = XPRAM_DEVS; | 65 | static int devs = XPRAM_DEVS; |
| 66 | static char __initdata *sizes[XPRAM_MAX_DEVS]; | 66 | static char *sizes[XPRAM_MAX_DEVS]; |
| 67 | 67 | ||
| 68 | module_param(devs, int, 0); | 68 | module_param(devs, int, 0); |
| 69 | module_param_array(sizes, charp, NULL, 0); | 69 | module_param_array(sizes, charp, NULL, 0); |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c index 8cd58e412b5e..5ad44daef73b 100644 --- a/drivers/s390/char/keyboard.c +++ b/drivers/s390/char/keyboard.c | |||
| @@ -460,7 +460,8 @@ kbd_ioctl(struct kbd_data *kbd, struct file *file, | |||
| 460 | unsigned int cmd, unsigned long arg) | 460 | unsigned int cmd, unsigned long arg) |
| 461 | { | 461 | { |
| 462 | void __user *argp; | 462 | void __user *argp; |
| 463 | int ct, perm; | 463 | unsigned int ct; |
| 464 | int perm; | ||
| 464 | 465 | ||
| 465 | argp = (void __user *)arg; | 466 | argp = (void __user *)arg; |
| 466 | 467 | ||
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 7a242f073632..267b54e8ff5a 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
| @@ -280,6 +280,14 @@ tape_do_io_free(struct tape_device *device, struct tape_request *request) | |||
| 280 | return rc; | 280 | return rc; |
| 281 | } | 281 | } |
| 282 | 282 | ||
| 283 | static inline void | ||
| 284 | tape_do_io_async_free(struct tape_device *device, struct tape_request *request) | ||
| 285 | { | ||
| 286 | request->callback = (void *) tape_free_request; | ||
| 287 | request->callback_data = NULL; | ||
| 288 | tape_do_io_async(device, request); | ||
| 289 | } | ||
| 290 | |||
| 283 | extern int tape_oper_handler(int irq, int status); | 291 | extern int tape_oper_handler(int irq, int status); |
| 284 | extern void tape_noper_handler(int irq, int status); | 292 | extern void tape_noper_handler(int irq, int status); |
| 285 | extern int tape_open(struct tape_device *); | 293 | extern int tape_open(struct tape_device *); |
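The new tape_do_io_async_free() helper above starts a request and lets tape_free_request() run as the completion callback, so the caller never waits and never has to clean up. A minimal usage sketch mirroring the medium-sense conversions later in this diff; the function name below is invented for illustration:

    /* Hypothetical fire-and-forget caller: build a one-CCW request and let
     * it free itself when the I/O completes.
     */
    static void example_msen_async(struct tape_device *device)
    {
            struct tape_request *request;

            request = tape_alloc_request(1, 32);
            if (IS_ERR(request))
                    return;         /* nothing was started, nothing to free */
            request->op = TO_MSEN;
            tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata);
            tape_do_io_async_free(device, request);
    }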
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index c17f35b6136a..c26511171ffe 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
| @@ -53,23 +53,11 @@ static void tape_34xx_delete_sbid_from(struct tape_device *, int); | |||
| 53 | * Medium sense for 34xx tapes. There is no 'real' medium sense call. | 53 | * Medium sense for 34xx tapes. There is no 'real' medium sense call. |
| 54 | * So we just do a normal sense. | 54 | * So we just do a normal sense. |
| 55 | */ | 55 | */ |
| 56 | static int | 56 | static void __tape_34xx_medium_sense(struct tape_request *request) |
| 57 | tape_34xx_medium_sense(struct tape_device *device) | ||
| 58 | { | 57 | { |
| 59 | struct tape_request *request; | 58 | struct tape_device *device = request->device; |
| 60 | unsigned char *sense; | 59 | unsigned char *sense; |
| 61 | int rc; | ||
| 62 | |||
| 63 | request = tape_alloc_request(1, 32); | ||
| 64 | if (IS_ERR(request)) { | ||
| 65 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
| 66 | return PTR_ERR(request); | ||
| 67 | } | ||
| 68 | |||
| 69 | request->op = TO_MSEN; | ||
| 70 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
| 71 | 60 | ||
| 72 | rc = tape_do_io_interruptible(device, request); | ||
| 73 | if (request->rc == 0) { | 61 | if (request->rc == 0) { |
| 74 | sense = request->cpdata; | 62 | sense = request->cpdata; |
| 75 | 63 | ||
| @@ -88,15 +76,47 @@ tape_34xx_medium_sense(struct tape_device *device) | |||
| 88 | device->tape_generic_status |= GMT_WR_PROT(~0); | 76 | device->tape_generic_status |= GMT_WR_PROT(~0); |
| 89 | else | 77 | else |
| 90 | device->tape_generic_status &= ~GMT_WR_PROT(~0); | 78 | device->tape_generic_status &= ~GMT_WR_PROT(~0); |
| 91 | } else { | 79 | } else |
| 92 | DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", | 80 | DBF_EVENT(4, "tape_34xx: medium sense failed with rc=%d\n", |
| 93 | request->rc); | 81 | request->rc); |
| 94 | } | ||
| 95 | tape_free_request(request); | 82 | tape_free_request(request); |
| 83 | } | ||
| 84 | |||
| 85 | static int tape_34xx_medium_sense(struct tape_device *device) | ||
| 86 | { | ||
| 87 | struct tape_request *request; | ||
| 88 | int rc; | ||
| 89 | |||
| 90 | request = tape_alloc_request(1, 32); | ||
| 91 | if (IS_ERR(request)) { | ||
| 92 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
| 93 | return PTR_ERR(request); | ||
| 94 | } | ||
| 96 | 95 | ||
| 96 | request->op = TO_MSEN; | ||
| 97 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
| 98 | rc = tape_do_io_interruptible(device, request); | ||
| 99 | __tape_34xx_medium_sense(request); | ||
| 97 | return rc; | 100 | return rc; |
| 98 | } | 101 | } |
| 99 | 102 | ||
| 103 | static void tape_34xx_medium_sense_async(struct tape_device *device) | ||
| 104 | { | ||
| 105 | struct tape_request *request; | ||
| 106 | |||
| 107 | request = tape_alloc_request(1, 32); | ||
| 108 | if (IS_ERR(request)) { | ||
| 109 | DBF_EXCEPTION(6, "MSEN fail\n"); | ||
| 110 | return; | ||
| 111 | } | ||
| 112 | |||
| 113 | request->op = TO_MSEN; | ||
| 114 | tape_ccw_end(request->cpaddr, SENSE, 32, request->cpdata); | ||
| 115 | request->callback = (void *) __tape_34xx_medium_sense; | ||
| 116 | request->callback_data = NULL; | ||
| 117 | tape_do_io_async(device, request); | ||
| 118 | } | ||
| 119 | |||
| 100 | struct tape_34xx_work { | 120 | struct tape_34xx_work { |
| 101 | struct tape_device *device; | 121 | struct tape_device *device; |
| 102 | enum tape_op op; | 122 | enum tape_op op; |
| @@ -109,6 +129,9 @@ struct tape_34xx_work { | |||
| 109 | * is inserted but cannot call tape_do_io* from an interrupt context. | 129 | * is inserted but cannot call tape_do_io* from an interrupt context. |
| 110 | * Maybe that's useful for other actions we want to start from the | 130 | * Maybe that's useful for other actions we want to start from the |
| 111 | * interrupt handler. | 131 | * interrupt handler. |
| 132 | * Note: the work handler is called by the system work queue. The tape | ||
| 133 | * commands started by the handler need to be asynchronous, otherwise | ||
| 134 | * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). | ||
| 112 | */ | 135 | */ |
| 113 | static void | 136 | static void |
| 114 | tape_34xx_work_handler(struct work_struct *work) | 137 | tape_34xx_work_handler(struct work_struct *work) |
| @@ -119,7 +142,7 @@ tape_34xx_work_handler(struct work_struct *work) | |||
| 119 | 142 | ||
| 120 | switch(p->op) { | 143 | switch(p->op) { |
| 121 | case TO_MSEN: | 144 | case TO_MSEN: |
| 122 | tape_34xx_medium_sense(device); | 145 | tape_34xx_medium_sense_async(device); |
| 123 | break; | 146 | break; |
| 124 | default: | 147 | default: |
| 125 | DBF_EVENT(3, "T34XX: internal error: unknown work\n"); | 148 | DBF_EVENT(3, "T34XX: internal error: unknown work\n"); |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index fbe361fcd2c0..de2e99e0a71b 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
| @@ -329,17 +329,17 @@ out: | |||
| 329 | /* | 329 | /* |
| 330 | * Enable encryption | 330 | * Enable encryption |
| 331 | */ | 331 | */ |
| 332 | static int tape_3592_enable_crypt(struct tape_device *device) | 332 | static struct tape_request *__tape_3592_enable_crypt(struct tape_device *device) |
| 333 | { | 333 | { |
| 334 | struct tape_request *request; | 334 | struct tape_request *request; |
| 335 | char *data; | 335 | char *data; |
| 336 | 336 | ||
| 337 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); | 337 | DBF_EVENT(6, "tape_3592_enable_crypt\n"); |
| 338 | if (!crypt_supported(device)) | 338 | if (!crypt_supported(device)) |
| 339 | return -ENOSYS; | 339 | return ERR_PTR(-ENOSYS); |
| 340 | request = tape_alloc_request(2, 72); | 340 | request = tape_alloc_request(2, 72); |
| 341 | if (IS_ERR(request)) | 341 | if (IS_ERR(request)) |
| 342 | return PTR_ERR(request); | 342 | return request; |
| 343 | data = request->cpdata; | 343 | data = request->cpdata; |
| 344 | memset(data,0,72); | 344 | memset(data,0,72); |
| 345 | 345 | ||
| @@ -354,23 +354,42 @@ static int tape_3592_enable_crypt(struct tape_device *device) | |||
| 354 | request->op = TO_CRYPT_ON; | 354 | request->op = TO_CRYPT_ON; |
| 355 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | 355 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); |
| 356 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | 356 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); |
| 357 | return request; | ||
| 358 | } | ||
| 359 | |||
| 360 | static int tape_3592_enable_crypt(struct tape_device *device) | ||
| 361 | { | ||
| 362 | struct tape_request *request; | ||
| 363 | |||
| 364 | request = __tape_3592_enable_crypt(device); | ||
| 365 | if (IS_ERR(request)) | ||
| 366 | return PTR_ERR(request); | ||
| 357 | return tape_do_io_free(device, request); | 367 | return tape_do_io_free(device, request); |
| 358 | } | 368 | } |
| 359 | 369 | ||
| 370 | static void tape_3592_enable_crypt_async(struct tape_device *device) | ||
| 371 | { | ||
| 372 | struct tape_request *request; | ||
| 373 | |||
| 374 | request = __tape_3592_enable_crypt(device); | ||
| 375 | if (!IS_ERR(request)) | ||
| 376 | tape_do_io_async_free(device, request); | ||
| 377 | } | ||
| 378 | |||
| 360 | /* | 379 | /* |
| 361 | * Disable encryption | 380 | * Disable encryption |
| 362 | */ | 381 | */ |
| 363 | static int tape_3592_disable_crypt(struct tape_device *device) | 382 | static struct tape_request *__tape_3592_disable_crypt(struct tape_device *device) |
| 364 | { | 383 | { |
| 365 | struct tape_request *request; | 384 | struct tape_request *request; |
| 366 | char *data; | 385 | char *data; |
| 367 | 386 | ||
| 368 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); | 387 | DBF_EVENT(6, "tape_3592_disable_crypt\n"); |
| 369 | if (!crypt_supported(device)) | 388 | if (!crypt_supported(device)) |
| 370 | return -ENOSYS; | 389 | return ERR_PTR(-ENOSYS); |
| 371 | request = tape_alloc_request(2, 72); | 390 | request = tape_alloc_request(2, 72); |
| 372 | if (IS_ERR(request)) | 391 | if (IS_ERR(request)) |
| 373 | return PTR_ERR(request); | 392 | return request; |
| 374 | data = request->cpdata; | 393 | data = request->cpdata; |
| 375 | memset(data,0,72); | 394 | memset(data,0,72); |
| 376 | 395 | ||
| @@ -383,9 +402,28 @@ static int tape_3592_disable_crypt(struct tape_device *device) | |||
| 383 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); | 402 | tape_ccw_cc(request->cpaddr, MODE_SET_CB, 36, data); |
| 384 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); | 403 | tape_ccw_end(request->cpaddr + 1, MODE_SET_CB, 36, data + 36); |
| 385 | 404 | ||
| 405 | return request; | ||
| 406 | } | ||
| 407 | |||
| 408 | static int tape_3592_disable_crypt(struct tape_device *device) | ||
| 409 | { | ||
| 410 | struct tape_request *request; | ||
| 411 | |||
| 412 | request = __tape_3592_disable_crypt(device); | ||
| 413 | if (IS_ERR(request)) | ||
| 414 | return PTR_ERR(request); | ||
| 386 | return tape_do_io_free(device, request); | 415 | return tape_do_io_free(device, request); |
| 387 | } | 416 | } |
| 388 | 417 | ||
| 418 | static void tape_3592_disable_crypt_async(struct tape_device *device) | ||
| 419 | { | ||
| 420 | struct tape_request *request; | ||
| 421 | |||
| 422 | request = __tape_3592_disable_crypt(device); | ||
| 423 | if (!IS_ERR(request)) | ||
| 424 | tape_do_io_async_free(device, request); | ||
| 425 | } | ||
| 426 | |||
| 389 | /* | 427 | /* |
| 390 | * IOCTL: Set encryption status | 428 | * IOCTL: Set encryption status |
| 391 | */ | 429 | */ |
| @@ -457,8 +495,7 @@ tape_3590_ioctl(struct tape_device *device, unsigned int cmd, unsigned long arg) | |||
| 457 | /* | 495 | /* |
| 458 | * SENSE Medium: Get Sense data about medium state | 496 | * SENSE Medium: Get Sense data about medium state |
| 459 | */ | 497 | */ |
| 460 | static int | 498 | static int tape_3590_sense_medium(struct tape_device *device) |
| 461 | tape_3590_sense_medium(struct tape_device *device) | ||
| 462 | { | 499 | { |
| 463 | struct tape_request *request; | 500 | struct tape_request *request; |
| 464 | 501 | ||
| @@ -470,6 +507,18 @@ tape_3590_sense_medium(struct tape_device *device) | |||
| 470 | return tape_do_io_free(device, request); | 507 | return tape_do_io_free(device, request); |
| 471 | } | 508 | } |
| 472 | 509 | ||
| 510 | static void tape_3590_sense_medium_async(struct tape_device *device) | ||
| 511 | { | ||
| 512 | struct tape_request *request; | ||
| 513 | |||
| 514 | request = tape_alloc_request(1, 128); | ||
| 515 | if (IS_ERR(request)) | ||
| 516 | return; | ||
| 517 | request->op = TO_MSEN; | ||
| 518 | tape_ccw_end(request->cpaddr, MEDIUM_SENSE, 128, request->cpdata); | ||
| 519 | tape_do_io_async_free(device, request); | ||
| 520 | } | ||
| 521 | |||
| 473 | /* | 522 | /* |
| 474 | * MTTELL: Tell block. Return the number of block relative to current file. | 523 | * MTTELL: Tell block. Return the number of block relative to current file. |
| 475 | */ | 524 | */ |
| @@ -546,15 +595,14 @@ tape_3590_read_opposite(struct tape_device *device, | |||
| 546 | * 2. The attention msg is written to the "read subsystem data" buffer. | 595 | * 2. The attention msg is written to the "read subsystem data" buffer. |
| 547 | * In this case we probably should print it to the console. | 596 | * In this case we probably should print it to the console. |
| 548 | */ | 597 | */ |
| 549 | static int | 598 | static void tape_3590_read_attmsg_async(struct tape_device *device) |
| 550 | tape_3590_read_attmsg(struct tape_device *device) | ||
| 551 | { | 599 | { |
| 552 | struct tape_request *request; | 600 | struct tape_request *request; |
| 553 | char *buf; | 601 | char *buf; |
| 554 | 602 | ||
| 555 | request = tape_alloc_request(3, 4096); | 603 | request = tape_alloc_request(3, 4096); |
| 556 | if (IS_ERR(request)) | 604 | if (IS_ERR(request)) |
| 557 | return PTR_ERR(request); | 605 | return; |
| 558 | request->op = TO_READ_ATTMSG; | 606 | request->op = TO_READ_ATTMSG; |
| 559 | buf = request->cpdata; | 607 | buf = request->cpdata; |
| 560 | buf[0] = PREP_RD_SS_DATA; | 608 | buf[0] = PREP_RD_SS_DATA; |
| @@ -562,12 +610,15 @@ tape_3590_read_attmsg(struct tape_device *device) | |||
| 562 | tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); | 610 | tape_ccw_cc(request->cpaddr, PERFORM_SS_FUNC, 12, buf); |
| 563 | tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); | 611 | tape_ccw_cc(request->cpaddr + 1, READ_SS_DATA, 4096 - 12, buf + 12); |
| 564 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); | 612 | tape_ccw_end(request->cpaddr + 2, NOP, 0, NULL); |
| 565 | return tape_do_io_free(device, request); | 613 | tape_do_io_async_free(device, request); |
| 566 | } | 614 | } |
| 567 | 615 | ||
| 568 | /* | 616 | /* |
| 569 | * These functions are used to schedule follow-up actions from within an | 617 | * These functions are used to schedule follow-up actions from within an |
| 570 | * interrupt context (like unsolicited interrupts). | 618 | * interrupt context (like unsolicited interrupts). |
| 619 | * Note: the work handler is called by the system work queue. The tape | ||
| 620 | * commands started by the handler need to be asynchronous, otherwise | ||
| 621 | * a deadlock can occur e.g. in case of a deferred cc=1 (see __tape_do_irq). | ||
| 571 | */ | 622 | */ |
| 572 | struct work_handler_data { | 623 | struct work_handler_data { |
| 573 | struct tape_device *device; | 624 | struct tape_device *device; |
| @@ -583,16 +634,16 @@ tape_3590_work_handler(struct work_struct *work) | |||
| 583 | 634 | ||
| 584 | switch (p->op) { | 635 | switch (p->op) { |
| 585 | case TO_MSEN: | 636 | case TO_MSEN: |
| 586 | tape_3590_sense_medium(p->device); | 637 | tape_3590_sense_medium_async(p->device); |
| 587 | break; | 638 | break; |
| 588 | case TO_READ_ATTMSG: | 639 | case TO_READ_ATTMSG: |
| 589 | tape_3590_read_attmsg(p->device); | 640 | tape_3590_read_attmsg_async(p->device); |
| 590 | break; | 641 | break; |
| 591 | case TO_CRYPT_ON: | 642 | case TO_CRYPT_ON: |
| 592 | tape_3592_enable_crypt(p->device); | 643 | tape_3592_enable_crypt_async(p->device); |
| 593 | break; | 644 | break; |
| 594 | case TO_CRYPT_OFF: | 645 | case TO_CRYPT_OFF: |
| 595 | tape_3592_disable_crypt(p->device); | 646 | tape_3592_disable_crypt_async(p->device); |
| 596 | break; | 647 | break; |
| 597 | default: | 648 | default: |
| 598 | DBF_EVENT(3, "T3590: work handler undefined for " | 649 | DBF_EVENT(3, "T3590: work handler undefined for " |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9045c52abd25..fb2bb35c62cb 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -443,7 +443,7 @@ static void scsi_run_queue(struct request_queue *q) | |||
| 443 | &sdev->request_queue->queue_flags); | 443 | &sdev->request_queue->queue_flags); |
| 444 | if (flagset) | 444 | if (flagset) |
| 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); | 445 | queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); |
| 446 | __blk_run_queue(sdev->request_queue); | 446 | __blk_run_queue(sdev->request_queue, false); |
| 447 | if (flagset) | 447 | if (flagset) |
| 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); | 448 | queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); |
| 449 | spin_unlock(sdev->request_queue->queue_lock); | 449 | spin_unlock(sdev->request_queue->queue_lock); |
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c index 998c01be3234..5c3ccfc6b622 100644 --- a/drivers/scsi/scsi_transport_fc.c +++ b/drivers/scsi/scsi_transport_fc.c | |||
| @@ -3829,7 +3829,7 @@ fc_bsg_goose_queue(struct fc_rport *rport) | |||
| 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); | 3829 | !test_bit(QUEUE_FLAG_REENTER, &rport->rqst_q->queue_flags); |
| 3830 | if (flagset) | 3830 | if (flagset) |
| 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); | 3831 | queue_flag_set(QUEUE_FLAG_REENTER, rport->rqst_q); |
| 3832 | __blk_run_queue(rport->rqst_q); | 3832 | __blk_run_queue(rport->rqst_q, false); |
| 3833 | if (flagset) | 3833 | if (flagset) |
| 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); | 3834 | queue_flag_clear(QUEUE_FLAG_REENTER, rport->rqst_q); |
| 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); | 3835 | spin_unlock_irqrestore(rport->rqst_q->queue_lock, flags); |
diff --git a/drivers/tty/serial/serial_cs.c b/drivers/tty/serial/serial_cs.c index 93760b2ea172..1ef4df9bf7e4 100644 --- a/drivers/tty/serial/serial_cs.c +++ b/drivers/tty/serial/serial_cs.c | |||
| @@ -712,6 +712,7 @@ static struct pcmcia_device_id serial_ids[] = { | |||
| 712 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), | 712 | PCMCIA_PFC_DEVICE_PROD_ID12(1, "Xircom", "CreditCard Ethernet+Modem II", 0x2e3ee845, 0xeca401bf), |
| 713 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), | 713 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0e01), |
| 714 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), | 714 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0a05), |
| 715 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x0b05), | ||
| 715 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), | 716 | PCMCIA_PFC_DEVICE_MANF_CARD(1, 0x0032, 0x1101), |
| 716 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), | 717 | PCMCIA_MFC_DEVICE_MANF_CARD(0, 0x0104, 0x0070), |
| 717 | PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), | 718 | PCMCIA_MFC_DEVICE_MANF_CARD(1, 0x0101, 0x0562), |
diff --git a/drivers/usb/gadget/f_phonet.c b/drivers/usb/gadget/f_phonet.c index 3c6e1a058745..5e1495097ec3 100644 --- a/drivers/usb/gadget/f_phonet.c +++ b/drivers/usb/gadget/f_phonet.c | |||
| @@ -346,14 +346,19 @@ static void pn_rx_complete(struct usb_ep *ep, struct usb_request *req) | |||
| 346 | 346 | ||
| 347 | if (unlikely(!skb)) | 347 | if (unlikely(!skb)) |
| 348 | break; | 348 | break; |
| 349 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, 0, | ||
| 350 | req->actual); | ||
| 351 | page = NULL; | ||
| 352 | 349 | ||
| 353 | if (req->actual < req->length) { /* Last fragment */ | 350 | if (skb->len == 0) { /* First fragment */ |
| 354 | skb->protocol = htons(ETH_P_PHONET); | 351 | skb->protocol = htons(ETH_P_PHONET); |
| 355 | skb_reset_mac_header(skb); | 352 | skb_reset_mac_header(skb); |
| 356 | pskb_pull(skb, 1); | 353 | /* Can't use pskb_pull() on page in IRQ */ |
| 354 | memcpy(skb_put(skb, 1), page_address(page), 1); | ||
| 355 | } | ||
| 356 | |||
| 357 | skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, | ||
| 358 | skb->len == 0, req->actual); | ||
| 359 | page = NULL; | ||
| 360 | |||
| 361 | if (req->actual < req->length) { /* Last fragment */ | ||
| 357 | skb->dev = dev; | 362 | skb->dev = dev; |
| 358 | dev->stats.rx_packets++; | 363 | dev->stats.rx_packets++; |
| 359 | dev->stats.rx_bytes += skb->len; | 364 | dev->stats.rx_bytes += skb->len; |
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c index e8f4f36fdf0b..a6f21b891f68 100644 --- a/drivers/usb/host/ehci-xilinx-of.c +++ b/drivers/usb/host/ehci-xilinx-of.c | |||
| @@ -29,6 +29,7 @@ | |||
| 29 | 29 | ||
| 30 | #include <linux/of.h> | 30 | #include <linux/of.h> |
| 31 | #include <linux/of_platform.h> | 31 | #include <linux/of_platform.h> |
| 32 | #include <linux/of_address.h> | ||
| 32 | 33 | ||
| 33 | /** | 34 | /** |
| 34 | * ehci_xilinx_of_setup - Initialize the device for ehci_reset() | 35 | * ehci_xilinx_of_setup - Initialize the device for ehci_reset() |
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c index 8010aaeb5adb..dd0e84a9bd2f 100644 --- a/drivers/video/backlight/ltv350qv.c +++ b/drivers/video/backlight/ltv350qv.c | |||
| @@ -239,11 +239,15 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) | |||
| 239 | lcd->spi = spi; | 239 | lcd->spi = spi; |
| 240 | lcd->power = FB_BLANK_POWERDOWN; | 240 | lcd->power = FB_BLANK_POWERDOWN; |
| 241 | lcd->buffer = kzalloc(8, GFP_KERNEL); | 241 | lcd->buffer = kzalloc(8, GFP_KERNEL); |
| 242 | if (!lcd->buffer) { | ||
| 243 | ret = -ENOMEM; | ||
| 244 | goto out_free_lcd; | ||
| 245 | } | ||
| 242 | 246 | ||
| 243 | ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); | 247 | ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); |
| 244 | if (IS_ERR(ld)) { | 248 | if (IS_ERR(ld)) { |
| 245 | ret = PTR_ERR(ld); | 249 | ret = PTR_ERR(ld); |
| 246 | goto out_free_lcd; | 250 | goto out_free_buffer; |
| 247 | } | 251 | } |
| 248 | lcd->ld = ld; | 252 | lcd->ld = ld; |
| 249 | 253 | ||
| @@ -257,6 +261,8 @@ static int __devinit ltv350qv_probe(struct spi_device *spi) | |||
| 257 | 261 | ||
| 258 | out_unregister: | 262 | out_unregister: |
| 259 | lcd_device_unregister(ld); | 263 | lcd_device_unregister(ld); |
| 264 | out_free_buffer: | ||
| 265 | kfree(lcd->buffer); | ||
| 260 | out_free_lcd: | 266 | out_free_lcd: |
| 261 | kfree(lcd); | 267 | kfree(lcd); |
| 262 | return ret; | 268 | return ret; |
| @@ -268,6 +274,7 @@ static int __devexit ltv350qv_remove(struct spi_device *spi) | |||
| 268 | 274 | ||
| 269 | ltv350qv_power(lcd, FB_BLANK_POWERDOWN); | 275 | ltv350qv_power(lcd, FB_BLANK_POWERDOWN); |
| 270 | lcd_device_unregister(lcd->ld); | 276 | lcd_device_unregister(lcd->ld); |
| 277 | kfree(lcd->buffer); | ||
| 271 | kfree(lcd); | 278 | kfree(lcd); |
| 272 | 279 | ||
| 273 | return 0; | 280 | return 0; |
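The ltv350qv fix above is the usual probe error-unwinding pattern: each allocation gets its own label, and the labels release resources in the reverse order of acquisition, so a failure at any step frees exactly what has been taken so far. A generic sketch of the pattern, not specific to this driver; struct example and example_register() are invented:

    static int example_probe(struct device *dev)
    {
            struct example *ex;
            int ret;

            ex = kzalloc(sizeof(*ex), GFP_KERNEL);
            if (!ex)
                    return -ENOMEM;

            ex->buffer = kzalloc(8, GFP_KERNEL);
            if (!ex->buffer) {
                    ret = -ENOMEM;
                    goto out_free_ex;               /* undo only the first allocation */
            }

            ret = example_register(dev, ex);        /* hypothetical registration step */
            if (ret)
                    goto out_free_buffer;           /* undo both allocations */

            return 0;

    out_free_buffer:
            kfree(ex->buffer);
    out_free_ex:
            kfree(ex);
            return ret;
    }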
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index f0aef787a102..099a58615b90 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
| @@ -60,7 +60,6 @@ int ceph_init_dentry(struct dentry *dentry) | |||
| 60 | } | 60 | } |
| 61 | di->dentry = dentry; | 61 | di->dentry = dentry; |
| 62 | di->lease_session = NULL; | 62 | di->lease_session = NULL; |
| 63 | di->parent_inode = igrab(dentry->d_parent->d_inode); | ||
| 64 | dentry->d_fsdata = di; | 63 | dentry->d_fsdata = di; |
| 65 | dentry->d_time = jiffies; | 64 | dentry->d_time = jiffies; |
| 66 | ceph_dentry_lru_add(dentry); | 65 | ceph_dentry_lru_add(dentry); |
| @@ -410,7 +409,7 @@ more: | |||
| 410 | spin_lock(&inode->i_lock); | 409 | spin_lock(&inode->i_lock); |
| 411 | if (ci->i_release_count == fi->dir_release_count) { | 410 | if (ci->i_release_count == fi->dir_release_count) { |
| 412 | dout(" marking %p complete\n", inode); | 411 | dout(" marking %p complete\n", inode); |
| 413 | ci->i_ceph_flags |= CEPH_I_COMPLETE; | 412 | /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ |
| 414 | ci->i_max_offset = filp->f_pos; | 413 | ci->i_max_offset = filp->f_pos; |
| 415 | } | 414 | } |
| 416 | spin_unlock(&inode->i_lock); | 415 | spin_unlock(&inode->i_lock); |
| @@ -497,6 +496,7 @@ struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, | |||
| 497 | 496 | ||
| 498 | /* .snap dir? */ | 497 | /* .snap dir? */ |
| 499 | if (err == -ENOENT && | 498 | if (err == -ENOENT && |
| 499 | ceph_snap(parent) == CEPH_NOSNAP && | ||
| 500 | strcmp(dentry->d_name.name, | 500 | strcmp(dentry->d_name.name, |
| 501 | fsc->mount_options->snapdir_name) == 0) { | 501 | fsc->mount_options->snapdir_name) == 0) { |
| 502 | struct inode *inode = ceph_get_snapdir(parent); | 502 | struct inode *inode = ceph_get_snapdir(parent); |
| @@ -1030,28 +1030,8 @@ out_touch: | |||
| 1030 | static void ceph_dentry_release(struct dentry *dentry) | 1030 | static void ceph_dentry_release(struct dentry *dentry) |
| 1031 | { | 1031 | { |
| 1032 | struct ceph_dentry_info *di = ceph_dentry(dentry); | 1032 | struct ceph_dentry_info *di = ceph_dentry(dentry); |
| 1033 | struct inode *parent_inode = NULL; | ||
| 1034 | u64 snapid = CEPH_NOSNAP; | ||
| 1035 | 1033 | ||
| 1036 | if (!IS_ROOT(dentry)) { | 1034 | dout("dentry_release %p\n", dentry); |
| 1037 | parent_inode = di->parent_inode; | ||
| 1038 | if (parent_inode) | ||
| 1039 | snapid = ceph_snap(parent_inode); | ||
| 1040 | } | ||
| 1041 | dout("dentry_release %p parent %p\n", dentry, parent_inode); | ||
| 1042 | if (parent_inode && snapid != CEPH_SNAPDIR) { | ||
| 1043 | struct ceph_inode_info *ci = ceph_inode(parent_inode); | ||
| 1044 | |||
| 1045 | spin_lock(&parent_inode->i_lock); | ||
| 1046 | if (ci->i_shared_gen == di->lease_shared_gen || | ||
| 1047 | snapid <= CEPH_MAXSNAP) { | ||
| 1048 | dout(" clearing %p complete (d_release)\n", | ||
| 1049 | parent_inode); | ||
| 1050 | ci->i_ceph_flags &= ~CEPH_I_COMPLETE; | ||
| 1051 | ci->i_release_count++; | ||
| 1052 | } | ||
| 1053 | spin_unlock(&parent_inode->i_lock); | ||
| 1054 | } | ||
| 1055 | if (di) { | 1035 | if (di) { |
| 1056 | ceph_dentry_lru_del(dentry); | 1036 | ceph_dentry_lru_del(dentry); |
| 1057 | if (di->lease_session) | 1037 | if (di->lease_session) |
| @@ -1059,8 +1039,6 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
| 1059 | kmem_cache_free(ceph_dentry_cachep, di); | 1039 | kmem_cache_free(ceph_dentry_cachep, di); |
| 1060 | dentry->d_fsdata = NULL; | 1040 | dentry->d_fsdata = NULL; |
| 1061 | } | 1041 | } |
| 1062 | if (parent_inode) | ||
| 1063 | iput(parent_inode); | ||
| 1064 | } | 1042 | } |
| 1065 | 1043 | ||
| 1066 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, | 1044 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, |
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c index 5625463aa479..193bfa5e9cbd 100644 --- a/fs/ceph/inode.c +++ b/fs/ceph/inode.c | |||
| @@ -707,7 +707,7 @@ static int fill_inode(struct inode *inode, | |||
| 707 | (issued & CEPH_CAP_FILE_EXCL) == 0 && | 707 | (issued & CEPH_CAP_FILE_EXCL) == 0 && |
| 708 | (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { | 708 | (ci->i_ceph_flags & CEPH_I_COMPLETE) == 0) { |
| 709 | dout(" marking %p complete (empty)\n", inode); | 709 | dout(" marking %p complete (empty)\n", inode); |
| 710 | ci->i_ceph_flags |= CEPH_I_COMPLETE; | 710 | /* ci->i_ceph_flags |= CEPH_I_COMPLETE; */ |
| 711 | ci->i_max_offset = 2; | 711 | ci->i_max_offset = 2; |
| 712 | } | 712 | } |
| 713 | break; | 713 | break; |
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 88fcaa21b801..20b907d76ae2 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
| @@ -207,7 +207,6 @@ struct ceph_dentry_info { | |||
| 207 | struct dentry *dentry; | 207 | struct dentry *dentry; |
| 208 | u64 time; | 208 | u64 time; |
| 209 | u64 offset; | 209 | u64 offset; |
| 210 | struct inode *parent_inode; | ||
| 211 | }; | 210 | }; |
| 212 | 211 | ||
| 213 | struct ceph_inode_xattrs_info { | 212 | struct ceph_inode_xattrs_info { |
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c index 264e95d02830..4d70db110cfc 100644 --- a/fs/exofs/namei.c +++ b/fs/exofs/namei.c | |||
| @@ -272,7 +272,6 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 272 | new_de = exofs_find_entry(new_dir, new_dentry, &new_page); | 272 | new_de = exofs_find_entry(new_dir, new_dentry, &new_page); |
| 273 | if (!new_de) | 273 | if (!new_de) |
| 274 | goto out_dir; | 274 | goto out_dir; |
| 275 | inode_inc_link_count(old_inode); | ||
| 276 | err = exofs_set_link(new_dir, new_de, new_page, old_inode); | 275 | err = exofs_set_link(new_dir, new_de, new_page, old_inode); |
| 277 | new_inode->i_ctime = CURRENT_TIME; | 276 | new_inode->i_ctime = CURRENT_TIME; |
| 278 | if (dir_de) | 277 | if (dir_de) |
| @@ -286,12 +285,9 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 286 | if (new_dir->i_nlink >= EXOFS_LINK_MAX) | 285 | if (new_dir->i_nlink >= EXOFS_LINK_MAX) |
| 287 | goto out_dir; | 286 | goto out_dir; |
| 288 | } | 287 | } |
| 289 | inode_inc_link_count(old_inode); | ||
| 290 | err = exofs_add_link(new_dentry, old_inode); | 288 | err = exofs_add_link(new_dentry, old_inode); |
| 291 | if (err) { | 289 | if (err) |
| 292 | inode_dec_link_count(old_inode); | ||
| 293 | goto out_dir; | 290 | goto out_dir; |
| 294 | } | ||
| 295 | if (dir_de) | 291 | if (dir_de) |
| 296 | inode_inc_link_count(new_dir); | 292 | inode_inc_link_count(new_dir); |
| 297 | } | 293 | } |
| @@ -299,7 +295,7 @@ static int exofs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 299 | old_inode->i_ctime = CURRENT_TIME; | 295 | old_inode->i_ctime = CURRENT_TIME; |
| 300 | 296 | ||
| 301 | exofs_delete_entry(old_de, old_page); | 297 | exofs_delete_entry(old_de, old_page); |
| 302 | inode_dec_link_count(old_inode); | 298 | mark_inode_dirty(old_inode); |
| 303 | 299 | ||
| 304 | if (dir_de) { | 300 | if (dir_de) { |
| 305 | err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); | 301 | err = exofs_set_link(old_inode, dir_de, dir_page, new_dir); |
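The exofs rename change above, and the matching ext2, minix, nilfs2, sysv and ufs hunks that follow, all drop the temporary inode_inc_link_count()/inode_dec_link_count() pair around the move and instead mark the inode dirty once the old directory entry is gone, so the ctime update still reaches disk. A condensed before/after sketch; the fs_* helpers stand in for each filesystem's own functions:

    /* before: nlink bumped for the duration of the move */
    inode_inc_link_count(old_inode);
    err = fs_add_link(new_dentry, old_inode);
    if (err) {
            inode_dec_link_count(old_inode);
            goto out_dir;
    }
    /* ... */
    fs_delete_entry(old_de, old_page);
    inode_dec_link_count(old_inode);        /* side effect: marked the inode dirty */

    /* after: nlink left alone, inode dirtied explicitly */
    err = fs_add_link(new_dentry, old_inode);
    if (err)
            goto out_dir;
    /* ... */
    fs_delete_entry(old_de, old_page);
    mark_inode_dirty(old_inode);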
diff --git a/fs/ext2/namei.c b/fs/ext2/namei.c index 2e1d8341d827..adb91855ccd0 100644 --- a/fs/ext2/namei.c +++ b/fs/ext2/namei.c | |||
| @@ -344,7 +344,6 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
| 344 | new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); | 344 | new_de = ext2_find_entry (new_dir, &new_dentry->d_name, &new_page); |
| 345 | if (!new_de) | 345 | if (!new_de) |
| 346 | goto out_dir; | 346 | goto out_dir; |
| 347 | inode_inc_link_count(old_inode); | ||
| 348 | ext2_set_link(new_dir, new_de, new_page, old_inode, 1); | 347 | ext2_set_link(new_dir, new_de, new_page, old_inode, 1); |
| 349 | new_inode->i_ctime = CURRENT_TIME_SEC; | 348 | new_inode->i_ctime = CURRENT_TIME_SEC; |
| 350 | if (dir_de) | 349 | if (dir_de) |
| @@ -356,12 +355,9 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
| 356 | if (new_dir->i_nlink >= EXT2_LINK_MAX) | 355 | if (new_dir->i_nlink >= EXT2_LINK_MAX) |
| 357 | goto out_dir; | 356 | goto out_dir; |
| 358 | } | 357 | } |
| 359 | inode_inc_link_count(old_inode); | ||
| 360 | err = ext2_add_link(new_dentry, old_inode); | 358 | err = ext2_add_link(new_dentry, old_inode); |
| 361 | if (err) { | 359 | if (err) |
| 362 | inode_dec_link_count(old_inode); | ||
| 363 | goto out_dir; | 360 | goto out_dir; |
| 364 | } | ||
| 365 | if (dir_de) | 361 | if (dir_de) |
| 366 | inode_inc_link_count(new_dir); | 362 | inode_inc_link_count(new_dir); |
| 367 | } | 363 | } |
| @@ -369,12 +365,11 @@ static int ext2_rename (struct inode * old_dir, struct dentry * old_dentry, | |||
| 369 | /* | 365 | /* |
| 370 | * Like most other Unix systems, set the ctime for inodes on a | 366 | * Like most other Unix systems, set the ctime for inodes on a |
| 371 | * rename. | 367 | * rename. |
| 372 | * inode_dec_link_count() will mark the inode dirty. | ||
| 373 | */ | 368 | */ |
| 374 | old_inode->i_ctime = CURRENT_TIME_SEC; | 369 | old_inode->i_ctime = CURRENT_TIME_SEC; |
| 370 | mark_inode_dirty(old_inode); | ||
| 375 | 371 | ||
| 376 | ext2_delete_entry (old_de, old_page); | 372 | ext2_delete_entry (old_de, old_page); |
| 377 | inode_dec_link_count(old_inode); | ||
| 378 | 373 | ||
| 379 | if (dir_de) { | 374 | if (dir_de) { |
| 380 | if (old_dir != new_dir) | 375 | if (old_dir != new_dir) |
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index afa66aaa2237..b4d70b13be92 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c | |||
| @@ -238,46 +238,22 @@ static int hfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 238 | } | 238 | } |
| 239 | 239 | ||
| 240 | /* | 240 | /* |
| 241 | * hfs_unlink() | 241 | * hfs_remove() |
| 242 | * | 242 | * |
| 243 | * This is the unlink() entry in the inode_operations structure for | 243 | * This serves as both unlink() and rmdir() in the inode_operations |
| 244 | * regular HFS directories. The purpose is to delete an existing | 244 | * structure for regular HFS directories. The purpose is to delete |
| 245 | * file, given the inode for the parent directory and the name | 245 | * an existing child, given the inode for the parent directory and |
| 246 | * (and its length) of the existing file. | 246 | * the name (and its length) of the existing directory. |
| 247 | */ | ||
| 248 | static int hfs_unlink(struct inode *dir, struct dentry *dentry) | ||
| 249 | { | ||
| 250 | struct inode *inode; | ||
| 251 | int res; | ||
| 252 | |||
| 253 | inode = dentry->d_inode; | ||
| 254 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); | ||
| 255 | if (res) | ||
| 256 | return res; | ||
| 257 | |||
| 258 | drop_nlink(inode); | ||
| 259 | hfs_delete_inode(inode); | ||
| 260 | inode->i_ctime = CURRENT_TIME_SEC; | ||
| 261 | mark_inode_dirty(inode); | ||
| 262 | |||
| 263 | return res; | ||
| 264 | } | ||
| 265 | |||
| 266 | /* | ||
| 267 | * hfs_rmdir() | ||
| 268 | * | 247 | * |
| 269 | * This is the rmdir() entry in the inode_operations structure for | 248 | * HFS does not have hardlinks, so both rmdir and unlink set the |
| 270 | * regular HFS directories. The purpose is to delete an existing | 249 | * link count to 0. The only difference is the emptiness check. |
| 271 | * directory, given the inode for the parent directory and the name | ||
| 272 | * (and its length) of the existing directory. | ||
| 273 | */ | 250 | */ |
| 274 | static int hfs_rmdir(struct inode *dir, struct dentry *dentry) | 251 | static int hfs_remove(struct inode *dir, struct dentry *dentry) |
| 275 | { | 252 | { |
| 276 | struct inode *inode; | 253 | struct inode *inode = dentry->d_inode; |
| 277 | int res; | 254 | int res; |
| 278 | 255 | ||
| 279 | inode = dentry->d_inode; | 256 | if (S_ISDIR(inode->i_mode) && inode->i_size != 2) |
| 280 | if (inode->i_size != 2) | ||
| 281 | return -ENOTEMPTY; | 257 | return -ENOTEMPTY; |
| 282 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); | 258 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); |
| 283 | if (res) | 259 | if (res) |
| @@ -307,7 +283,7 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 307 | 283 | ||
| 308 | /* Unlink destination if it already exists */ | 284 | /* Unlink destination if it already exists */ |
| 309 | if (new_dentry->d_inode) { | 285 | if (new_dentry->d_inode) { |
| 310 | res = hfs_unlink(new_dir, new_dentry); | 286 | res = hfs_remove(new_dir, new_dentry); |
| 311 | if (res) | 287 | if (res) |
| 312 | return res; | 288 | return res; |
| 313 | } | 289 | } |
| @@ -332,9 +308,9 @@ const struct file_operations hfs_dir_operations = { | |||
| 332 | const struct inode_operations hfs_dir_inode_operations = { | 308 | const struct inode_operations hfs_dir_inode_operations = { |
| 333 | .create = hfs_create, | 309 | .create = hfs_create, |
| 334 | .lookup = hfs_lookup, | 310 | .lookup = hfs_lookup, |
| 335 | .unlink = hfs_unlink, | 311 | .unlink = hfs_remove, |
| 336 | .mkdir = hfs_mkdir, | 312 | .mkdir = hfs_mkdir, |
| 337 | .rmdir = hfs_rmdir, | 313 | .rmdir = hfs_remove, |
| 338 | .rename = hfs_rename, | 314 | .rename = hfs_rename, |
| 339 | .setattr = hfs_inode_setattr, | 315 | .setattr = hfs_inode_setattr, |
| 340 | }; | 316 | }; |
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index ce7337ddfdbf..6e6777f1b4b2 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
| @@ -213,7 +213,6 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
| 213 | new_de = minix_find_entry(new_dentry, &new_page); | 213 | new_de = minix_find_entry(new_dentry, &new_page); |
| 214 | if (!new_de) | 214 | if (!new_de) |
| 215 | goto out_dir; | 215 | goto out_dir; |
| 216 | inode_inc_link_count(old_inode); | ||
| 217 | minix_set_link(new_de, new_page, old_inode); | 216 | minix_set_link(new_de, new_page, old_inode); |
| 218 | new_inode->i_ctime = CURRENT_TIME_SEC; | 217 | new_inode->i_ctime = CURRENT_TIME_SEC; |
| 219 | if (dir_de) | 218 | if (dir_de) |
| @@ -225,18 +224,15 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
| 225 | if (new_dir->i_nlink >= info->s_link_max) | 224 | if (new_dir->i_nlink >= info->s_link_max) |
| 226 | goto out_dir; | 225 | goto out_dir; |
| 227 | } | 226 | } |
| 228 | inode_inc_link_count(old_inode); | ||
| 229 | err = minix_add_link(new_dentry, old_inode); | 227 | err = minix_add_link(new_dentry, old_inode); |
| 230 | if (err) { | 228 | if (err) |
| 231 | inode_dec_link_count(old_inode); | ||
| 232 | goto out_dir; | 229 | goto out_dir; |
| 233 | } | ||
| 234 | if (dir_de) | 230 | if (dir_de) |
| 235 | inode_inc_link_count(new_dir); | 231 | inode_inc_link_count(new_dir); |
| 236 | } | 232 | } |
| 237 | 233 | ||
| 238 | minix_delete_entry(old_de, old_page); | 234 | minix_delete_entry(old_de, old_page); |
| 239 | inode_dec_link_count(old_inode); | 235 | mark_inode_dirty(old_inode); |
| 240 | 236 | ||
| 241 | if (dir_de) { | 237 | if (dir_de) { |
| 242 | minix_set_link(dir_de, dir_page, new_dir); | 238 | minix_set_link(dir_de, dir_page, new_dir); |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 78936a8f40ab..1ff76acc7e98 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c | |||
| @@ -51,6 +51,7 @@ | |||
| 51 | #include <linux/sunrpc/bc_xprt.h> | 51 | #include <linux/sunrpc/bc_xprt.h> |
| 52 | #include <linux/xattr.h> | 52 | #include <linux/xattr.h> |
| 53 | #include <linux/utsname.h> | 53 | #include <linux/utsname.h> |
| 54 | #include <linux/mm.h> | ||
| 54 | 55 | ||
| 55 | #include "nfs4_fs.h" | 56 | #include "nfs4_fs.h" |
| 56 | #include "delegation.h" | 57 | #include "delegation.h" |
| @@ -3252,6 +3253,35 @@ static void buf_to_pages(const void *buf, size_t buflen, | |||
| 3252 | } | 3253 | } |
| 3253 | } | 3254 | } |
| 3254 | 3255 | ||
| 3256 | static int buf_to_pages_noslab(const void *buf, size_t buflen, | ||
| 3257 | struct page **pages, unsigned int *pgbase) | ||
| 3258 | { | ||
| 3259 | struct page *newpage, **spages; | ||
| 3260 | int rc = 0; | ||
| 3261 | size_t len; | ||
| 3262 | spages = pages; | ||
| 3263 | |||
| 3264 | do { | ||
| 3265 | len = min(PAGE_CACHE_SIZE, buflen); | ||
| 3266 | newpage = alloc_page(GFP_KERNEL); | ||
| 3267 | |||
| 3268 | if (newpage == NULL) | ||
| 3269 | goto unwind; | ||
| 3270 | memcpy(page_address(newpage), buf, len); | ||
| 3271 | buf += len; | ||
| 3272 | buflen -= len; | ||
| 3273 | *pages++ = newpage; | ||
| 3274 | rc++; | ||
| 3275 | } while (buflen != 0); | ||
| 3276 | |||
| 3277 | return rc; | ||
| 3278 | |||
| 3279 | unwind: | ||
| 3280 | for(; rc > 0; rc--) | ||
| 3281 | __free_page(spages[rc-1]); | ||
| 3282 | return -ENOMEM; | ||
| 3283 | } | ||
| 3284 | |||
| 3255 | struct nfs4_cached_acl { | 3285 | struct nfs4_cached_acl { |
| 3256 | int cached; | 3286 | int cached; |
| 3257 | size_t len; | 3287 | size_t len; |
| @@ -3420,13 +3450,23 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl | |||
| 3420 | .rpc_argp = &arg, | 3450 | .rpc_argp = &arg, |
| 3421 | .rpc_resp = &res, | 3451 | .rpc_resp = &res, |
| 3422 | }; | 3452 | }; |
| 3423 | int ret; | 3453 | int ret, i; |
| 3424 | 3454 | ||
| 3425 | if (!nfs4_server_supports_acls(server)) | 3455 | if (!nfs4_server_supports_acls(server)) |
| 3426 | return -EOPNOTSUPP; | 3456 | return -EOPNOTSUPP; |
| 3457 | i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase); | ||
| 3458 | if (i < 0) | ||
| 3459 | return i; | ||
| 3427 | nfs_inode_return_delegation(inode); | 3460 | nfs_inode_return_delegation(inode); |
| 3428 | buf_to_pages(buf, buflen, arg.acl_pages, &arg.acl_pgbase); | ||
| 3429 | ret = nfs4_call_sync(server, &msg, &arg, &res, 1); | 3461 | ret = nfs4_call_sync(server, &msg, &arg, &res, 1); |
| 3462 | |||
| 3463 | /* | ||
| 3464 | * Free each page after tx, so the only ref left is | ||
| 3465 | * held by the network stack | ||
| 3466 | */ | ||
| 3467 | for (; i > 0; i--) | ||
| 3468 | put_page(pages[i-1]); | ||
| 3469 | |||
| 3430 | /* | 3470 | /* |
| 3431 | * Acl update can result in inode attribute update. | 3471 | * Acl update can result in inode attribute update. |
| 3432 | * so mark the attribute cache invalid. | 3472 | * so mark the attribute cache invalid. |
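buf_to_pages_noslab() above copies the caller's buffer into freshly allocated pages, so the references the RPC transport takes never point into slab memory, and the caller can drop its own references as soon as the call returns (the in-line comment on the tx path says exactly that). A minimal usage sketch; NFS4ACL_MAXPAGES is assumed from the surrounding file, and the remaining names are generic:

    /* Hypothetical caller: stage a buffer into pages, hand them to a
     * transport that takes its own page references, then release ours.
     */
    struct page *pages[NFS4ACL_MAXPAGES];   /* assumed array bound */
    unsigned int pgbase;
    int i, npages;

    npages = buf_to_pages_noslab(buf, buflen, pages, &pgbase);
    if (npages < 0)
            return npages;                  /* -ENOMEM */
    /* ... issue the request; the transport grabs page refs ... */
    for (i = npages; i > 0; i--)
            put_page(pages[i - 1]);         /* only the transport's refs remain */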
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 98034271cd02..161791d26458 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
| @@ -397,7 +397,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 397 | new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); | 397 | new_de = nilfs_find_entry(new_dir, &new_dentry->d_name, &new_page); |
| 398 | if (!new_de) | 398 | if (!new_de) |
| 399 | goto out_dir; | 399 | goto out_dir; |
| 400 | inc_nlink(old_inode); | ||
| 401 | nilfs_set_link(new_dir, new_de, new_page, old_inode); | 400 | nilfs_set_link(new_dir, new_de, new_page, old_inode); |
| 402 | nilfs_mark_inode_dirty(new_dir); | 401 | nilfs_mark_inode_dirty(new_dir); |
| 403 | new_inode->i_ctime = CURRENT_TIME; | 402 | new_inode->i_ctime = CURRENT_TIME; |
| @@ -411,13 +410,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 411 | if (new_dir->i_nlink >= NILFS_LINK_MAX) | 410 | if (new_dir->i_nlink >= NILFS_LINK_MAX) |
| 412 | goto out_dir; | 411 | goto out_dir; |
| 413 | } | 412 | } |
| 414 | inc_nlink(old_inode); | ||
| 415 | err = nilfs_add_link(new_dentry, old_inode); | 413 | err = nilfs_add_link(new_dentry, old_inode); |
| 416 | if (err) { | 414 | if (err) |
| 417 | drop_nlink(old_inode); | ||
| 418 | nilfs_mark_inode_dirty(old_inode); | ||
| 419 | goto out_dir; | 415 | goto out_dir; |
| 420 | } | ||
| 421 | if (dir_de) { | 416 | if (dir_de) { |
| 422 | inc_nlink(new_dir); | 417 | inc_nlink(new_dir); |
| 423 | nilfs_mark_inode_dirty(new_dir); | 418 | nilfs_mark_inode_dirty(new_dir); |
| @@ -431,7 +426,6 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 431 | old_inode->i_ctime = CURRENT_TIME; | 426 | old_inode->i_ctime = CURRENT_TIME; |
| 432 | 427 | ||
| 433 | nilfs_delete_entry(old_de, old_page); | 428 | nilfs_delete_entry(old_de, old_page); |
| 434 | drop_nlink(old_inode); | ||
| 435 | 429 | ||
| 436 | if (dir_de) { | 430 | if (dir_de) { |
| 437 | nilfs_set_link(old_inode, dir_de, dir_page, new_dir); | 431 | nilfs_set_link(old_inode, dir_de, dir_page, new_dir); |
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 55ebae5c7f39..2de9f636792a 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c | |||
| @@ -430,7 +430,8 @@ static void nilfs_segctor_begin_finfo(struct nilfs_sc_info *sci, | |||
| 430 | nilfs_segctor_map_segsum_entry( | 430 | nilfs_segctor_map_segsum_entry( |
| 431 | sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); | 431 | sci, &sci->sc_binfo_ptr, sizeof(struct nilfs_finfo)); |
| 432 | 432 | ||
| 433 | if (inode->i_sb && !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) | 433 | if (NILFS_I(inode)->i_root && |
| 434 | !test_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags)) | ||
| 434 | set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); | 435 | set_bit(NILFS_SC_HAVE_DELTA, &sci->sc_flags); |
| 435 | /* skip finfo */ | 436 | /* skip finfo */ |
| 436 | } | 437 | } |
diff --git a/fs/proc/proc_devtree.c b/fs/proc/proc_devtree.c index d9396a4fc7ff..927cbd115e53 100644 --- a/fs/proc/proc_devtree.c +++ b/fs/proc/proc_devtree.c | |||
| @@ -233,7 +233,7 @@ void __init proc_device_tree_init(void) | |||
| 233 | return; | 233 | return; |
| 234 | root = of_find_node_by_path("/"); | 234 | root = of_find_node_by_path("/"); |
| 235 | if (root == NULL) { | 235 | if (root == NULL) { |
| 236 | printk(KERN_ERR "/proc/device-tree: can't find root\n"); | 236 | pr_debug("/proc/device-tree: can't find root\n"); |
| 237 | return; | 237 | return; |
| 238 | } | 238 | } |
| 239 | proc_device_tree_add_node(root, proc_device_tree); | 239 | proc_device_tree_add_node(root, proc_device_tree); |
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index ba5f51ec3458..68fdf45cc6c9 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c | |||
| @@ -771,7 +771,7 @@ static int reiserfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 771 | EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, | 771 | EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE, |
| 772 | dentry, inode, &security); | 772 | dentry, inode, &security); |
| 773 | if (retval) { | 773 | if (retval) { |
| 774 | dir->i_nlink--; | 774 | DEC_DIR_INODE_NLINK(dir) |
| 775 | goto out_failed; | 775 | goto out_failed; |
| 776 | } | 776 | } |
| 777 | 777 | ||
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index b427b1208c26..e474fbcf8bde 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c | |||
| @@ -245,7 +245,6 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
| 245 | new_de = sysv_find_entry(new_dentry, &new_page); | 245 | new_de = sysv_find_entry(new_dentry, &new_page); |
| 246 | if (!new_de) | 246 | if (!new_de) |
| 247 | goto out_dir; | 247 | goto out_dir; |
| 248 | inode_inc_link_count(old_inode); | ||
| 249 | sysv_set_link(new_de, new_page, old_inode); | 248 | sysv_set_link(new_de, new_page, old_inode); |
| 250 | new_inode->i_ctime = CURRENT_TIME_SEC; | 249 | new_inode->i_ctime = CURRENT_TIME_SEC; |
| 251 | if (dir_de) | 250 | if (dir_de) |
| @@ -257,18 +256,15 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
| 257 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) | 256 | if (new_dir->i_nlink >= SYSV_SB(new_dir->i_sb)->s_link_max) |
| 258 | goto out_dir; | 257 | goto out_dir; |
| 259 | } | 258 | } |
| 260 | inode_inc_link_count(old_inode); | ||
| 261 | err = sysv_add_link(new_dentry, old_inode); | 259 | err = sysv_add_link(new_dentry, old_inode); |
| 262 | if (err) { | 260 | if (err) |
| 263 | inode_dec_link_count(old_inode); | ||
| 264 | goto out_dir; | 261 | goto out_dir; |
| 265 | } | ||
| 266 | if (dir_de) | 262 | if (dir_de) |
| 267 | inode_inc_link_count(new_dir); | 263 | inode_inc_link_count(new_dir); |
| 268 | } | 264 | } |
| 269 | 265 | ||
| 270 | sysv_delete_entry(old_de, old_page); | 266 | sysv_delete_entry(old_de, old_page); |
| 271 | inode_dec_link_count(old_inode); | 267 | mark_inode_dirty(old_inode); |
| 272 | 268 | ||
| 273 | if (dir_de) { | 269 | if (dir_de) { |
| 274 | sysv_set_link(dir_de, dir_page, new_dir); | 270 | sysv_set_link(dir_de, dir_page, new_dir); |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index 2be0f9eb86d2..b7c338d5e9df 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #include <linux/crc-itu-t.h> | 32 | #include <linux/crc-itu-t.h> |
| 33 | #include <linux/exportfs.h> | 33 | #include <linux/exportfs.h> |
| 34 | 34 | ||
| 35 | enum { UDF_MAX_LINKS = 0xffff }; | ||
| 36 | |||
| 35 | static inline int udf_match(int len1, const unsigned char *name1, int len2, | 37 | static inline int udf_match(int len1, const unsigned char *name1, int len2, |
| 36 | const unsigned char *name2) | 38 | const unsigned char *name2) |
| 37 | { | 39 | { |
| @@ -650,7 +652,7 @@ static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
| 650 | struct udf_inode_info *iinfo; | 652 | struct udf_inode_info *iinfo; |
| 651 | 653 | ||
| 652 | err = -EMLINK; | 654 | err = -EMLINK; |
| 653 | if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) | 655 | if (dir->i_nlink >= UDF_MAX_LINKS) |
| 654 | goto out; | 656 | goto out; |
| 655 | 657 | ||
| 656 | err = -EIO; | 658 | err = -EIO; |
| @@ -1034,9 +1036,8 @@ static int udf_link(struct dentry *old_dentry, struct inode *dir, | |||
| 1034 | struct fileIdentDesc cfi, *fi; | 1036 | struct fileIdentDesc cfi, *fi; |
| 1035 | int err; | 1037 | int err; |
| 1036 | 1038 | ||
| 1037 | if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { | 1039 | if (inode->i_nlink >= UDF_MAX_LINKS) |
| 1038 | return -EMLINK; | 1040 | return -EMLINK; |
| 1039 | } | ||
| 1040 | 1041 | ||
| 1041 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); | 1042 | fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); |
| 1042 | if (!fi) { | 1043 | if (!fi) { |
| @@ -1131,9 +1132,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 1131 | goto end_rename; | 1132 | goto end_rename; |
| 1132 | 1133 | ||
| 1133 | retval = -EMLINK; | 1134 | retval = -EMLINK; |
| 1134 | if (!new_inode && | 1135 | if (!new_inode && new_dir->i_nlink >= UDF_MAX_LINKS) |
| 1135 | new_dir->i_nlink >= | ||
| 1136 | (256 << sizeof(new_dir->i_nlink)) - 1) | ||
| 1137 | goto end_rename; | 1136 | goto end_rename; |
| 1138 | } | 1137 | } |
| 1139 | if (!nfi) { | 1138 | if (!nfi) { |
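The udf change above replaces an open-coded limit with the named UDF_MAX_LINKS. The old expression shifted by sizeof(), a byte count rather than a bit count, so assuming the 4-byte i_nlink of kernels of this era it evaluated to (256 << 4) - 1 = 4095, well below the 16-bit link count the new 0xffff expresses. A throwaway check of that arithmetic, with the sizeof value hard-coded as an assumption:

    /* Illustrative only; assumes sizeof(inode->i_nlink) == 4 here. */
    unsigned int old_limit = (256 << 4) - 1;        /* 4095 */
    unsigned int new_limit = 0xffff;                /* UDF_MAX_LINKS, 65535 */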
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 12f39b9e4437..d6f681535eb8 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
| @@ -306,7 +306,6 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 306 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); | 306 | new_de = ufs_find_entry(new_dir, &new_dentry->d_name, &new_page); |
| 307 | if (!new_de) | 307 | if (!new_de) |
| 308 | goto out_dir; | 308 | goto out_dir; |
| 309 | inode_inc_link_count(old_inode); | ||
| 310 | ufs_set_link(new_dir, new_de, new_page, old_inode); | 309 | ufs_set_link(new_dir, new_de, new_page, old_inode); |
| 311 | new_inode->i_ctime = CURRENT_TIME_SEC; | 310 | new_inode->i_ctime = CURRENT_TIME_SEC; |
| 312 | if (dir_de) | 311 | if (dir_de) |
| @@ -318,12 +317,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 318 | if (new_dir->i_nlink >= UFS_LINK_MAX) | 317 | if (new_dir->i_nlink >= UFS_LINK_MAX) |
| 319 | goto out_dir; | 318 | goto out_dir; |
| 320 | } | 319 | } |
| 321 | inode_inc_link_count(old_inode); | ||
| 322 | err = ufs_add_link(new_dentry, old_inode); | 320 | err = ufs_add_link(new_dentry, old_inode); |
| 323 | if (err) { | 321 | if (err) |
| 324 | inode_dec_link_count(old_inode); | ||
| 325 | goto out_dir; | 322 | goto out_dir; |
| 326 | } | ||
| 327 | if (dir_de) | 323 | if (dir_de) |
| 328 | inode_inc_link_count(new_dir); | 324 | inode_inc_link_count(new_dir); |
| 329 | } | 325 | } |
| @@ -331,12 +327,11 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
| 331 | /* | 327 | /* |
| 332 | * Like most other Unix systems, set the ctime for inodes on a | 328 | * Like most other Unix systems, set the ctime for inodes on a |
| 333 | * rename. | 329 | * rename. |
| 334 | * inode_dec_link_count() will mark the inode dirty. | ||
| 335 | */ | 330 | */ |
| 336 | old_inode->i_ctime = CURRENT_TIME_SEC; | 331 | old_inode->i_ctime = CURRENT_TIME_SEC; |
| 337 | 332 | ||
| 338 | ufs_delete_entry(old_dir, old_de, old_page); | 333 | ufs_delete_entry(old_dir, old_de, old_page); |
| 339 | inode_dec_link_count(old_inode); | 334 | mark_inode_dirty(old_inode); |
| 340 | 335 | ||
| 341 | if (dir_de) { | 336 | if (dir_de) { |
| 342 | ufs_set_link(old_inode, dir_de, dir_page, new_dir); | 337 | ufs_set_link(old_inode, dir_de, dir_page, new_dir); |
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c index f5e2a19e0f8e..0ca0e3c024d7 100644 --- a/fs/xfs/linux-2.6/xfs_ioctl.c +++ b/fs/xfs/linux-2.6/xfs_ioctl.c | |||
| @@ -695,14 +695,19 @@ xfs_ioc_fsgeometry_v1( | |||
| 695 | xfs_mount_t *mp, | 695 | xfs_mount_t *mp, |
| 696 | void __user *arg) | 696 | void __user *arg) |
| 697 | { | 697 | { |
| 698 | xfs_fsop_geom_v1_t fsgeo; | 698 | xfs_fsop_geom_t fsgeo; |
| 699 | int error; | 699 | int error; |
| 700 | 700 | ||
| 701 | error = xfs_fs_geometry(mp, (xfs_fsop_geom_t *)&fsgeo, 3); | 701 | error = xfs_fs_geometry(mp, &fsgeo, 3); |
| 702 | if (error) | 702 | if (error) |
| 703 | return -error; | 703 | return -error; |
| 704 | 704 | ||
| 705 | if (copy_to_user(arg, &fsgeo, sizeof(fsgeo))) | 705 | /* |
| 706 | * Caller should have passed an argument of type | ||
| 707 | * xfs_fsop_geom_v1_t. This is a proper subset of the | ||
| 708 | * xfs_fsop_geom_t that xfs_fs_geometry() fills in. | ||
| 709 | */ | ||
| 710 | if (copy_to_user(arg, &fsgeo, sizeof(xfs_fsop_geom_v1_t))) | ||
| 706 | return -XFS_ERROR(EFAULT); | 711 | return -XFS_ERROR(EFAULT); |
| 707 | return 0; | 712 | return 0; |
| 708 | } | 713 | } |
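Editor's note: the comment added to xfs_ioc_fsgeometry_v1() relies on the v1 structure being a leading subset of the full geometry structure, so copying only sizeof(xfs_fsop_geom_v1_t) bytes out of the fully filled-in struct is safe. A small userspace sketch of that versioned-prefix pattern (geom_v1/geom_v2 are invented stand-ins, not the real XFS layouts):

    #include <stdio.h>
    #include <string.h>

    /* Hypothetical versioned structures: v1 is a strict prefix of v2. */
    struct geom_v1 { unsigned int blocksize; unsigned int agcount; };
    struct geom_v2 { unsigned int blocksize; unsigned int agcount; unsigned int logsectsize; };

    int main(void)
    {
            struct geom_v2 full = { 4096, 8, 512 };   /* what the full query fills in */
            struct geom_v1 out;

            /* Copy only the v1-sized prefix, as the ioctl's copy_to_user() now does. */
            memcpy(&out, &full, sizeof(out));
            printf("blocksize=%u agcount=%u\n", out.blocksize, out.agcount);
            return 0;
    }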
diff --git a/include/keys/rxrpc-type.h b/include/keys/rxrpc-type.h index 5cb86c307f5d..fc4875433817 100644 --- a/include/keys/rxrpc-type.h +++ b/include/keys/rxrpc-type.h | |||
| @@ -99,7 +99,6 @@ struct rxrpc_key_token { | |||
| 99 | * structure of raw payloads passed to add_key() or instantiate key | 99 | * structure of raw payloads passed to add_key() or instantiate key |
| 100 | */ | 100 | */ |
| 101 | struct rxrpc_key_data_v1 { | 101 | struct rxrpc_key_data_v1 { |
| 102 | u32 kif_version; /* 1 */ | ||
| 103 | u16 security_index; | 102 | u16 security_index; |
| 104 | u16 ticket_length; | 103 | u16 ticket_length; |
| 105 | u32 expiry; /* time_t */ | 104 | u32 expiry; /* time_t */ |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 4d18ff34670a..d5063e1b5555 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -699,7 +699,7 @@ extern void blk_start_queue(struct request_queue *q); | |||
| 699 | extern void blk_stop_queue(struct request_queue *q); | 699 | extern void blk_stop_queue(struct request_queue *q); |
| 700 | extern void blk_sync_queue(struct request_queue *q); | 700 | extern void blk_sync_queue(struct request_queue *q); |
| 701 | extern void __blk_stop_queue(struct request_queue *q); | 701 | extern void __blk_stop_queue(struct request_queue *q); |
| 702 | extern void __blk_run_queue(struct request_queue *); | 702 | extern void __blk_run_queue(struct request_queue *q, bool force_kblockd); |
| 703 | extern void blk_run_queue(struct request_queue *); | 703 | extern void blk_run_queue(struct request_queue *); |
| 704 | extern int blk_rq_map_user(struct request_queue *, struct request *, | 704 | extern int blk_rq_map_user(struct request_queue *, struct request *, |
| 705 | struct rq_map_data *, void __user *, unsigned long, | 705 | struct rq_map_data *, void __user *, unsigned long, |
| @@ -1088,7 +1088,6 @@ static inline void put_dev_sector(Sector p) | |||
| 1088 | 1088 | ||
| 1089 | struct work_struct; | 1089 | struct work_struct; |
| 1090 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1090 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
| 1091 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | ||
| 1092 | 1091 | ||
| 1093 | #ifdef CONFIG_BLK_CGROUP | 1092 | #ifdef CONFIG_BLK_CGROUP |
| 1094 | /* | 1093 | /* |
| @@ -1136,7 +1135,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
| 1136 | extern int blk_throtl_init(struct request_queue *q); | 1135 | extern int blk_throtl_init(struct request_queue *q); |
| 1137 | extern void blk_throtl_exit(struct request_queue *q); | 1136 | extern void blk_throtl_exit(struct request_queue *q); |
| 1138 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | 1137 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); |
| 1139 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | ||
| 1140 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | 1138 | extern void throtl_shutdown_timer_wq(struct request_queue *q); |
| 1141 | #else /* CONFIG_BLK_DEV_THROTTLING */ | 1139 | #else /* CONFIG_BLK_DEV_THROTTLING */ |
| 1142 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | 1140 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) |
| @@ -1146,7 +1144,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | |||
| 1146 | 1144 | ||
| 1147 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | 1145 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } |
| 1148 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | 1146 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } |
| 1149 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | ||
| 1150 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | 1147 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} |
| 1151 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | 1148 | #endif /* CONFIG_BLK_DEV_THROTTLING */ |
| 1152 | 1149 | ||
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 3395cf7130f5..b22fb0d3db0f 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -245,7 +245,6 @@ static inline int blk_cmd_buf_len(struct request *rq) | |||
| 245 | 245 | ||
| 246 | extern void blk_dump_cmd(char *buf, struct request *rq); | 246 | extern void blk_dump_cmd(char *buf, struct request *rq); |
| 247 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); | 247 | extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes); |
| 248 | extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq); | ||
| 249 | 248 | ||
| 250 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ | 249 | #endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */ |
| 251 | 250 | ||
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h index c3011beac30d..31d91a64838b 100644 --- a/include/linux/ceph/messenger.h +++ b/include/linux/ceph/messenger.h | |||
| @@ -123,6 +123,7 @@ struct ceph_msg_pos { | |||
| 123 | #define SOCK_CLOSED 11 /* socket state changed to closed */ | 123 | #define SOCK_CLOSED 11 /* socket state changed to closed */ |
| 124 | #define OPENING 13 /* open connection w/ (possibly new) peer */ | 124 | #define OPENING 13 /* open connection w/ (possibly new) peer */ |
| 125 | #define DEAD 14 /* dead, about to kfree */ | 125 | #define DEAD 14 /* dead, about to kfree */ |
| 126 | #define BACKOFF 15 | ||
| 126 | 127 | ||
| 127 | /* | 128 | /* |
| 128 | * A single connection with another host. | 129 | * A single connection with another host. |
| @@ -160,7 +161,6 @@ struct ceph_connection { | |||
| 160 | struct list_head out_queue; | 161 | struct list_head out_queue; |
| 161 | struct list_head out_sent; /* sending or sent but unacked */ | 162 | struct list_head out_sent; /* sending or sent but unacked */ |
| 162 | u64 out_seq; /* last message queued for send */ | 163 | u64 out_seq; /* last message queued for send */ |
| 163 | bool out_keepalive_pending; | ||
| 164 | 164 | ||
| 165 | u64 in_seq, in_seq_acked; /* last message received, acked */ | 165 | u64 in_seq, in_seq_acked; /* last message received, acked */ |
| 166 | 166 | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0b84c61607e8..dca31761b311 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -332,16 +332,19 @@ alloc_pages(gfp_t gfp_mask, unsigned int order) | |||
| 332 | return alloc_pages_current(gfp_mask, order); | 332 | return alloc_pages_current(gfp_mask, order); |
| 333 | } | 333 | } |
| 334 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, | 334 | extern struct page *alloc_pages_vma(gfp_t gfp_mask, int order, |
| 335 | struct vm_area_struct *vma, unsigned long addr); | 335 | struct vm_area_struct *vma, unsigned long addr, |
| 336 | int node); | ||
| 336 | #else | 337 | #else |
| 337 | #define alloc_pages(gfp_mask, order) \ | 338 | #define alloc_pages(gfp_mask, order) \ |
| 338 | alloc_pages_node(numa_node_id(), gfp_mask, order) | 339 | alloc_pages_node(numa_node_id(), gfp_mask, order) |
| 339 | #define alloc_pages_vma(gfp_mask, order, vma, addr) \ | 340 | #define alloc_pages_vma(gfp_mask, order, vma, addr, node) \ |
| 340 | alloc_pages(gfp_mask, order) | 341 | alloc_pages(gfp_mask, order) |
| 341 | #endif | 342 | #endif |
| 342 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) | 343 | #define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0) |
| 343 | #define alloc_page_vma(gfp_mask, vma, addr) \ | 344 | #define alloc_page_vma(gfp_mask, vma, addr) \ |
| 344 | alloc_pages_vma(gfp_mask, 0, vma, addr) | 345 | alloc_pages_vma(gfp_mask, 0, vma, addr, numa_node_id()) |
| 346 | #define alloc_page_vma_node(gfp_mask, vma, addr, node) \ | ||
| 347 | alloc_pages_vma(gfp_mask, 0, vma, addr, node) | ||
| 345 | 348 | ||
| 346 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); | 349 | extern unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order); |
| 347 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); | 350 | extern unsigned long get_zeroed_page(gfp_t gfp_mask); |
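Editor's note: the gfp.h hunk threads an explicit NUMA node through alloc_pages_vma() and adds an alloc_page_vma_node() helper, while plain alloc_page_vma() keeps defaulting to numa_node_id(). A rough stand-alone sketch of that default-versus-explicit-node split (none of the names below are real kernel APIs):

    #include <stdio.h>

    /* Hypothetical stand-ins mirroring the reworked allocator entry points. */
    static int current_node(void)
    {
            return 0;
    }

    static void *alloc_pages_on_node(int order, int node)
    {
            printf("order-%d allocation on node %d\n", order, node);
            return NULL;            /* placeholder, no real allocation */
    }

    /* Old-style callers keep the local-node default, new callers pass a node,
     * mirroring alloc_page_vma() vs the added alloc_page_vma_node(). */
    #define alloc_page_local()      alloc_pages_on_node(0, current_node())
    #define alloc_page_node(node)   alloc_pages_on_node(0, (node))

    int main(void)
    {
            alloc_page_local();     /* ends up on the caller's node */
            alloc_page_node(1);     /* khugepaged-style explicit placement */
            return 0;
    }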
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h index 3fd36845ca45..ef4f0b6083a3 100644 --- a/include/linux/mfd/wm8994/core.h +++ b/include/linux/mfd/wm8994/core.h | |||
| @@ -71,6 +71,7 @@ struct wm8994 { | |||
| 71 | u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; | 71 | u16 irq_masks_cache[WM8994_NUM_IRQ_REGS]; |
| 72 | 72 | ||
| 73 | /* Used over suspend/resume */ | 73 | /* Used over suspend/resume */ |
| 74 | bool suspended; | ||
| 74 | u16 ldo_regs[WM8994_NUM_LDO_REGS]; | 75 | u16 ldo_regs[WM8994_NUM_LDO_REGS]; |
| 75 | u16 gpio_regs[WM8994_NUM_GPIO_REGS]; | 76 | u16 gpio_regs[WM8994_NUM_GPIO_REGS]; |
| 76 | 77 | ||
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 092a04f874a8..a1147e5dd245 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -102,11 +102,8 @@ | |||
| 102 | 102 | ||
| 103 | extern long arch_ptrace(struct task_struct *child, long request, | 103 | extern long arch_ptrace(struct task_struct *child, long request, |
| 104 | unsigned long addr, unsigned long data); | 104 | unsigned long addr, unsigned long data); |
| 105 | extern int ptrace_traceme(void); | ||
| 106 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); | 105 | extern int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len); |
| 107 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); | 106 | extern int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len); |
| 108 | extern int ptrace_attach(struct task_struct *tsk); | ||
| 109 | extern int ptrace_detach(struct task_struct *, unsigned int); | ||
| 110 | extern void ptrace_disable(struct task_struct *); | 107 | extern void ptrace_disable(struct task_struct *); |
| 111 | extern int ptrace_check_attach(struct task_struct *task, int kill); | 108 | extern int ptrace_check_attach(struct task_struct *task, int kill); |
| 112 | extern int ptrace_request(struct task_struct *child, long request, | 109 | extern int ptrace_request(struct task_struct *child, long request, |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index aba421d68f6f..78f18adb49c8 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
| @@ -31,7 +31,7 @@ DECLARE_EVENT_CLASS(block_rq_with_error, | |||
| 31 | 0 : blk_rq_sectors(rq); | 31 | 0 : blk_rq_sectors(rq); |
| 32 | __entry->errors = rq->errors; | 32 | __entry->errors = rq->errors; |
| 33 | 33 | ||
| 34 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 34 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
| 35 | blk_dump_cmd(__get_str(cmd), rq); | 35 | blk_dump_cmd(__get_str(cmd), rq); |
| 36 | ), | 36 | ), |
| 37 | 37 | ||
| @@ -118,7 +118,7 @@ DECLARE_EVENT_CLASS(block_rq, | |||
| 118 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? | 118 | __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? |
| 119 | blk_rq_bytes(rq) : 0; | 119 | blk_rq_bytes(rq) : 0; |
| 120 | 120 | ||
| 121 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 121 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
| 122 | blk_dump_cmd(__get_str(cmd), rq); | 122 | blk_dump_cmd(__get_str(cmd), rq); |
| 123 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); | 123 | memcpy(__entry->comm, current->comm, TASK_COMM_LEN); |
| 124 | ), | 124 | ), |
| @@ -563,7 +563,7 @@ TRACE_EVENT(block_rq_remap, | |||
| 563 | __entry->nr_sector = blk_rq_sectors(rq); | 563 | __entry->nr_sector = blk_rq_sectors(rq); |
| 564 | __entry->old_dev = dev; | 564 | __entry->old_dev = dev; |
| 565 | __entry->old_sector = from; | 565 | __entry->old_sector = from; |
| 566 | blk_fill_rwbs_rq(__entry->rwbs, rq); | 566 | blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); |
| 567 | ), | 567 | ), |
| 568 | 568 | ||
| 569 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | 569 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", |
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4349935c2ad8..e92e98189032 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
| @@ -1575,8 +1575,10 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | |||
| 1575 | return -ENODEV; | 1575 | return -ENODEV; |
| 1576 | 1576 | ||
| 1577 | trialcs = alloc_trial_cpuset(cs); | 1577 | trialcs = alloc_trial_cpuset(cs); |
| 1578 | if (!trialcs) | 1578 | if (!trialcs) { |
| 1579 | return -ENOMEM; | 1579 | retval = -ENOMEM; |
| 1580 | goto out; | ||
| 1581 | } | ||
| 1580 | 1582 | ||
| 1581 | switch (cft->private) { | 1583 | switch (cft->private) { |
| 1582 | case FILE_CPULIST: | 1584 | case FILE_CPULIST: |
| @@ -1591,6 +1593,7 @@ static int cpuset_write_resmask(struct cgroup *cgrp, struct cftype *cft, | |||
| 1591 | } | 1593 | } |
| 1592 | 1594 | ||
| 1593 | free_trial_cpuset(trialcs); | 1595 | free_trial_cpuset(trialcs); |
| 1596 | out: | ||
| 1594 | cgroup_unlock(); | 1597 | cgroup_unlock(); |
| 1595 | return retval; | 1598 | return retval; |
| 1596 | } | 1599 | } |
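Editor's note: the cpuset change routes the allocation-failure path through a new out: label so cgroup_unlock() runs on every exit from cpuset_write_resmask(). The same lock-then-goto-unlock shape in runnable userspace form, with a pthread mutex standing in for cgroup_lock()/cgroup_unlock():

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Illustrative only: every return path funnels through the unlock. */
    static int update_resource(size_t len)
    {
            int retval = 0;
            char *buf;

            pthread_mutex_lock(&lock);

            buf = malloc(len);
            if (!buf) {
                    retval = -1;            /* -ENOMEM in the kernel version */
                    goto out;
            }

            /* ... use buf ... */
            free(buf);
    out:
            pthread_mutex_unlock(&lock);
            return retval;
    }

    int main(void)
    {
            printf("update_resource -> %d\n", update_resource(64));
            return 0;
    }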
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 1708b1e2972d..e2302e40b360 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
| @@ -163,7 +163,7 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
| 163 | return !err; | 163 | return !err; |
| 164 | } | 164 | } |
| 165 | 165 | ||
| 166 | int ptrace_attach(struct task_struct *task) | 166 | static int ptrace_attach(struct task_struct *task) |
| 167 | { | 167 | { |
| 168 | int retval; | 168 | int retval; |
| 169 | 169 | ||
| @@ -219,7 +219,7 @@ out: | |||
| 219 | * Performs checks and sets PT_PTRACED. | 219 | * Performs checks and sets PT_PTRACED. |
| 220 | * Should be used by all ptrace implementations for PTRACE_TRACEME. | 220 | * Should be used by all ptrace implementations for PTRACE_TRACEME. |
| 221 | */ | 221 | */ |
| 222 | int ptrace_traceme(void) | 222 | static int ptrace_traceme(void) |
| 223 | { | 223 | { |
| 224 | int ret = -EPERM; | 224 | int ret = -EPERM; |
| 225 | 225 | ||
| @@ -293,7 +293,7 @@ static bool __ptrace_detach(struct task_struct *tracer, struct task_struct *p) | |||
| 293 | return false; | 293 | return false; |
| 294 | } | 294 | } |
| 295 | 295 | ||
| 296 | int ptrace_detach(struct task_struct *child, unsigned int data) | 296 | static int ptrace_detach(struct task_struct *child, unsigned int data) |
| 297 | { | 297 | { |
| 298 | bool dead = false; | 298 | bool dead = false; |
| 299 | 299 | ||
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index d95721f33702..cbafed7d4f38 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -1827,21 +1827,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes) | |||
| 1827 | rwbs[i] = '\0'; | 1827 | rwbs[i] = '\0'; |
| 1828 | } | 1828 | } |
| 1829 | 1829 | ||
| 1830 | void blk_fill_rwbs_rq(char *rwbs, struct request *rq) | ||
| 1831 | { | ||
| 1832 | int rw = rq->cmd_flags & 0x03; | ||
| 1833 | int bytes; | ||
| 1834 | |||
| 1835 | if (rq->cmd_flags & REQ_DISCARD) | ||
| 1836 | rw |= REQ_DISCARD; | ||
| 1837 | |||
| 1838 | if (rq->cmd_flags & REQ_SECURE) | ||
| 1839 | rw |= REQ_SECURE; | ||
| 1840 | |||
| 1841 | bytes = blk_rq_bytes(rq); | ||
| 1842 | |||
| 1843 | blk_fill_rwbs(rwbs, rw, bytes); | ||
| 1844 | } | ||
| 1845 | |||
| 1846 | #endif /* CONFIG_EVENT_TRACING */ | 1830 | #endif /* CONFIG_EVENT_TRACING */ |
| 1847 | 1831 | ||
diff --git a/lib/nlattr.c b/lib/nlattr.c index 5021cbc34411..ac09f2226dc7 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c | |||
| @@ -148,7 +148,7 @@ nla_policy_len(const struct nla_policy *p, int n) | |||
| 148 | { | 148 | { |
| 149 | int i, len = 0; | 149 | int i, len = 0; |
| 150 | 150 | ||
| 151 | for (i = 0; i < n; i++) { | 151 | for (i = 0; i < n; i++, p++) { |
| 152 | if (p->len) | 152 | if (p->len) |
| 153 | len += nla_total_size(p->len); | 153 | len += nla_total_size(p->len); |
| 154 | else if (nla_attr_minlen[p->type]) | 154 | else if (nla_attr_minlen[p->type]) |
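Editor's note: the nla_policy_len() fix adds the missing p++, so the loop inspects each policy entry instead of re-reading the first one n times. A tiny userspace illustration of the difference (struct policy is a cut-down stand-in for struct nla_policy):

    #include <stdio.h>

    struct policy { int len; };

    int main(void)
    {
            struct policy tab[3] = { {4}, {8}, {16} };
            const struct policy *p = tab;
            int i, len = 0;

            /* Without the p++ the loop would add tab[0].len three times (12),
             * not the intended 4 + 8 + 16 = 28. */
            for (i = 0; i < 3; i++, p++)
                    len += p->len;

            printf("total=%d\n", len);
            return 0;
    }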
diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 3e29781ee762..dbe99a5f2073 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c | |||
| @@ -650,10 +650,10 @@ static inline gfp_t alloc_hugepage_gfpmask(int defrag) | |||
| 650 | 650 | ||
| 651 | static inline struct page *alloc_hugepage_vma(int defrag, | 651 | static inline struct page *alloc_hugepage_vma(int defrag, |
| 652 | struct vm_area_struct *vma, | 652 | struct vm_area_struct *vma, |
| 653 | unsigned long haddr) | 653 | unsigned long haddr, int nd) |
| 654 | { | 654 | { |
| 655 | return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), | 655 | return alloc_pages_vma(alloc_hugepage_gfpmask(defrag), |
| 656 | HPAGE_PMD_ORDER, vma, haddr); | 656 | HPAGE_PMD_ORDER, vma, haddr, nd); |
| 657 | } | 657 | } |
| 658 | 658 | ||
| 659 | #ifndef CONFIG_NUMA | 659 | #ifndef CONFIG_NUMA |
| @@ -678,7 +678,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 678 | if (unlikely(khugepaged_enter(vma))) | 678 | if (unlikely(khugepaged_enter(vma))) |
| 679 | return VM_FAULT_OOM; | 679 | return VM_FAULT_OOM; |
| 680 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 680 | page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
| 681 | vma, haddr); | 681 | vma, haddr, numa_node_id()); |
| 682 | if (unlikely(!page)) | 682 | if (unlikely(!page)) |
| 683 | goto out; | 683 | goto out; |
| 684 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { | 684 | if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) { |
| @@ -799,8 +799,8 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, | |||
| 799 | } | 799 | } |
| 800 | 800 | ||
| 801 | for (i = 0; i < HPAGE_PMD_NR; i++) { | 801 | for (i = 0; i < HPAGE_PMD_NR; i++) { |
| 802 | pages[i] = alloc_page_vma(GFP_HIGHUSER_MOVABLE, | 802 | pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, |
| 803 | vma, address); | 803 | vma, address, page_to_nid(page)); |
| 804 | if (unlikely(!pages[i] || | 804 | if (unlikely(!pages[i] || |
| 805 | mem_cgroup_newpage_charge(pages[i], mm, | 805 | mem_cgroup_newpage_charge(pages[i], mm, |
| 806 | GFP_KERNEL))) { | 806 | GFP_KERNEL))) { |
| @@ -902,7 +902,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
| 902 | if (transparent_hugepage_enabled(vma) && | 902 | if (transparent_hugepage_enabled(vma) && |
| 903 | !transparent_hugepage_debug_cow()) | 903 | !transparent_hugepage_debug_cow()) |
| 904 | new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), | 904 | new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma), |
| 905 | vma, haddr); | 905 | vma, haddr, numa_node_id()); |
| 906 | else | 906 | else |
| 907 | new_page = NULL; | 907 | new_page = NULL; |
| 908 | 908 | ||
| @@ -1745,7 +1745,8 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page, | |||
| 1745 | static void collapse_huge_page(struct mm_struct *mm, | 1745 | static void collapse_huge_page(struct mm_struct *mm, |
| 1746 | unsigned long address, | 1746 | unsigned long address, |
| 1747 | struct page **hpage, | 1747 | struct page **hpage, |
| 1748 | struct vm_area_struct *vma) | 1748 | struct vm_area_struct *vma, |
| 1749 | int node) | ||
| 1749 | { | 1750 | { |
| 1750 | pgd_t *pgd; | 1751 | pgd_t *pgd; |
| 1751 | pud_t *pud; | 1752 | pud_t *pud; |
| @@ -1773,7 +1774,8 @@ static void collapse_huge_page(struct mm_struct *mm, | |||
| 1773 | * mmap_sem in read mode is good idea also to allow greater | 1774 | * mmap_sem in read mode is good idea also to allow greater |
| 1774 | * scalability. | 1775 | * scalability. |
| 1775 | */ | 1776 | */ |
| 1776 | new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address); | 1777 | new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address, |
| 1778 | node); | ||
| 1777 | if (unlikely(!new_page)) { | 1779 | if (unlikely(!new_page)) { |
| 1778 | up_read(&mm->mmap_sem); | 1780 | up_read(&mm->mmap_sem); |
| 1779 | *hpage = ERR_PTR(-ENOMEM); | 1781 | *hpage = ERR_PTR(-ENOMEM); |
| @@ -1919,6 +1921,7 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, | |||
| 1919 | struct page *page; | 1921 | struct page *page; |
| 1920 | unsigned long _address; | 1922 | unsigned long _address; |
| 1921 | spinlock_t *ptl; | 1923 | spinlock_t *ptl; |
| 1924 | int node = -1; | ||
| 1922 | 1925 | ||
| 1923 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); | 1926 | VM_BUG_ON(address & ~HPAGE_PMD_MASK); |
| 1924 | 1927 | ||
| @@ -1949,6 +1952,13 @@ static int khugepaged_scan_pmd(struct mm_struct *mm, | |||
| 1949 | page = vm_normal_page(vma, _address, pteval); | 1952 | page = vm_normal_page(vma, _address, pteval); |
| 1950 | if (unlikely(!page)) | 1953 | if (unlikely(!page)) |
| 1951 | goto out_unmap; | 1954 | goto out_unmap; |
| 1955 | /* | ||
| 1956 | * Chose the node of the first page. This could | ||
| 1957 | * be more sophisticated and look at more pages, | ||
| 1958 | * but isn't for now. | ||
| 1959 | */ | ||
| 1960 | if (node == -1) | ||
| 1961 | node = page_to_nid(page); | ||
| 1952 | VM_BUG_ON(PageCompound(page)); | 1962 | VM_BUG_ON(PageCompound(page)); |
| 1953 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) | 1963 | if (!PageLRU(page) || PageLocked(page) || !PageAnon(page)) |
| 1954 | goto out_unmap; | 1964 | goto out_unmap; |
| @@ -1965,7 +1975,7 @@ out_unmap: | |||
| 1965 | pte_unmap_unlock(pte, ptl); | 1975 | pte_unmap_unlock(pte, ptl); |
| 1966 | if (ret) | 1976 | if (ret) |
| 1967 | /* collapse_huge_page will return with the mmap_sem released */ | 1977 | /* collapse_huge_page will return with the mmap_sem released */ |
| 1968 | collapse_huge_page(mm, address, hpage, vma); | 1978 | collapse_huge_page(mm, address, hpage, vma, node); |
| 1969 | out: | 1979 | out: |
| 1970 | return ret; | 1980 | return ret; |
| 1971 | } | 1981 | } |
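Editor's note: khugepaged now remembers the node of the first mapped page it scans and allocates the collapsed huge page there; the new comment concedes the choice "could be more sophisticated". A hedged sketch contrasting that first-page heuristic with a majority vote, using an invented node_of[] array in place of page_to_nid():

    #include <stdio.h>

    #define NPAGES 512

    /* Hypothetical page-to-node map; the kernel would call page_to_nid(). */
    static int node_of[NPAGES];

    static int first_page_node(void)
    {
            return node_of[0];      /* what the patch does */
    }

    static int majority_node(int nr_nodes)
    {
            int count[8] = { 0 }, best = 0, i;

            for (i = 0; i < NPAGES; i++)
                    count[node_of[i]]++;
            for (i = 1; i < nr_nodes; i++)
                    if (count[i] > count[best])
                            best = i;
            return best;            /* the "more sophisticated" alternative */
    }

    int main(void)
    {
            int i;

            for (i = 0; i < NPAGES; i++)
                    node_of[i] = (i < 8) ? 1 : 0;   /* only the first few pages sit on node 1 */

            printf("first-page heuristic: node %d\n", first_page_node());  /* 1 */
            printf("majority heuristic:   node %d\n", majority_node(2));   /* 0 */
            return 0;
    }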
diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 49355a970be2..b53ec99f1428 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c | |||
| @@ -1524,10 +1524,9 @@ static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) | |||
| 1524 | } | 1524 | } |
| 1525 | 1525 | ||
| 1526 | /* Return a zonelist indicated by gfp for node representing a mempolicy */ | 1526 | /* Return a zonelist indicated by gfp for node representing a mempolicy */ |
| 1527 | static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy) | 1527 | static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, |
| 1528 | int nd) | ||
| 1528 | { | 1529 | { |
| 1529 | int nd = numa_node_id(); | ||
| 1530 | |||
| 1531 | switch (policy->mode) { | 1530 | switch (policy->mode) { |
| 1532 | case MPOL_PREFERRED: | 1531 | case MPOL_PREFERRED: |
| 1533 | if (!(policy->flags & MPOL_F_LOCAL)) | 1532 | if (!(policy->flags & MPOL_F_LOCAL)) |
| @@ -1679,7 +1678,7 @@ struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr, | |||
| 1679 | zl = node_zonelist(interleave_nid(*mpol, vma, addr, | 1678 | zl = node_zonelist(interleave_nid(*mpol, vma, addr, |
| 1680 | huge_page_shift(hstate_vma(vma))), gfp_flags); | 1679 | huge_page_shift(hstate_vma(vma))), gfp_flags); |
| 1681 | } else { | 1680 | } else { |
| 1682 | zl = policy_zonelist(gfp_flags, *mpol); | 1681 | zl = policy_zonelist(gfp_flags, *mpol, numa_node_id()); |
| 1683 | if ((*mpol)->mode == MPOL_BIND) | 1682 | if ((*mpol)->mode == MPOL_BIND) |
| 1684 | *nodemask = &(*mpol)->v.nodes; | 1683 | *nodemask = &(*mpol)->v.nodes; |
| 1685 | } | 1684 | } |
| @@ -1820,7 +1819,7 @@ static struct page *alloc_page_interleave(gfp_t gfp, unsigned order, | |||
| 1820 | */ | 1819 | */ |
| 1821 | struct page * | 1820 | struct page * |
| 1822 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | 1821 | alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, |
| 1823 | unsigned long addr) | 1822 | unsigned long addr, int node) |
| 1824 | { | 1823 | { |
| 1825 | struct mempolicy *pol = get_vma_policy(current, vma, addr); | 1824 | struct mempolicy *pol = get_vma_policy(current, vma, addr); |
| 1826 | struct zonelist *zl; | 1825 | struct zonelist *zl; |
| @@ -1836,7 +1835,7 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma, | |||
| 1836 | put_mems_allowed(); | 1835 | put_mems_allowed(); |
| 1837 | return page; | 1836 | return page; |
| 1838 | } | 1837 | } |
| 1839 | zl = policy_zonelist(gfp, pol); | 1838 | zl = policy_zonelist(gfp, pol, node); |
| 1840 | if (unlikely(mpol_needs_cond_ref(pol))) { | 1839 | if (unlikely(mpol_needs_cond_ref(pol))) { |
| 1841 | /* | 1840 | /* |
| 1842 | * slow path: ref counted shared policy | 1841 | * slow path: ref counted shared policy |
| @@ -1892,7 +1891,8 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order) | |||
| 1892 | page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); | 1891 | page = alloc_page_interleave(gfp, order, interleave_nodes(pol)); |
| 1893 | else | 1892 | else |
| 1894 | page = __alloc_pages_nodemask(gfp, order, | 1893 | page = __alloc_pages_nodemask(gfp, order, |
| 1895 | policy_zonelist(gfp, pol), policy_nodemask(gfp, pol)); | 1894 | policy_zonelist(gfp, pol, numa_node_id()), |
| 1895 | policy_nodemask(gfp, pol)); | ||
| 1896 | put_mems_allowed(); | 1896 | put_mems_allowed(); |
| 1897 | return page; | 1897 | return page; |
| 1898 | } | 1898 | } |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 35b36b86d762..05f357828a2f 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
| @@ -336,7 +336,6 @@ static void reset_connection(struct ceph_connection *con) | |||
| 336 | ceph_msg_put(con->out_msg); | 336 | ceph_msg_put(con->out_msg); |
| 337 | con->out_msg = NULL; | 337 | con->out_msg = NULL; |
| 338 | } | 338 | } |
| 339 | con->out_keepalive_pending = false; | ||
| 340 | con->in_seq = 0; | 339 | con->in_seq = 0; |
| 341 | con->in_seq_acked = 0; | 340 | con->in_seq_acked = 0; |
| 342 | } | 341 | } |
| @@ -1248,8 +1247,6 @@ static int process_connect(struct ceph_connection *con) | |||
| 1248 | con->auth_retry); | 1247 | con->auth_retry); |
| 1249 | if (con->auth_retry == 2) { | 1248 | if (con->auth_retry == 2) { |
| 1250 | con->error_msg = "connect authorization failure"; | 1249 | con->error_msg = "connect authorization failure"; |
| 1251 | reset_connection(con); | ||
| 1252 | set_bit(CLOSED, &con->state); | ||
| 1253 | return -1; | 1250 | return -1; |
| 1254 | } | 1251 | } |
| 1255 | con->auth_retry = 1; | 1252 | con->auth_retry = 1; |
| @@ -1715,14 +1712,6 @@ more: | |||
| 1715 | 1712 | ||
| 1716 | /* open the socket first? */ | 1713 | /* open the socket first? */ |
| 1717 | if (con->sock == NULL) { | 1714 | if (con->sock == NULL) { |
| 1718 | /* | ||
| 1719 | * if we were STANDBY and are reconnecting _this_ | ||
| 1720 | * connection, bump connect_seq now. Always bump | ||
| 1721 | * global_seq. | ||
| 1722 | */ | ||
| 1723 | if (test_and_clear_bit(STANDBY, &con->state)) | ||
| 1724 | con->connect_seq++; | ||
| 1725 | |||
| 1726 | prepare_write_banner(msgr, con); | 1715 | prepare_write_banner(msgr, con); |
| 1727 | prepare_write_connect(msgr, con, 1); | 1716 | prepare_write_connect(msgr, con, 1); |
| 1728 | prepare_read_banner(con); | 1717 | prepare_read_banner(con); |
| @@ -1951,7 +1940,24 @@ static void con_work(struct work_struct *work) | |||
| 1951 | work.work); | 1940 | work.work); |
| 1952 | 1941 | ||
| 1953 | mutex_lock(&con->mutex); | 1942 | mutex_lock(&con->mutex); |
| 1943 | if (test_and_clear_bit(BACKOFF, &con->state)) { | ||
| 1944 | dout("con_work %p backing off\n", con); | ||
| 1945 | if (queue_delayed_work(ceph_msgr_wq, &con->work, | ||
| 1946 | round_jiffies_relative(con->delay))) { | ||
| 1947 | dout("con_work %p backoff %lu\n", con, con->delay); | ||
| 1948 | mutex_unlock(&con->mutex); | ||
| 1949 | return; | ||
| 1950 | } else { | ||
| 1951 | con->ops->put(con); | ||
| 1952 | dout("con_work %p FAILED to back off %lu\n", con, | ||
| 1953 | con->delay); | ||
| 1954 | } | ||
| 1955 | } | ||
| 1954 | 1956 | ||
| 1957 | if (test_bit(STANDBY, &con->state)) { | ||
| 1958 | dout("con_work %p STANDBY\n", con); | ||
| 1959 | goto done; | ||
| 1960 | } | ||
| 1955 | if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ | 1961 | if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */ |
| 1956 | dout("con_work CLOSED\n"); | 1962 | dout("con_work CLOSED\n"); |
| 1957 | con_close_socket(con); | 1963 | con_close_socket(con); |
| @@ -2008,10 +2014,12 @@ static void ceph_fault(struct ceph_connection *con) | |||
| 2008 | /* Requeue anything that hasn't been acked */ | 2014 | /* Requeue anything that hasn't been acked */ |
| 2009 | list_splice_init(&con->out_sent, &con->out_queue); | 2015 | list_splice_init(&con->out_sent, &con->out_queue); |
| 2010 | 2016 | ||
| 2011 | /* If there are no messages in the queue, place the connection | 2017 | /* If there are no messages queued or keepalive pending, place |
| 2012 | * in a STANDBY state (i.e., don't try to reconnect just yet). */ | 2018 | * the connection in a STANDBY state */ |
| 2013 | if (list_empty(&con->out_queue) && !con->out_keepalive_pending) { | 2019 | if (list_empty(&con->out_queue) && |
| 2014 | dout("fault setting STANDBY\n"); | 2020 | !test_bit(KEEPALIVE_PENDING, &con->state)) { |
| 2021 | dout("fault %p setting STANDBY clearing WRITE_PENDING\n", con); | ||
| 2022 | clear_bit(WRITE_PENDING, &con->state); | ||
| 2015 | set_bit(STANDBY, &con->state); | 2023 | set_bit(STANDBY, &con->state); |
| 2016 | } else { | 2024 | } else { |
| 2017 | /* retry after a delay. */ | 2025 | /* retry after a delay. */ |
| @@ -2019,11 +2027,24 @@ static void ceph_fault(struct ceph_connection *con) | |||
| 2019 | con->delay = BASE_DELAY_INTERVAL; | 2027 | con->delay = BASE_DELAY_INTERVAL; |
| 2020 | else if (con->delay < MAX_DELAY_INTERVAL) | 2028 | else if (con->delay < MAX_DELAY_INTERVAL) |
| 2021 | con->delay *= 2; | 2029 | con->delay *= 2; |
| 2022 | dout("fault queueing %p delay %lu\n", con, con->delay); | ||
| 2023 | con->ops->get(con); | 2030 | con->ops->get(con); |
| 2024 | if (queue_delayed_work(ceph_msgr_wq, &con->work, | 2031 | if (queue_delayed_work(ceph_msgr_wq, &con->work, |
| 2025 | round_jiffies_relative(con->delay)) == 0) | 2032 | round_jiffies_relative(con->delay))) { |
| 2033 | dout("fault queued %p delay %lu\n", con, con->delay); | ||
| 2034 | } else { | ||
| 2026 | con->ops->put(con); | 2035 | con->ops->put(con); |
| 2036 | dout("fault failed to queue %p delay %lu, backoff\n", | ||
| 2037 | con, con->delay); | ||
| 2038 | /* | ||
| 2039 | * In many cases we see a socket state change | ||
| 2040 | * while con_work is running and end up | ||
| 2041 | * queuing (non-delayed) work, such that we | ||
| 2042 | * can't back off with a delay. Set a flag so | ||
| 2043 | * that when con_work restarts we schedule the | ||
| 2044 | * delay then. | ||
| 2045 | */ | ||
| 2046 | set_bit(BACKOFF, &con->state); | ||
| 2047 | } | ||
| 2027 | } | 2048 | } |
| 2028 | 2049 | ||
| 2029 | out_unlock: | 2050 | out_unlock: |
| @@ -2094,6 +2115,19 @@ void ceph_messenger_destroy(struct ceph_messenger *msgr) | |||
| 2094 | } | 2115 | } |
| 2095 | EXPORT_SYMBOL(ceph_messenger_destroy); | 2116 | EXPORT_SYMBOL(ceph_messenger_destroy); |
| 2096 | 2117 | ||
| 2118 | static void clear_standby(struct ceph_connection *con) | ||
| 2119 | { | ||
| 2120 | /* come back from STANDBY? */ | ||
| 2121 | if (test_and_clear_bit(STANDBY, &con->state)) { | ||
| 2122 | mutex_lock(&con->mutex); | ||
| 2123 | dout("clear_standby %p and ++connect_seq\n", con); | ||
| 2124 | con->connect_seq++; | ||
| 2125 | WARN_ON(test_bit(WRITE_PENDING, &con->state)); | ||
| 2126 | WARN_ON(test_bit(KEEPALIVE_PENDING, &con->state)); | ||
| 2127 | mutex_unlock(&con->mutex); | ||
| 2128 | } | ||
| 2129 | } | ||
| 2130 | |||
| 2097 | /* | 2131 | /* |
| 2098 | * Queue up an outgoing message on the given connection. | 2132 | * Queue up an outgoing message on the given connection. |
| 2099 | */ | 2133 | */ |
| @@ -2126,6 +2160,7 @@ void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 2126 | 2160 | ||
| 2127 | /* if there wasn't anything waiting to send before, queue | 2161 | /* if there wasn't anything waiting to send before, queue |
| 2128 | * new work */ | 2162 | * new work */ |
| 2163 | clear_standby(con); | ||
| 2129 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2164 | if (test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
| 2130 | queue_con(con); | 2165 | queue_con(con); |
| 2131 | } | 2166 | } |
| @@ -2191,6 +2226,8 @@ void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg) | |||
| 2191 | */ | 2226 | */ |
| 2192 | void ceph_con_keepalive(struct ceph_connection *con) | 2227 | void ceph_con_keepalive(struct ceph_connection *con) |
| 2193 | { | 2228 | { |
| 2229 | dout("con_keepalive %p\n", con); | ||
| 2230 | clear_standby(con); | ||
| 2194 | if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && | 2231 | if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 && |
| 2195 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) | 2232 | test_and_set_bit(WRITE_PENDING, &con->state) == 0) |
| 2196 | queue_con(con); | 2233 | queue_con(con); |
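Editor's note: the messenger rework moves keepalive/backoff tracking into bits of con->state (KEEPALIVE_PENDING, the new BACKOFF) and adds clear_standby(), which bumps connect_seq exactly once when a parked connection is used again. A rough userspace analogue of that test_and_set/test_and_clear flag handling, with C11 atomics standing in for the kernel bitops and illustrative flag values:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static atomic_uint state;           /* stands in for con->state */
    #define WRITE_PENDING (1u << 0)
    #define STANDBY       (1u << 1)
    #define BACKOFF       (1u << 2)     /* checked the same way in con_work() */

    static bool test_and_set(unsigned int flag)
    {
            return atomic_fetch_or(&state, flag) & flag;
    }

    static bool test_and_clear(unsigned int flag)
    {
            return atomic_fetch_and(&state, ~flag) & flag;
    }

    int main(void)
    {
            /* ceph_fault(): nothing queued, park the connection. */
            atomic_fetch_or(&state, STANDBY);

            /* ceph_con_send(): leaving STANDBY must bump connect_seq once. */
            if (test_and_clear(STANDBY))
                    printf("left STANDBY, connect_seq++\n");

            /* Only the first writer queues the work item. */
            if (!test_and_set(WRITE_PENDING))
                    printf("queued connection work\n");
            return 0;
    }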
diff --git a/net/ceph/pagevec.c b/net/ceph/pagevec.c index 1a040e64c69f..cd9c21df87d1 100644 --- a/net/ceph/pagevec.c +++ b/net/ceph/pagevec.c | |||
| @@ -16,22 +16,30 @@ struct page **ceph_get_direct_page_vector(const char __user *data, | |||
| 16 | int num_pages, bool write_page) | 16 | int num_pages, bool write_page) |
| 17 | { | 17 | { |
| 18 | struct page **pages; | 18 | struct page **pages; |
| 19 | int rc; | 19 | int got = 0; |
| 20 | int rc = 0; | ||
| 20 | 21 | ||
| 21 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); | 22 | pages = kmalloc(sizeof(*pages) * num_pages, GFP_NOFS); |
| 22 | if (!pages) | 23 | if (!pages) |
| 23 | return ERR_PTR(-ENOMEM); | 24 | return ERR_PTR(-ENOMEM); |
| 24 | 25 | ||
| 25 | down_read(¤t->mm->mmap_sem); | 26 | down_read(¤t->mm->mmap_sem); |
| 26 | rc = get_user_pages(current, current->mm, (unsigned long)data, | 27 | while (got < num_pages) { |
| 27 | num_pages, write_page, 0, pages, NULL); | 28 | rc = get_user_pages(current, current->mm, |
| 29 | (unsigned long)data + ((unsigned long)got * PAGE_SIZE), | ||
| 30 | num_pages - got, write_page, 0, pages + got, NULL); | ||
| 31 | if (rc < 0) | ||
| 32 | break; | ||
| 33 | BUG_ON(rc == 0); | ||
| 34 | got += rc; | ||
| 35 | } | ||
| 28 | up_read(¤t->mm->mmap_sem); | 36 | up_read(¤t->mm->mmap_sem); |
| 29 | if (rc < num_pages) | 37 | if (rc < 0) |
| 30 | goto fail; | 38 | goto fail; |
| 31 | return pages; | 39 | return pages; |
| 32 | 40 | ||
| 33 | fail: | 41 | fail: |
| 34 | ceph_put_page_vector(pages, rc > 0 ? rc : 0, false); | 42 | ceph_put_page_vector(pages, got, false); |
| 35 | return ERR_PTR(rc); | 43 | return ERR_PTR(rc); |
| 36 | } | 44 | } |
| 37 | EXPORT_SYMBOL(ceph_get_direct_page_vector); | 45 | EXPORT_SYMBOL(ceph_get_direct_page_vector); |
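Editor's note: ceph_get_direct_page_vector() now loops because get_user_pages() may pin fewer pages than requested in one call. The same short-result-keep-going pattern in plain userspace terms, with read() as the analogue:

    #include <stdio.h>
    #include <unistd.h>

    /* read(), like get_user_pages(), may return fewer items than asked for,
     * so keep looping until the request is satisfied or an error occurs. */
    static ssize_t read_full(int fd, char *buf, size_t want)
    {
            size_t got = 0;
            ssize_t rc;

            while (got < want) {
                    rc = read(fd, buf + got, want - got);
                    if (rc < 0)
                            return rc;      /* hard error */
                    if (rc == 0)
                            break;          /* EOF; the kernel version BUG()s on 0 instead */
                    got += rc;
            }
            return got;
    }

    int main(void)
    {
            char buf[4096];
            ssize_t n = read_full(0, buf, sizeof(buf));

            printf("read %zd bytes\n", n);
            return 0;
    }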
diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index 508f9c18992f..133fd22ea287 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c | |||
| @@ -144,7 +144,7 @@ void __hw_addr_del_multiple(struct netdev_hw_addr_list *to_list, | |||
| 144 | 144 | ||
| 145 | list_for_each_entry(ha, &from_list->list, list) { | 145 | list_for_each_entry(ha, &from_list->list, list) { |
| 146 | type = addr_type ? addr_type : ha->type; | 146 | type = addr_type ? addr_type : ha->type; |
| 147 | __hw_addr_del(to_list, ha->addr, addr_len, addr_type); | 147 | __hw_addr_del(to_list, ha->addr, addr_len, type); |
| 148 | } | 148 | } |
| 149 | } | 149 | } |
| 150 | EXPORT_SYMBOL(__hw_addr_del_multiple); | 150 | EXPORT_SYMBOL(__hw_addr_del_multiple); |
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index d5074a567289..c44348adba3b 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
| @@ -1193,7 +1193,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlattr **tb, | |||
| 1193 | goto err; | 1193 | goto err; |
| 1194 | } | 1194 | } |
| 1195 | 1195 | ||
| 1196 | if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setets) { | 1196 | if (ieee[DCB_ATTR_IEEE_PFC] && ops->ieee_setpfc) { |
| 1197 | struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); | 1197 | struct ieee_pfc *pfc = nla_data(ieee[DCB_ATTR_IEEE_PFC]); |
| 1198 | err = ops->ieee_setpfc(netdev, pfc); | 1198 | err = ops->ieee_setpfc(netdev, pfc); |
| 1199 | if (err) | 1199 | if (err) |
diff --git a/net/dccp/input.c b/net/dccp/input.c index 8cde009e8b85..4222e7a654b0 100644 --- a/net/dccp/input.c +++ b/net/dccp/input.c | |||
| @@ -614,6 +614,9 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 614 | /* Caller (dccp_v4_do_rcv) will send Reset */ | 614 | /* Caller (dccp_v4_do_rcv) will send Reset */ |
| 615 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | 615 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; |
| 616 | return 1; | 616 | return 1; |
| 617 | } else if (sk->sk_state == DCCP_CLOSED) { | ||
| 618 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
| 619 | return 1; | ||
| 617 | } | 620 | } |
| 618 | 621 | ||
| 619 | if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { | 622 | if (sk->sk_state != DCCP_REQUESTING && sk->sk_state != DCCP_RESPOND) { |
| @@ -668,10 +671,6 @@ int dccp_rcv_state_process(struct sock *sk, struct sk_buff *skb, | |||
| 668 | } | 671 | } |
| 669 | 672 | ||
| 670 | switch (sk->sk_state) { | 673 | switch (sk->sk_state) { |
| 671 | case DCCP_CLOSED: | ||
| 672 | dcb->dccpd_reset_code = DCCP_RESET_CODE_NO_CONNECTION; | ||
| 673 | return 1; | ||
| 674 | |||
| 675 | case DCCP_REQUESTING: | 674 | case DCCP_REQUESTING: |
| 676 | queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); | 675 | queued = dccp_rcv_request_sent_state_process(sk, skb, dh, len); |
| 677 | if (queued >= 0) | 676 | if (queued >= 0) |
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c index 739435a6af39..cfa7a5e1c5c9 100644 --- a/net/dns_resolver/dns_key.c +++ b/net/dns_resolver/dns_key.c | |||
| @@ -67,8 +67,9 @@ dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) | |||
| 67 | size_t result_len = 0; | 67 | size_t result_len = 0; |
| 68 | const char *data = _data, *end, *opt; | 68 | const char *data = _data, *end, *opt; |
| 69 | 69 | ||
| 70 | kenter("%%%d,%s,'%s',%zu", | 70 | kenter("%%%d,%s,'%*.*s',%zu", |
| 71 | key->serial, key->description, data, datalen); | 71 | key->serial, key->description, |
| 72 | (int)datalen, (int)datalen, data, datalen); | ||
| 72 | 73 | ||
| 73 | if (datalen <= 1 || !data || data[datalen - 1] != '\0') | 74 | if (datalen <= 1 || !data || data[datalen - 1] != '\0') |
| 74 | return -EINVAL; | 75 | return -EINVAL; |
| @@ -217,6 +218,19 @@ static void dns_resolver_describe(const struct key *key, struct seq_file *m) | |||
| 217 | seq_printf(m, ": %u", key->datalen); | 218 | seq_printf(m, ": %u", key->datalen); |
| 218 | } | 219 | } |
| 219 | 220 | ||
| 221 | /* | ||
| 222 | * read the DNS data | ||
| 223 | * - the key's semaphore is read-locked | ||
| 224 | */ | ||
| 225 | static long dns_resolver_read(const struct key *key, | ||
| 226 | char __user *buffer, size_t buflen) | ||
| 227 | { | ||
| 228 | if (key->type_data.x[0]) | ||
| 229 | return key->type_data.x[0]; | ||
| 230 | |||
| 231 | return user_read(key, buffer, buflen); | ||
| 232 | } | ||
| 233 | |||
| 220 | struct key_type key_type_dns_resolver = { | 234 | struct key_type key_type_dns_resolver = { |
| 221 | .name = "dns_resolver", | 235 | .name = "dns_resolver", |
| 222 | .instantiate = dns_resolver_instantiate, | 236 | .instantiate = dns_resolver_instantiate, |
| @@ -224,7 +238,7 @@ struct key_type key_type_dns_resolver = { | |||
| 224 | .revoke = user_revoke, | 238 | .revoke = user_revoke, |
| 225 | .destroy = user_destroy, | 239 | .destroy = user_destroy, |
| 226 | .describe = dns_resolver_describe, | 240 | .describe = dns_resolver_describe, |
| 227 | .read = user_read, | 241 | .read = dns_resolver_read, |
| 228 | }; | 242 | }; |
| 229 | 243 | ||
| 230 | static int __init init_dns_resolver(void) | 244 | static int __init init_dns_resolver(void) |
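Editor's note: besides adding dns_resolver_read(), the hunk switches the kenter() format to "%*.*s" so the payload is printed with an explicit length rather than trusting a trailing NUL. A minimal demonstration of that printf width/precision trick:

    #include <stdio.h>

    int main(void)
    {
            /* Payload buffer that is not NUL-terminated. */
            char data[] = { '1', '2', '7', '.', '0', '.', '0', '.', '1' };
            int datalen = (int)sizeof(data);

            /* Width and precision taken from the arguments bound how much of
             * data is read, which is what the kenter() change relies on. */
            printf("'%*.*s'\n", datalen, datalen, data);
            return 0;
    }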
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index a998db6e7895..904312e25a3c 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
| @@ -2557,14 +2557,16 @@ static | |||
| 2557 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, | 2557 | int ipv6_sysctl_rtcache_flush(ctl_table *ctl, int write, |
| 2558 | void __user *buffer, size_t *lenp, loff_t *ppos) | 2558 | void __user *buffer, size_t *lenp, loff_t *ppos) |
| 2559 | { | 2559 | { |
| 2560 | struct net *net = current->nsproxy->net_ns; | 2560 | struct net *net; |
| 2561 | int delay = net->ipv6.sysctl.flush_delay; | 2561 | int delay; |
| 2562 | if (write) { | 2562 | if (!write) |
| 2563 | proc_dointvec(ctl, write, buffer, lenp, ppos); | ||
| 2564 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); | ||
| 2565 | return 0; | ||
| 2566 | } else | ||
| 2567 | return -EINVAL; | 2563 | return -EINVAL; |
| 2564 | |||
| 2565 | net = (struct net *)ctl->extra1; | ||
| 2566 | delay = net->ipv6.sysctl.flush_delay; | ||
| 2567 | proc_dointvec(ctl, write, buffer, lenp, ppos); | ||
| 2568 | fib6_run_gc(delay <= 0 ? ~0UL : (unsigned long)delay, net); | ||
| 2569 | return 0; | ||
| 2568 | } | 2570 | } |
| 2569 | 2571 | ||
| 2570 | ctl_table ipv6_route_table_template[] = { | 2572 | ctl_table ipv6_route_table_template[] = { |
| @@ -2651,6 +2653,7 @@ struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) | |||
| 2651 | 2653 | ||
| 2652 | if (table) { | 2654 | if (table) { |
| 2653 | table[0].data = &net->ipv6.sysctl.flush_delay; | 2655 | table[0].data = &net->ipv6.sysctl.flush_delay; |
| 2656 | table[0].extra1 = net; | ||
| 2654 | table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; | 2657 | table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; |
| 2655 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; | 2658 | table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; |
| 2656 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; | 2659 | table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; |
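Editor's note: the ipv6 handler now recovers the struct net back-pointer from ctl->extra1, which ipv6_route_sysctl_init() fills in, instead of reaching for current->nsproxy. A hedged miniature of that stash-the-owner-in-the-table-entry pattern (struct ctl_entry and struct netns are invented for the sketch):

    #include <stdio.h>

    /* Hypothetical miniature of a per-namespace sysctl table entry. */
    struct ctl_entry {
            const char *name;
            int *data;
            void *extra1;           /* back-pointer to the owning namespace */
    };

    struct netns {
            int flush_delay;
    };

    static void handler(struct ctl_entry *ctl)
    {
            /* Recover the namespace from the table entry itself, not from
             * whichever task happens to be writing the file. */
            struct netns *net = ctl->extra1;

            printf("flushing routes for netns, delay %d\n", net->flush_delay);
    }

    int main(void)
    {
            struct netns ns = { .flush_delay = 5 };
            struct ctl_entry e = { "flush", &ns.flush_delay, &ns };

            handler(&e);
            return 0;
    }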
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 22f7ad5101ab..ba98e1308f3c 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
| @@ -808,9 +808,9 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest, | |||
| 808 | dest->u_threshold = udest->u_threshold; | 808 | dest->u_threshold = udest->u_threshold; |
| 809 | dest->l_threshold = udest->l_threshold; | 809 | dest->l_threshold = udest->l_threshold; |
| 810 | 810 | ||
| 811 | spin_lock(&dest->dst_lock); | 811 | spin_lock_bh(&dest->dst_lock); |
| 812 | ip_vs_dst_reset(dest); | 812 | ip_vs_dst_reset(dest); |
| 813 | spin_unlock(&dest->dst_lock); | 813 | spin_unlock_bh(&dest->dst_lock); |
| 814 | 814 | ||
| 815 | if (add) | 815 | if (add) |
| 816 | ip_vs_new_estimator(&dest->stats); | 816 | ip_vs_new_estimator(&dest->stats); |
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c index b07393eab88e..91816998ed86 100644 --- a/net/netfilter/nf_log.c +++ b/net/netfilter/nf_log.c | |||
| @@ -85,6 +85,8 @@ EXPORT_SYMBOL(nf_log_unregister); | |||
| 85 | 85 | ||
| 86 | int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) | 86 | int nf_log_bind_pf(u_int8_t pf, const struct nf_logger *logger) |
| 87 | { | 87 | { |
| 88 | if (pf >= ARRAY_SIZE(nf_loggers)) | ||
| 89 | return -EINVAL; | ||
| 88 | mutex_lock(&nf_log_mutex); | 90 | mutex_lock(&nf_log_mutex); |
| 89 | if (__find_logger(pf, logger->name) == NULL) { | 91 | if (__find_logger(pf, logger->name) == NULL) { |
| 90 | mutex_unlock(&nf_log_mutex); | 92 | mutex_unlock(&nf_log_mutex); |
| @@ -98,6 +100,8 @@ EXPORT_SYMBOL(nf_log_bind_pf); | |||
| 98 | 100 | ||
| 99 | void nf_log_unbind_pf(u_int8_t pf) | 101 | void nf_log_unbind_pf(u_int8_t pf) |
| 100 | { | 102 | { |
| 103 | if (pf >= ARRAY_SIZE(nf_loggers)) | ||
| 104 | return; | ||
| 101 | mutex_lock(&nf_log_mutex); | 105 | mutex_lock(&nf_log_mutex); |
| 102 | rcu_assign_pointer(nf_loggers[pf], NULL); | 106 | rcu_assign_pointer(nf_loggers[pf], NULL); |
| 103 | mutex_unlock(&nf_log_mutex); | 107 | mutex_unlock(&nf_log_mutex); |
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 478181d53c55..1f924595bdef 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c | |||
| @@ -1407,7 +1407,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1407 | int noblock = flags&MSG_DONTWAIT; | 1407 | int noblock = flags&MSG_DONTWAIT; |
| 1408 | size_t copied; | 1408 | size_t copied; |
| 1409 | struct sk_buff *skb, *data_skb; | 1409 | struct sk_buff *skb, *data_skb; |
| 1410 | int err; | 1410 | int err, ret; |
| 1411 | 1411 | ||
| 1412 | if (flags&MSG_OOB) | 1412 | if (flags&MSG_OOB) |
| 1413 | return -EOPNOTSUPP; | 1413 | return -EOPNOTSUPP; |
| @@ -1470,8 +1470,13 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | |||
| 1470 | 1470 | ||
| 1471 | skb_free_datagram(sk, skb); | 1471 | skb_free_datagram(sk, skb); |
| 1472 | 1472 | ||
| 1473 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | 1473 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { |
| 1474 | netlink_dump(sk); | 1474 | ret = netlink_dump(sk); |
| 1475 | if (ret) { | ||
| 1476 | sk->sk_err = ret; | ||
| 1477 | sk->sk_error_report(sk); | ||
| 1478 | } | ||
| 1479 | } | ||
| 1475 | 1480 | ||
| 1476 | scm_recv(sock, msg, siocb->scm, flags); | 1481 | scm_recv(sock, msg, siocb->scm, flags); |
| 1477 | out: | 1482 | out: |
| @@ -1736,6 +1741,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1736 | struct netlink_callback *cb; | 1741 | struct netlink_callback *cb; |
| 1737 | struct sock *sk; | 1742 | struct sock *sk; |
| 1738 | struct netlink_sock *nlk; | 1743 | struct netlink_sock *nlk; |
| 1744 | int ret; | ||
| 1739 | 1745 | ||
| 1740 | cb = kzalloc(sizeof(*cb), GFP_KERNEL); | 1746 | cb = kzalloc(sizeof(*cb), GFP_KERNEL); |
| 1741 | if (cb == NULL) | 1747 | if (cb == NULL) |
| @@ -1764,9 +1770,13 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | |||
| 1764 | nlk->cb = cb; | 1770 | nlk->cb = cb; |
| 1765 | mutex_unlock(nlk->cb_mutex); | 1771 | mutex_unlock(nlk->cb_mutex); |
| 1766 | 1772 | ||
| 1767 | netlink_dump(sk); | 1773 | ret = netlink_dump(sk); |
| 1774 | |||
| 1768 | sock_put(sk); | 1775 | sock_put(sk); |
| 1769 | 1776 | ||
| 1777 | if (ret) | ||
| 1778 | return ret; | ||
| 1779 | |||
| 1770 | /* We successfully started a dump, by returning -EINTR we | 1780 | /* We successfully started a dump, by returning -EINTR we |
| 1771 | * signal not to send ACK even if it was requested. | 1781 | * signal not to send ACK even if it was requested. |
| 1772 | */ | 1782 | */ |
diff --git a/net/rxrpc/ar-input.c b/net/rxrpc/ar-input.c index 89315009bab1..1a2b0633fece 100644 --- a/net/rxrpc/ar-input.c +++ b/net/rxrpc/ar-input.c | |||
| @@ -423,6 +423,7 @@ void rxrpc_fast_process_packet(struct rxrpc_call *call, struct sk_buff *skb) | |||
| 423 | goto protocol_error; | 423 | goto protocol_error; |
| 424 | } | 424 | } |
| 425 | 425 | ||
| 426 | case RXRPC_PACKET_TYPE_ACKALL: | ||
| 426 | case RXRPC_PACKET_TYPE_ACK: | 427 | case RXRPC_PACKET_TYPE_ACK: |
| 427 | /* ACK processing is done in process context */ | 428 | /* ACK processing is done in process context */ |
| 428 | read_lock_bh(&call->state_lock); | 429 | read_lock_bh(&call->state_lock); |
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c index a07b031090d8..067982f4f182 100644 --- a/sound/pci/hda/patch_cirrus.c +++ b/sound/pci/hda/patch_cirrus.c | |||
| @@ -1039,9 +1039,11 @@ static struct hda_verb cs_errata_init_verbs[] = { | |||
| 1039 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, | 1039 | {0x11, AC_VERB_SET_PROC_COEF, 0x0008}, |
| 1040 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, | 1040 | {0x11, AC_VERB_SET_PROC_STATE, 0x00}, |
| 1041 | 1041 | ||
| 1042 | #if 0 /* Don't set to D3 as we are in power-up sequence */ | ||
| 1042 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ | 1043 | {0x07, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Rx: D3 */ |
| 1043 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ | 1044 | {0x08, AC_VERB_SET_POWER_STATE, 0x03}, /* S/PDIF Tx: D3 */ |
| 1044 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ | 1045 | /*{0x01, AC_VERB_SET_POWER_STATE, 0x03},*/ /* AFG: D3 This is already handled */ |
| 1046 | #endif | ||
| 1045 | 1047 | ||
| 1046 | {} /* terminator */ | 1048 | {} /* terminator */ |
| 1047 | }; | 1049 | }; |
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index a58767736727..ec0fa2dd0a27 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
| @@ -1634,6 +1634,9 @@ static struct hda_codec_preset snd_hda_preset_hdmi[] = { | |||
| 1634 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1634 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| 1635 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1635 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| 1636 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1636 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| 1637 | { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | ||
| 1638 | { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | ||
| 1639 | /* 17 is known to be absent */ | ||
| 1637 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1640 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| 1638 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1641 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| 1639 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1642 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, |
| @@ -1676,6 +1679,8 @@ MODULE_ALIAS("snd-hda-codec-id:10de0011"); | |||
| 1676 | MODULE_ALIAS("snd-hda-codec-id:10de0012"); | 1679 | MODULE_ALIAS("snd-hda-codec-id:10de0012"); |
| 1677 | MODULE_ALIAS("snd-hda-codec-id:10de0013"); | 1680 | MODULE_ALIAS("snd-hda-codec-id:10de0013"); |
| 1678 | MODULE_ALIAS("snd-hda-codec-id:10de0014"); | 1681 | MODULE_ALIAS("snd-hda-codec-id:10de0014"); |
| 1682 | MODULE_ALIAS("snd-hda-codec-id:10de0015"); | ||
| 1683 | MODULE_ALIAS("snd-hda-codec-id:10de0016"); | ||
| 1679 | MODULE_ALIAS("snd-hda-codec-id:10de0018"); | 1684 | MODULE_ALIAS("snd-hda-codec-id:10de0018"); |
| 1680 | MODULE_ALIAS("snd-hda-codec-id:10de0019"); | 1685 | MODULE_ALIAS("snd-hda-codec-id:10de0019"); |
| 1681 | MODULE_ALIAS("snd-hda-codec-id:10de001a"); | 1686 | MODULE_ALIAS("snd-hda-codec-id:10de001a"); |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3328a259a242..4261bb8eec1d 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
| @@ -1133,11 +1133,8 @@ static void alc_automute_speaker(struct hda_codec *codec, int pinctl) | |||
| 1133 | nid = spec->autocfg.hp_pins[i]; | 1133 | nid = spec->autocfg.hp_pins[i]; |
| 1134 | if (!nid) | 1134 | if (!nid) |
| 1135 | break; | 1135 | break; |
| 1136 | if (snd_hda_jack_detect(codec, nid)) { | 1136 | alc_report_jack(codec, nid); |
| 1137 | spec->jack_present = 1; | 1137 | spec->jack_present |= snd_hda_jack_detect(codec, nid); |
| 1138 | break; | ||
| 1139 | } | ||
| 1140 | alc_report_jack(codec, spec->autocfg.hp_pins[i]); | ||
| 1141 | } | 1138 | } |
| 1142 | 1139 | ||
| 1143 | mute = spec->jack_present ? HDA_AMP_MUTE : 0; | 1140 | mute = spec->jack_present ? HDA_AMP_MUTE : 0; |
| @@ -15015,7 +15012,7 @@ static struct snd_pci_quirk alc269_cfg_tbl[] = { | |||
| 15015 | SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), | 15012 | SND_PCI_QUIRK(0x1043, 0x11e3, "ASUS U33Jc", ALC269VB_AMIC), |
| 15016 | SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), | 15013 | SND_PCI_QUIRK(0x1043, 0x1273, "ASUS UL80Jt", ALC269VB_AMIC), |
| 15017 | SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), | 15014 | SND_PCI_QUIRK(0x1043, 0x1283, "ASUS U53Jc", ALC269_AMIC), |
| 15018 | SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82Jv", ALC269_AMIC), | 15015 | SND_PCI_QUIRK(0x1043, 0x12b3, "ASUS N82JV", ALC269VB_AMIC), |
| 15019 | SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), | 15016 | SND_PCI_QUIRK(0x1043, 0x12d3, "ASUS N61Jv", ALC269_AMIC), |
| 15020 | SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), | 15017 | SND_PCI_QUIRK(0x1043, 0x13a3, "ASUS UL30Vt", ALC269_AMIC), |
| 15021 | SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), | 15018 | SND_PCI_QUIRK(0x1043, 0x1373, "ASUS G73JX", ALC269_AMIC), |
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index ebaee5ca7434..4afbe3b2e443 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c | |||
| @@ -110,6 +110,9 @@ struct wm8994_priv { | |||
| 110 | 110 | ||
| 111 | unsigned int aif1clk_enable:1; | 111 | unsigned int aif1clk_enable:1; |
| 112 | unsigned int aif2clk_enable:1; | 112 | unsigned int aif2clk_enable:1; |
| 113 | |||
| 114 | unsigned int aif1clk_disable:1; | ||
| 115 | unsigned int aif2clk_disable:1; | ||
| 113 | }; | 116 | }; |
| 114 | 117 | ||
| 115 | static int wm8994_readable(unsigned int reg) | 118 | static int wm8994_readable(unsigned int reg) |
| @@ -1015,14 +1018,18 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w, | |||
| 1015 | 1018 | ||
| 1016 | switch (event) { | 1019 | switch (event) { |
| 1017 | case SND_SOC_DAPM_PRE_PMU: | 1020 | case SND_SOC_DAPM_PRE_PMU: |
| 1018 | if (wm8994->aif1clk_enable) | 1021 | if (wm8994->aif1clk_enable) { |
| 1019 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1022 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
| 1020 | WM8994_AIF1CLK_ENA_MASK, | 1023 | WM8994_AIF1CLK_ENA_MASK, |
| 1021 | WM8994_AIF1CLK_ENA); | 1024 | WM8994_AIF1CLK_ENA); |
| 1022 | if (wm8994->aif2clk_enable) | 1025 | wm8994->aif1clk_enable = 0; |
| 1026 | } | ||
| 1027 | if (wm8994->aif2clk_enable) { | ||
| 1023 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1028 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
| 1024 | WM8994_AIF2CLK_ENA_MASK, | 1029 | WM8994_AIF2CLK_ENA_MASK, |
| 1025 | WM8994_AIF2CLK_ENA); | 1030 | WM8994_AIF2CLK_ENA); |
| 1031 | wm8994->aif2clk_enable = 0; | ||
| 1032 | } | ||
| 1026 | break; | 1033 | break; |
| 1027 | } | 1034 | } |
| 1028 | 1035 | ||
| @@ -1037,15 +1044,15 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w, | |||
| 1037 | 1044 | ||
| 1038 | switch (event) { | 1045 | switch (event) { |
| 1039 | case SND_SOC_DAPM_POST_PMD: | 1046 | case SND_SOC_DAPM_POST_PMD: |
| 1040 | if (wm8994->aif1clk_enable) { | 1047 | if (wm8994->aif1clk_disable) { |
| 1041 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, | 1048 | snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, |
| 1042 | WM8994_AIF1CLK_ENA_MASK, 0); | 1049 | WM8994_AIF1CLK_ENA_MASK, 0); |
| 1043 | wm8994->aif1clk_enable = 0; | 1050 | wm8994->aif1clk_disable = 0; |
| 1044 | } | 1051 | } |
| 1045 | if (wm8994->aif2clk_enable) { | 1052 | if (wm8994->aif2clk_disable) { |
| 1046 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, | 1053 | snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, |
| 1047 | WM8994_AIF2CLK_ENA_MASK, 0); | 1054 | WM8994_AIF2CLK_ENA_MASK, 0); |
| 1048 | wm8994->aif2clk_enable = 0; | 1055 | wm8994->aif2clk_disable = 0; |
| 1049 | } | 1056 | } |
| 1050 | break; | 1057 | break; |
| 1051 | } | 1058 | } |
| @@ -1063,6 +1070,9 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w, | |||
| 1063 | case SND_SOC_DAPM_PRE_PMU: | 1070 | case SND_SOC_DAPM_PRE_PMU: |
| 1064 | wm8994->aif1clk_enable = 1; | 1071 | wm8994->aif1clk_enable = 1; |
| 1065 | break; | 1072 | break; |
| 1073 | case SND_SOC_DAPM_POST_PMD: | ||
| 1074 | wm8994->aif1clk_disable = 1; | ||
| 1075 | break; | ||
| 1066 | } | 1076 | } |
| 1067 | 1077 | ||
| 1068 | return 0; | 1078 | return 0; |
| @@ -1078,11 +1088,21 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w, | |||
| 1078 | case SND_SOC_DAPM_PRE_PMU: | 1088 | case SND_SOC_DAPM_PRE_PMU: |
| 1079 | wm8994->aif2clk_enable = 1; | 1089 | wm8994->aif2clk_enable = 1; |
| 1080 | break; | 1090 | break; |
| 1091 | case SND_SOC_DAPM_POST_PMD: | ||
| 1092 | wm8994->aif2clk_disable = 1; | ||
| 1093 | break; | ||
| 1081 | } | 1094 | } |
| 1082 | 1095 | ||
| 1083 | return 0; | 1096 | return 0; |
| 1084 | } | 1097 | } |
| 1085 | 1098 | ||
| 1099 | static int adc_mux_ev(struct snd_soc_dapm_widget *w, | ||
| 1100 | struct snd_kcontrol *kcontrol, int event) | ||
| 1101 | { | ||
| 1102 | late_enable_ev(w, kcontrol, event); | ||
| 1103 | return 0; | ||
| 1104 | } | ||
| 1105 | |||
| 1086 | static int dac_ev(struct snd_soc_dapm_widget *w, | 1106 | static int dac_ev(struct snd_soc_dapm_widget *w, |
| 1087 | struct snd_kcontrol *kcontrol, int event) | 1107 | struct snd_kcontrol *kcontrol, int event) |
| 1088 | { | 1108 | { |
| @@ -1403,6 +1423,18 @@ SND_SOC_DAPM_DAC("DAC1L", NULL, WM8994_POWER_MANAGEMENT_5, 1, 0), | |||
| 1403 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), | 1423 | SND_SOC_DAPM_DAC("DAC1R", NULL, WM8994_POWER_MANAGEMENT_5, 0, 0), |
| 1404 | }; | 1424 | }; |
| 1405 | 1425 | ||
| 1426 | static const struct snd_soc_dapm_widget wm8994_adc_revd_widgets[] = { | ||
| 1427 | SND_SOC_DAPM_MUX_E("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux, | ||
| 1428 | adc_mux_ev, SND_SOC_DAPM_PRE_PMU), | ||
| 1429 | SND_SOC_DAPM_MUX_E("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux, | ||
| 1430 | adc_mux_ev, SND_SOC_DAPM_PRE_PMU), | ||
| 1431 | }; | ||
| 1432 | |||
| 1433 | static const struct snd_soc_dapm_widget wm8994_adc_widgets[] = { | ||
| 1434 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), | ||
| 1435 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), | ||
| 1436 | }; | ||
| 1437 | |||
| 1406 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { | 1438 | static const struct snd_soc_dapm_widget wm8994_dapm_widgets[] = { |
| 1407 | SND_SOC_DAPM_INPUT("DMIC1DAT"), | 1439 | SND_SOC_DAPM_INPUT("DMIC1DAT"), |
| 1408 | SND_SOC_DAPM_INPUT("DMIC2DAT"), | 1440 | SND_SOC_DAPM_INPUT("DMIC2DAT"), |
| @@ -1497,9 +1529,6 @@ SND_SOC_DAPM_ADC("DMIC1R", NULL, WM8994_POWER_MANAGEMENT_4, 2, 0), | |||
| 1497 | SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), | 1529 | SND_SOC_DAPM_ADC("ADCL", NULL, SND_SOC_NOPM, 1, 0), |
| 1498 | SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), | 1530 | SND_SOC_DAPM_ADC("ADCR", NULL, SND_SOC_NOPM, 0, 0), |
| 1499 | 1531 | ||
| 1500 | SND_SOC_DAPM_MUX("ADCL Mux", WM8994_POWER_MANAGEMENT_4, 1, 0, &adcl_mux), | ||
| 1501 | SND_SOC_DAPM_MUX("ADCR Mux", WM8994_POWER_MANAGEMENT_4, 0, 0, &adcr_mux), | ||
| 1502 | |||
| 1503 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), | 1532 | SND_SOC_DAPM_MUX("Left Headphone Mux", SND_SOC_NOPM, 0, 0, &hpl_mux), |
| 1504 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), | 1533 | SND_SOC_DAPM_MUX("Right Headphone Mux", SND_SOC_NOPM, 0, 0, &hpr_mux), |
| 1505 | 1534 | ||
| @@ -3280,11 +3309,15 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec) | |||
| 3280 | if (wm8994->revision < 4) { | 3309 | if (wm8994->revision < 4) { |
| 3281 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, | 3310 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_revd_widgets, |
| 3282 | ARRAY_SIZE(wm8994_lateclk_revd_widgets)); | 3311 | ARRAY_SIZE(wm8994_lateclk_revd_widgets)); |
| 3312 | snd_soc_dapm_new_controls(dapm, wm8994_adc_revd_widgets, | ||
| 3313 | ARRAY_SIZE(wm8994_adc_revd_widgets)); | ||
| 3283 | snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, | 3314 | snd_soc_dapm_new_controls(dapm, wm8994_dac_revd_widgets, |
| 3284 | ARRAY_SIZE(wm8994_dac_revd_widgets)); | 3315 | ARRAY_SIZE(wm8994_dac_revd_widgets)); |
| 3285 | } else { | 3316 | } else { |
| 3286 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, | 3317 | snd_soc_dapm_new_controls(dapm, wm8994_lateclk_widgets, |
| 3287 | ARRAY_SIZE(wm8994_lateclk_widgets)); | 3318 | ARRAY_SIZE(wm8994_lateclk_widgets)); |
| 3319 | snd_soc_dapm_new_controls(dapm, wm8994_adc_widgets, | ||
| 3320 | ARRAY_SIZE(wm8994_adc_widgets)); | ||
| 3288 | snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, | 3321 | snd_soc_dapm_new_controls(dapm, wm8994_dac_widgets, |
| 3289 | ARRAY_SIZE(wm8994_dac_widgets)); | 3322 | ARRAY_SIZE(wm8994_dac_widgets)); |
| 3290 | } | 3323 | } |
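The wm8994 changes above split the deferred clock bookkeeping: aif1clk_enable/aif2clk_enable now carry only a pending enable, consumed and cleared in late_enable_ev(), while the new aif1clk_disable/aif2clk_disable flags carry a pending disable, consumed in late_disable_ev(), so a queued enable can no longer be thrown away by an unrelated power-down. Revision-D parts additionally get ADC mux widgets wired through adc_mux_ev() so the late enable runs before the mux powers up. A rough sketch of the two-flag request/consume pattern, using illustrative names rather than the driver's:

    /* One request structure per AIF clock; names are illustrative only. */
    struct clk_request {
            unsigned int enable:1;      /* set at PRE_PMU, consumed later  */
            unsigned int disable:1;     /* set at POST_PMD, consumed later */
    };

    static void clk_set(int on) { (void)on; }   /* stand-in for the register write */

    static void request_enable(struct clk_request *r)  { r->enable = 1; }
    static void request_disable(struct clk_request *r) { r->disable = 1; }

    static void late_enable(struct clk_request *r)
    {
            if (r->enable) {
                    clk_set(1);
                    r->enable = 0;      /* consume only the enable request */
            }
    }

    static void late_disable(struct clk_request *r)
    {
            if (r->disable) {
                    clk_set(0);
                    r->disable = 0;     /* a pending enable stays untouched */
            }
    }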
diff --git a/sound/soc/codecs/wm9081.c b/sound/soc/codecs/wm9081.c index 43825b2102a5..cce704c275c6 100644 --- a/sound/soc/codecs/wm9081.c +++ b/sound/soc/codecs/wm9081.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
| 18 | #include <linux/device.h> | ||
| 18 | #include <linux/pm.h> | 19 | #include <linux/pm.h> |
| 19 | #include <linux/i2c.h> | 20 | #include <linux/i2c.h> |
| 20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
| @@ -1341,6 +1342,10 @@ static __devinit int wm9081_i2c_probe(struct i2c_client *i2c, | |||
| 1341 | wm9081->control_type = SND_SOC_I2C; | 1342 | wm9081->control_type = SND_SOC_I2C; |
| 1342 | wm9081->control_data = i2c; | 1343 | wm9081->control_data = i2c; |
| 1343 | 1344 | ||
| 1345 | if (dev_get_platdata(&i2c->dev)) | ||
| 1346 | memcpy(&wm9081->retune, dev_get_platdata(&i2c->dev), | ||
| 1347 | sizeof(wm9081->retune)); | ||
| 1348 | |||
| 1344 | ret = snd_soc_register_codec(&i2c->dev, | 1349 | ret = snd_soc_register_codec(&i2c->dev, |
| 1345 | &soc_codec_dev_wm9081, &wm9081_dai, 1); | 1350 | &soc_codec_dev_wm9081, &wm9081_dai, 1); |
| 1346 | if (ret < 0) | 1351 | if (ret < 0) |
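The wm9081_i2c_probe() hunk above picks up optional retune-mobile configuration before registering the codec: if the board supplied platform data via the I2C client's device, the driver takes a private copy, otherwise it keeps its defaults. A small sketch of that optional-platform-data copy, with example_pdata/example_priv as placeholder types:

    #include <linux/device.h>
    #include <linux/i2c.h>
    #include <linux/string.h>

    struct example_pdata { int num_retune_configs; };      /* placeholder */
    struct example_priv  { struct example_pdata retune; }; /* placeholder */

    static void example_copy_pdata(struct i2c_client *i2c,
                                   struct example_priv *priv)
    {
            struct example_pdata *pdata = dev_get_platdata(&i2c->dev);

            if (pdata)                              /* platform data is optional */
                    memcpy(&priv->retune, pdata, sizeof(priv->retune));
    }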
