author		Arnd Bergmann <arnd@arndb.de>	2011-10-30 15:59:09 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2011-10-30 15:59:09 -0400
commit		b8bc83971cc20cae3c3b65c26a804f350d74960c
tree		2573e6367e1806c298344fa023bb49a16cc4d1e4
parent		11f0d9311c6e9ed928dc98ce6e9d14c0ee274b05
parent		bca7ab316edd819edd0e3d59f9ccb26c6b2173f5

Merge branch 'pxa/devel' into next/devel2

Conflicts:
	arch/arm/mach-s3c2416/s3c2416.c

256 files changed, 1871 insertions, 1185 deletions
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 854ed5ca7e3f..d6e6724446c8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2706,10 +2706,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			functions are at fixed addresses, they make nice
 			targets for exploits that can control RIP.

-			emulate [default] Vsyscalls turn into traps and are
-			emulated reasonably safely.
+			emulate Vsyscalls turn into traps and are emulated
+			reasonably safely.

-			native Vsyscalls are native syscall instructions.
+			native [default] Vsyscalls are native syscall
+			instructions.
 			This is a little bit faster than trapping
 			and makes a few dynamic recompilers work
 			better than they would in emulation mode.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 81546990f41c..ca5cdcd0f0e3 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -1042,7 +1042,7 @@ conf/interface/*:
 	The functional behaviour for certain settings is different
 	depending on whether local forwarding is enabled or not.

-accept_ra - BOOLEAN
+accept_ra - INTEGER
 	Accept Router Advertisements; autoconfigure using them.

 	Possible values are:
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER
 	The amount of Duplicate Address Detection probes to send.
 	Default: 1

-forwarding - BOOLEAN
+forwarding - INTEGER
 	Configure interface-specific Host/Router behaviour.

 	Note: It is recommended to have the same setting on all
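Both accept_ra and forwarding are plain integers after this change, so values other than 0/1 become meaningful (the full value tables are given further down in ip-sysctl.txt). A minimal C sketch of driving them through procfs; the interface name eth0 and the chosen values are illustrative only, not a recommendation:

#include <stdio.h>

/* Write a single integer to a /proc/sys entry; returns 0 on success. */
static int sysctl_write(const char *path, int val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%d\n", val);
	return fclose(f);
}

int main(void)
{
	/* Example values: 2 asks for RAs to be accepted even when
	 * forwarding is enabled; consult the tables in this file. */
	sysctl_write("/proc/sys/net/ipv6/conf/eth0/accept_ra", 2);
	sysctl_write("/proc/sys/net/ipv6/conf/eth0/forwarding", 1);
	return 0;
}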
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index 58fd7414e6c0..fe67b5c79f0f 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -27,7 +27,7 @@ applying a filter to each packet that assigns it to one of a small number
 of logical flows. Packets for each flow are steered to a separate receive
 queue, which in turn can be processed by separate CPUs. This mechanism is
 generally known as “Receive-side Scaling” (RSS). The goal of RSS and
-the other scaling techniques to increase performance uniformly.
+the other scaling techniques is to increase performance uniformly.
 Multi-queue distribution can also be used for traffic prioritization, but
 that is not the focus of these techniques.

@@ -186,10 +186,10 @@ are steered using plain RPS. Multiple table entries may point to the
 same CPU. Indeed, with many flows and few CPUs, it is very likely that
 a single application thread handles flows with many different flow hashes.

-rps_sock_table is a global flow table that contains the *desired* CPU for
-flows: the CPU that is currently processing the flow in userspace. Each
-table value is a CPU index that is updated during calls to recvmsg and
-sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
+rps_sock_flow_table is a global flow table that contains the *desired* CPU
+for flows: the CPU that is currently processing the flow in userspace.
+Each table value is a CPU index that is updated during calls to recvmsg
+and sendmsg (specifically, inet_recvmsg(), inet_sendmsg(), inet_sendpage()
 and tcp_splice_read()).

 When the scheduler moves a thread to a new CPU while it has outstanding
@@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through:

 The number of entries in the per-queue flow table are set through:

-/sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt
+/sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt

 == Suggested Configuration

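The corrected sysfs path above is the per-receive-queue RFS knob; it works together with the global table whose size is set through rps_sock_flow_entries. A small C sketch of programming both (device name, queue index and sizes are made-up example values; the usual rule of thumb from this document is the global size divided by the number of configured receive queues):

#include <stdio.h>
#include <stdlib.h>

/* Write one unsigned integer to a procfs/sysfs attribute. */
static int write_uint(const char *path, unsigned int val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return -1;
	}
	fprintf(f, "%u\n", val);
	return fclose(f);
}

int main(void)
{
	/* Global RFS flow table (rps_sock_flow_table) size. */
	if (write_uint("/proc/sys/net/core/rps_sock_flow_entries", 32768))
		return EXIT_FAILURE;

	/* Per-queue flow count for rx-0 of eth0: 32768 / 8 queues = 4096. */
	if (write_uint("/sys/class/net/eth0/queues/rx-0/rps_flow_cnt", 4096))
		return EXIT_FAILURE;

	return EXIT_SUCCESS;
}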
diff --git a/MAINTAINERS b/MAINTAINERS
index ae8820e173a2..6185d0513584 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2460,7 +2460,7 @@ S:	Supported
 F:	drivers/infiniband/hw/ehca/

 EHEA (IBM pSeries eHEA 10Gb ethernet adapter) DRIVER
-M:	Breno Leitao <leitao@linux.vnet.ibm.com>
+M:	Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ehea/
@@ -3313,7 +3313,7 @@ M:	David Woodhouse <dwmw2@infradead.org>
 L:	iommu@lists.linux-foundation.org
 T:	git git://git.infradead.org/iommu-2.6.git
 S:	Supported
-F:	drivers/pci/intel-iommu.c
+F:	drivers/iommu/intel-iommu.c
 F:	include/linux/intel-iommu.h

 INTEL IOP-ADMA DMA DRIVER
@@ -6366,15 +6366,14 @@ F:	net/ipv4/tcp_lp.c

 TEGRA SUPPORT
 M:	Colin Cross <ccross@android.com>
-M:	Erik Gilling <konkers@android.com>
 M:	Olof Johansson <olof@lixom.net>
+M:	Stephen Warren <swarren@nvidia.com>
 L:	linux-tegra@vger.kernel.org
-T:	git git://android.git.kernel.org/kernel/tegra.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/olof/tegra.git
 S:	Supported
 F:	arch/arm/mach-tegra

 TEHUTI ETHERNET DRIVER
-M:	Alexander Indenbaum <baum@tehutinetworks.net>
 M:	Andy Gospodarek <andy@greyhouse.net>
 L:	netdev@vger.kernel.org
 S:	Supported
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION =
 NAME = "Divemaster Edition"

 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 012ff5fbb7e8..863c078ce2e7 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -559,6 +559,7 @@ config ARCH_MMP
 	select TICK_ONESHOT
 	select PLAT_PXA
 	select SPARSE_IRQ
+	select GENERIC_ALLOCATOR
 	help
 	  Support for Marvell's PXA168/PXA910(MMP) and MMP2 processor line.

diff --git a/arch/arm/common/vic.c b/arch/arm/common/vic.c
index 7aa4262ada7a..197f81c77351 100644
--- a/arch/arm/common/vic.c
+++ b/arch/arm/common/vic.c
@@ -259,7 +259,6 @@ static void __init vic_disable(void __iomem *base)
 	writel(0, base + VIC_INT_SELECT);
 	writel(0, base + VIC_INT_ENABLE);
 	writel(~0, base + VIC_INT_ENABLE_CLEAR);
-	writel(0, base + VIC_IRQ_STATUS);
 	writel(0, base + VIC_ITCR);
 	writel(~0, base + VIC_INT_SOFT_CLEAR);
 }
diff --git a/arch/arm/include/asm/localtimer.h b/arch/arm/include/asm/localtimer.h
index 080d74f8128d..ff66638ff54d 100644
--- a/arch/arm/include/asm/localtimer.h
+++ b/arch/arm/include/asm/localtimer.h
@@ -10,6 +10,8 @@
 #ifndef __ASM_ARM_LOCALTIMER_H
 #define __ASM_ARM_LOCALTIMER_H

+#include <linux/errno.h>
+
 struct clock_event_device;

 /*
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..6be3e2e4d838 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -321,8 +321,8 @@ static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
 	[PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
 	[PERF_COUNT_HW_INSTRUCTIONS] =
 					ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
-	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT,
-	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_DCACHE_ACCESS,
+	[PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_DCACHE_REFILL,
 	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
 	[PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
 	[PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
diff --git a/arch/arm/mach-mmp/Makefile b/arch/arm/mach-mmp/Makefile
index b0ac942327aa..169c6743bde8 100644
--- a/arch/arm/mach-mmp/Makefile
+++ b/arch/arm/mach-mmp/Makefile
@@ -7,7 +7,7 @@ obj-y += common.o clock.o devices.o time.o
 # SoC support
 obj-$(CONFIG_CPU_PXA168) += pxa168.o irq-pxa168.o
 obj-$(CONFIG_CPU_PXA910) += pxa910.o irq-pxa168.o
-obj-$(CONFIG_CPU_MMP2) += mmp2.o irq-mmp2.o
+obj-$(CONFIG_CPU_MMP2) += mmp2.o irq-mmp2.o sram.o

 # board support
 obj-$(CONFIG_MACH_ASPENITE) += aspenite.o
diff --git a/arch/arm/mach-mmp/brownstone.c b/arch/arm/mach-mmp/brownstone.c
index c79162a50f28..59dcf9df1537 100644
--- a/arch/arm/mach-mmp/brownstone.c
+++ b/arch/arm/mach-mmp/brownstone.c
@@ -186,6 +186,15 @@ static struct sdhci_pxa_platdata mmp2_sdh_platdata_mmc2 = {
 			  | PXA_FLAG_SD_8_BIT_CAPABLE_SLOT,
 };

+static struct sram_platdata mmp2_asram_platdata = {
+	.pool_name	= "asram",
+	.granularity	= SRAM_GRANULARITY,
+};
+
+static struct sram_platdata mmp2_isram_platdata = {
+	.pool_name	= "isram",
+	.granularity	= SRAM_GRANULARITY,
+};

 static void __init brownstone_init(void)
 {
@@ -197,6 +206,8 @@ static void __init brownstone_init(void)
 	mmp2_add_twsi(1, NULL, ARRAY_AND_SIZE(brownstone_twsi1_info));
 	mmp2_add_sdhost(0, &mmp2_sdh_platdata_mmc0); /* SD/MMC */
 	mmp2_add_sdhost(2, &mmp2_sdh_platdata_mmc2); /* eMMC */
+	mmp2_add_asram(&mmp2_asram_platdata);
+	mmp2_add_isram(&mmp2_isram_platdata);

 	/* enable 5v regulator */
 	platform_device_register(&brownstone_v_5vp_device);
diff --git a/arch/arm/mach-mmp/include/mach/mmp2.h b/arch/arm/mach-mmp/include/mach/mmp2.h
index de7b88826ad7..2f7b2d3c2b18 100644
--- a/arch/arm/mach-mmp/include/mach/mmp2.h
+++ b/arch/arm/mach-mmp/include/mach/mmp2.h
@@ -13,6 +13,7 @@ extern void mmp2_clear_pmic_int(void);
 #include <linux/i2c.h>
 #include <linux/i2c/pxa-i2c.h>
 #include <mach/devices.h>
+#include <mach/sram.h>

 extern struct pxa_device_desc mmp2_device_uart1;
 extern struct pxa_device_desc mmp2_device_uart2;
@@ -28,6 +29,8 @@ extern struct pxa_device_desc mmp2_device_sdh0;
 extern struct pxa_device_desc mmp2_device_sdh1;
 extern struct pxa_device_desc mmp2_device_sdh2;
 extern struct pxa_device_desc mmp2_device_sdh3;
+extern struct pxa_device_desc mmp2_device_asram;
+extern struct pxa_device_desc mmp2_device_isram;

 static inline int mmp2_add_uart(int id)
 {
@@ -85,5 +88,15 @@ static inline int mmp2_add_sdhost(int id, struct sdhci_pxa_platdata *data)
 	return pxa_register_device(d, data, sizeof(*data));
 }

+static inline int mmp2_add_asram(struct sram_platdata *data)
+{
+	return pxa_register_device(&mmp2_device_asram, data, sizeof(*data));
+}
+
+static inline int mmp2_add_isram(struct sram_platdata *data)
+{
+	return pxa_register_device(&mmp2_device_isram, data, sizeof(*data));
+}
+
 #endif /* __ASM_MACH_MMP2_H */

diff --git a/arch/arm/mach-mmp/include/mach/sram.h b/arch/arm/mach-mmp/include/mach/sram.h
new file mode 100644
index 000000000000..239e0fc1bb1f
--- /dev/null
+++ b/arch/arm/mach-mmp/include/mach/sram.h
@@ -0,0 +1,35 @@
+/*
+ * linux/arch/arm/mach-mmp/include/mach/sram.h
+ *
+ *  SRAM Memory Management
+ *
+ *  Copyright (c) 2011 Marvell Semiconductors Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ASM_ARCH_SRAM_H
+#define __ASM_ARCH_SRAM_H
+
+#include <linux/genalloc.h>
+
+/* ARBITRARY: SRAM allocations are multiples of this 2^N size */
+#define SRAM_GRANULARITY	512
+
+enum sram_type {
+	MMP_SRAM_UNDEFINED = 0,
+	MMP_ASRAM,
+	MMP_ISRAM,
+};
+
+struct sram_platdata {
+	char *pool_name;
+	int granularity;
+};
+
+extern struct gen_pool *sram_get_gpool(char *pool_name);
+
+#endif /* __ASM_ARCH_SRAM_H */
diff --git a/arch/arm/mach-mmp/mmp2.c b/arch/arm/mach-mmp/mmp2.c
index 079c18861d5c..43266c44cb36 100644
--- a/arch/arm/mach-mmp/mmp2.c
+++ b/arch/arm/mach-mmp/mmp2.c
@@ -226,4 +226,7 @@ MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120);
 MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120);
 MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120);
 MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120);
+MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000);
+/* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */
+MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000);

diff --git a/arch/arm/mach-mmp/sram.c b/arch/arm/mach-mmp/sram.c
new file mode 100644
index 000000000000..4304f9519372
--- /dev/null
+++ b/arch/arm/mach-mmp/sram.c
@@ -0,0 +1,168 @@
+/*
+ * linux/arch/arm/mach-mmp/sram.c
+ *
+ * based on mach-davinci/sram.c - DaVinci simple SRAM allocator
+ *
+ * Copyright (c) 2011 Marvell Semiconductors Inc.
+ * All Rights Reserved
+ *
+ * Add for mmp sram support - Leo Yan <leoy@marvell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/genalloc.h>
+
+#include <mach/sram.h>
+
+struct sram_bank_info {
+	char *pool_name;
+	struct gen_pool *gpool;
+	int granularity;
+
+	phys_addr_t sram_phys;
+	void __iomem *sram_virt;
+	u32 sram_size;
+
+	struct list_head node;
+};
+
+static DEFINE_MUTEX(sram_lock);
+static LIST_HEAD(sram_bank_list);
+
+struct gen_pool *sram_get_gpool(char *pool_name)
+{
+	struct sram_bank_info *info = NULL;
+
+	if (!pool_name)
+		return NULL;
+
+	mutex_lock(&sram_lock);
+
+	list_for_each_entry(info, &sram_bank_list, node)
+		if (!strcmp(pool_name, info->pool_name))
+			break;
+
+	mutex_unlock(&sram_lock);
+
+	if (&info->node == &sram_bank_list)
+		return NULL;
+
+	return info->gpool;
+}
+EXPORT_SYMBOL(sram_get_gpool);
+
+static int __devinit sram_probe(struct platform_device *pdev)
+{
+	struct sram_platdata *pdata = pdev->dev.platform_data;
+	struct sram_bank_info *info;
+	struct resource *res;
+	int ret = 0;
+
+	if (!pdata && !pdata->pool_name)
+		return -ENODEV;
+
+	info = kzalloc(sizeof(*info), GFP_KERNEL);
+	if (!info)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (res == NULL) {
+		dev_err(&pdev->dev, "no memory resource defined\n");
+		ret = -ENODEV;
+		goto out;
+	}
+
+	if (!resource_size(res))
+		return 0;
+
+	info->sram_phys   = (phys_addr_t)res->start;
+	info->sram_size   = resource_size(res);
+	info->sram_virt   = ioremap(info->sram_phys, info->sram_size);
+	info->pool_name   = kstrdup(pdata->pool_name, GFP_KERNEL);
+	info->granularity = pdata->granularity;
+
+	info->gpool = gen_pool_create(ilog2(info->granularity), -1);
+	if (!info->gpool) {
+		dev_err(&pdev->dev, "create pool failed\n");
+		ret = -ENOMEM;
+		goto create_pool_err;
+	}
+
+	ret = gen_pool_add_virt(info->gpool, (unsigned long)info->sram_virt,
+				info->sram_phys, info->sram_size, -1);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "add new chunk failed\n");
+		ret = -ENOMEM;
+		goto add_chunk_err;
+	}
+
+	mutex_lock(&sram_lock);
+	list_add(&info->node, &sram_bank_list);
+	mutex_unlock(&sram_lock);
+
+	platform_set_drvdata(pdev, info);
+
+	dev_info(&pdev->dev, "initialized\n");
+	return 0;
+
+add_chunk_err:
+	gen_pool_destroy(info->gpool);
+create_pool_err:
+	iounmap(info->sram_virt);
+	kfree(info->pool_name);
+out:
+	kfree(info);
+	return ret;
+}
+
+static int __devexit sram_remove(struct platform_device *pdev)
+{
+	struct sram_bank_info *info;
+
+	info = platform_get_drvdata(pdev);
+	if (info == NULL)
+		return -ENODEV;
+
+	mutex_lock(&sram_lock);
+	list_del(&info->node);
+	mutex_unlock(&sram_lock);
+
+	gen_pool_destroy(info->gpool);
+	iounmap(info->sram_virt);
+	kfree(info->pool_name);
+	kfree(info);
+	return 0;
+}
+
+static const struct platform_device_id sram_id_table[] = {
+	{ "asram", MMP_ASRAM },
+	{ "isram", MMP_ISRAM },
+	{ }
+};
+
+static struct platform_driver sram_driver = {
+	.probe		= sram_probe,
+	.remove		= sram_remove,
+	.driver		= {
+		.name	= "mmp-sram",
+	},
+	.id_table	= sram_id_table,
+};
+
+static int __init sram_init(void)
+{
+	return platform_driver_register(&sram_driver);
+}
+core_initcall(sram_init);
+
+MODULE_LICENSE("GPL");
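For context, a sketch of how a client driver might consume one of the pools exported by the new sram driver above; the allocation size, the error handling and the helper names are illustrative, not taken from an in-tree user:

#include <linux/genalloc.h>
#include <mach/sram.h>

/* Carve a block out of the MMP2 audio SRAM ("asram") pool. */
static void *example_grab_asram(size_t size, phys_addr_t *phys)
{
	struct gen_pool *pool = sram_get_gpool("asram");
	unsigned long vaddr;

	if (!pool)
		return NULL;		/* pool not registered (yet) */

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;		/* pool exhausted */

	/* The pool was populated with gen_pool_add_virt(), so the
	 * physical address can be recovered for DMA-style users. */
	*phys = gen_pool_virt_to_phys(pool, vaddr);
	return (void *)vaddr;
}

static void example_release_asram(void *vaddr, size_t size)
{
	struct gen_pool *pool = sram_get_gpool("asram");

	if (pool && vaddr)
		gen_pool_free(pool, (unsigned long)vaddr, size);
}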
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index 2028464cf5b9..f79b7d2a8ed4 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -193,7 +193,8 @@ static int __init omap2430_i2c_init(void)
 {
 	omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo,
			ARRAY_SIZE(sdp2430_i2c1_boardinfo));
-	omap2_pmic_init("twl4030", &sdp2430_twldata);
+	omap_pmic_init(2, 100, "twl4030", INT_24XX_SYS_NIRQ,
+			&sdp2430_twldata);
 	return 0;
 }

diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index a9b45c76e1d3..097a42d81e59 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -137,8 +137,7 @@ static void omap4_hsmmc1_before_set_reg(struct device *dev, int slot,
 	 */
 	reg = omap4_ctrl_pad_readl(control_pbias_offset);
 	reg &= ~(OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);
 }

@@ -156,8 +155,7 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 	else
 		reg |= OMAP4_MMC1_PBIASLITE_VMODE_MASK;
 	reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
-		OMAP4_MMC1_PWRDNZ_MASK |
-		OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+		OMAP4_MMC1_PWRDNZ_MASK);
 	omap4_ctrl_pad_writel(reg, control_pbias_offset);

 	timeout = jiffies + msecs_to_jiffies(5);
@@ -171,16 +169,14 @@ static void omap4_hsmmc1_after_set_reg(struct device *dev, int slot,
 		if (reg & OMAP4_MMC1_PBIASLITE_VMODE_ERROR_MASK) {
 			pr_err("Pbias Voltage is not same as LDO\n");
 			/* Caution : On VMODE_ERROR Power Down MMC IO */
-			reg &= ~(OMAP4_MMC1_PWRDNZ_MASK |
-				OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+			reg &= ~(OMAP4_MMC1_PWRDNZ_MASK);
 			omap4_ctrl_pad_writel(reg, control_pbias_offset);
 		}
 	} else {
 		reg = omap4_ctrl_pad_readl(control_pbias_offset);
 		reg |= (OMAP4_MMC1_PBIASLITE_PWRDNZ_MASK |
			OMAP4_MMC1_PWRDNZ_MASK |
-			OMAP4_MMC1_PBIASLITE_VMODE_MASK |
-			OMAP4_USBC1_ICUSB_PWRDNZ_MASK);
+			OMAP4_MMC1_PBIASLITE_VMODE_MASK);
 		omap4_ctrl_pad_writel(reg, control_pbias_offset);
 	}
 }
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index a65145b02a55..19e4dac62a8c 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -137,9 +137,6 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
 	musb_plat.mode = board_data->mode;
 	musb_plat.extvbus = board_data->extvbus;

-	if (cpu_is_omap44xx())
-		omap4430_phy_init(dev);
-
 	if (cpu_is_omap3517() || cpu_is_omap3505()) {
 		oh_name = "am35x_otg_hs";
 		name = "musb-am35x";
diff --git a/arch/arm/mach-s3c2410/s3c2410.c b/arch/arm/mach-s3c2410/s3c2410.c
index a99c2f4a523f..3d7ebc557a72 100644
--- a/arch/arm/mach-s3c2410/s3c2410.c
+++ b/arch/arm/mach-s3c2410/s3c2410.c
@@ -170,7 +170,9 @@ int __init s3c2410_init(void)
 {
 	printk("S3C2410: Initialising architecture\n");

+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);

 	return sysdev_register(&s3c2410_sysdev);
diff --git a/arch/arm/mach-s3c2412/s3c2412.c b/arch/arm/mach-s3c2412/s3c2412.c
index ef0958d3e5c6..57a1e01e4e50 100644
--- a/arch/arm/mach-s3c2412/s3c2412.c
+++ b/arch/arm/mach-s3c2412/s3c2412.c
@@ -245,7 +245,9 @@ int __init s3c2412_init(void)
 {
 	printk("S3C2412: Initialising architecture\n");

+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2412_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);

 	return sysdev_register(&s3c2412_sysdev);
diff --git a/arch/arm/mach-s3c2416/s3c2416.c b/arch/arm/mach-s3c2416/s3c2416.c
index 081ef4cb8688..ee214bc83c83 100644
--- a/arch/arm/mach-s3c2416/s3c2416.c
+++ b/arch/arm/mach-s3c2416/s3c2416.c
@@ -100,7 +100,9 @@ int __init s3c2416_init(void)

 	s3c_adc_setname("s3c2416-adc");

+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2416_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);

 	return sysdev_register(&s3c2416_sysdev);
diff --git a/arch/arm/mach-s3c2440/s3c2440.c b/arch/arm/mach-s3c2440/s3c2440.c
index fc84e481efcf..37f8cc6aabd4 100644
--- a/arch/arm/mach-s3c2440/s3c2440.c
+++ b/arch/arm/mach-s3c2440/s3c2440.c
@@ -55,7 +55,9 @@ int __init s3c2440_init(void)

 	/* register suspend/resume handlers */

+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c244x_pm_syscore_ops);
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);

diff --git a/arch/arm/mach-s3c2440/s3c2442.c b/arch/arm/mach-s3c2440/s3c2442.c
index 48e273ce9f9a..2c822e09392f 100644
--- a/arch/arm/mach-s3c2440/s3c2442.c
+++ b/arch/arm/mach-s3c2440/s3c2442.c
@@ -169,7 +169,9 @@ int __init s3c2442_init(void)
 {
 	printk("S3C2442: Initialising architecture\n");

+#ifdef CONFIG_PM
 	register_syscore_ops(&s3c2410_pm_syscore_ops);
+#endif
 	register_syscore_ops(&s3c244x_pm_syscore_ops);
 	register_syscore_ops(&s3c24xx_irq_syscore_ops);

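The five s3c24xx hunks above share one pattern: the *_pm_syscore_ops objects are only built when CONFIG_PM is set, so registering them has to be compiled out as well or !CONFIG_PM kernels fail to link. A minimal sketch of the pattern, with hypothetical names rather than anything from the tree:

#include <linux/syscore_ops.h>

#ifdef CONFIG_PM
static int example_pm_suspend(void)
{
	/* save controller state */
	return 0;
}

static void example_pm_resume(void)
{
	/* restore controller state */
}

static struct syscore_ops example_pm_syscore_ops = {
	.suspend	= example_pm_suspend,
	.resume		= example_pm_resume,
};
#endif

static int __init example_init(void)
{
#ifdef CONFIG_PM
	register_syscore_ops(&example_pm_syscore_ops);
#endif
	return 0;
}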
diff --git a/arch/arm/mach-tegra/cpu-tegra.c b/arch/arm/mach-tegra/cpu-tegra.c
index 0e1016a827ac..0e0fd4d889bd 100644
--- a/arch/arm/mach-tegra/cpu-tegra.c
+++ b/arch/arm/mach-tegra/cpu-tegra.c
@@ -32,7 +32,6 @@

 #include <asm/system.h>

-#include <mach/hardware.h>
 #include <mach/clk.h>

 /* Frequency table index must be sequential starting at 0 */
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 4210cb434dbc..a3e0c8692f0d 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -6,6 +6,7 @@ config UX500_SOC_COMMON
 	select ARM_GIC
 	select HAS_MTU
 	select ARM_ERRATA_753970
+	select ARM_ERRATA_754322

 menu "Ux500 SoC"

diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index cc7e2d8be9aa..f8037ba338ac 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -496,6 +496,13 @@ static void __init free_unused_memmap(struct meminfo *mi)
 		 */
 		bank_start = min(bank_start,
				 ALIGN(prev_bank_end, PAGES_PER_SECTION));
+#else
+		/*
+		 * Align down here since the VM subsystem insists that the
+		 * memmap entries are valid from the bank start aligned to
+		 * MAX_ORDER_NR_PAGES.
+		 */
+		bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
 #endif
 		/*
 		 * If we had a previous bank, and there is a space
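round_down() in the new #else branch simply clears the low-order bits, pulling the bank start back to the previous MAX_ORDER boundary. A tiny sketch assuming MAX_ORDER_NR_PAGES is 1024 (the usual value when MAX_ORDER is 11; the pfn values are made up):

#include <linux/kernel.h>	/* round_down() */

/* round_down(x, y) is x & ~(y - 1) for power-of-two y, so a bank
 * starting at pfn 263000 is aligned back to pfn 262144 (256 * 1024). */
static unsigned long example_bank_start(unsigned long bank_start_pfn)
{
	return round_down(bank_start_pfn, 1024);
}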
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c
index a566523d34ec..1fdfaa4599ce 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-s5p/irq-gpioint.c
@@ -163,9 +163,9 @@ static __init int s5p_gpioint_add(struct samsung_gpio_chip *chip)
 	ct->chip.irq_mask = irq_gc_mask_set_bit;
 	ct->chip.irq_unmask = irq_gc_mask_clr_bit;
 	ct->chip.irq_set_type = s5p_gpioint_set_type,
-	ct->regs.ack = PEND_OFFSET + REG_OFFSET(chip->group);
-	ct->regs.mask = MASK_OFFSET + REG_OFFSET(chip->group);
-	ct->regs.type = CON_OFFSET + REG_OFFSET(chip->group);
+	ct->regs.ack = PEND_OFFSET + REG_OFFSET(group - bank->start);
+	ct->regs.mask = MASK_OFFSET + REG_OFFSET(group - bank->start);
+	ct->regs.type = CON_OFFSET + REG_OFFSET(group - bank->start);
 	irq_setup_generic_chip(gc, IRQ_MSK(chip->chip.ngpio),
			       IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST | IRQ_NOPROBE, 0);
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 177cdaf83564..b122adc8bdbb 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -24,6 +24,7 @@ config MIPS
 	select GENERIC_IRQ_PROBE
 	select GENERIC_IRQ_SHOW
 	select HAVE_ARCH_JUMP_LABEL
+	select IRQ_FORCED_THREADING

 menu "Machine selection"

@@ -722,6 +723,7 @@ config CAVIUM_OCTEON_SIMULATOR
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_SUPPORTS_HOTPLUG_CPU
 	select SYS_HAS_CPU_CAVIUM_OCTEON
+	select HOLES_IN_ZONE
 	help
 	  The Octeon simulator is software performance model of the Cavium
 	  Octeon Processor. It supports simulating Octeon processors on x86
@@ -744,6 +746,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
 	select ZONE_DMA32
 	select USB_ARCH_HAS_OHCI
 	select USB_ARCH_HAS_EHCI
+	select HOLES_IN_ZONE
 	help
 	  This option supports all of the Octeon reference boards from Cavium
 	  Networks. It builds a kernel that dynamically determines the Octeon
@@ -973,6 +976,9 @@ config ISA_DMA_API
 config GENERIC_GPIO
 	bool

+config HOLES_IN_ZONE
+	bool
+
 #
 # Endianess selection. Sufficiently obscure so many users don't know what to
 # answer,so we try hard to limit the available choices. Also the use of a
diff --git a/arch/mips/alchemy/common/platform.c b/arch/mips/alchemy/common/platform.c
index 3b2c18b14341..f72c48d4804c 100644
--- a/arch/mips/alchemy/common/platform.c
+++ b/arch/mips/alchemy/common/platform.c
@@ -492,7 +492,7 @@ static void __init alchemy_setup_macs(int ctype)
 		memcpy(au1xxx_eth0_platform_data.mac, ethaddr, 6);

 	ret = platform_device_register(&au1xxx_eth0_device);
-	if (!ret)
+	if (ret)
 		printk(KERN_INFO "Alchemy: failed to register MAC0\n");


diff --git a/arch/mips/alchemy/common/power.c b/arch/mips/alchemy/common/power.c
index 647e518c90bc..b86324a42601 100644
--- a/arch/mips/alchemy/common/power.c
+++ b/arch/mips/alchemy/common/power.c
@@ -158,15 +158,21 @@ static void restore_core_regs(void)

 void au_sleep(void)
 {
-	int cpuid = alchemy_get_cputype();
-	if (cpuid != ALCHEMY_CPU_UNKNOWN) {
-		save_core_regs();
-		if (cpuid <= ALCHEMY_CPU_AU1500)
-			alchemy_sleep_au1000();
-		else if (cpuid <= ALCHEMY_CPU_AU1200)
-			alchemy_sleep_au1550();
-		restore_core_regs();
+	save_core_regs();
+
+	switch (alchemy_get_cputype()) {
+	case ALCHEMY_CPU_AU1000:
+	case ALCHEMY_CPU_AU1500:
+	case ALCHEMY_CPU_AU1100:
+		alchemy_sleep_au1000();
+		break;
+	case ALCHEMY_CPU_AU1550:
+	case ALCHEMY_CPU_AU1200:
+		alchemy_sleep_au1550();
+		break;
 	}
+
+	restore_core_regs();
 }

 #endif /* CONFIG_PM */
diff --git a/arch/mips/alchemy/devboards/bcsr.c b/arch/mips/alchemy/devboards/bcsr.c
index 596ad00e7f05..463d2c4d9441 100644
--- a/arch/mips/alchemy/devboards/bcsr.c
+++ b/arch/mips/alchemy/devboards/bcsr.c
@@ -89,8 +89,12 @@ static void bcsr_csc_handler(unsigned int irq, struct irq_desc *d)
 {
 	unsigned short bisr = __raw_readw(bcsr_virt + BCSR_REG_INTSTAT);

+	disable_irq_nosync(irq);
+
 	for ( ; bisr; bisr &= bisr - 1)
 		generic_handle_irq(bcsr_csc_base + __ffs(bisr));
+
+	enable_irq(irq);
 }

 /* NOTE: both the enable and mask bits must be cleared, otherwise the
diff --git a/arch/mips/alchemy/devboards/db1200/setup.c b/arch/mips/alchemy/devboards/db1200/setup.c
index 1dac4f27d334..4a8980027ecf 100644
--- a/arch/mips/alchemy/devboards/db1200/setup.c
+++ b/arch/mips/alchemy/devboards/db1200/setup.c
@@ -23,13 +23,6 @@ void __init board_setup(void)
 	unsigned long freq0, clksrc, div, pfc;
 	unsigned short whoami;

-	/* Set Config[OD] (disable overlapping bus transaction):
-	 * This gets rid of a _lot_ of spurious interrupts (especially
-	 * wrt. IDE); but incurs ~10% performance hit in some
-	 * cpu-bound applications.
-	 */
-	set_c0_config(1 << 19);
-
 	bcsr_init(DB1200_BCSR_PHYS_ADDR,
		  DB1200_BCSR_PHYS_ADDR + DB1200_BCSR_HEXLED_OFS);

diff --git a/arch/mips/ar7/irq.c b/arch/mips/ar7/irq.c
index 03db3daadbd8..88c4babfdb5d 100644
--- a/arch/mips/ar7/irq.c
+++ b/arch/mips/ar7/irq.c
@@ -98,7 +98,8 @@ static struct irq_chip ar7_sec_irq_type = {

 static struct irqaction ar7_cascade_action = {
 	.handler = no_action,
-	.name = "AR7 cascade interrupt"
+	.name = "AR7 cascade interrupt",
+	.flags = IRQF_NO_THREAD,
 };

 static void __init ar7_irq_init(int base)
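This and the following MIPS hunks all flag cascade irqactions with IRQF_NO_THREAD: with IRQ_FORCED_THREADING now selected for MIPS (see the arch/mips/Kconfig hunk earlier), handlers may be pushed into kernel threads, but a cascade demultiplexer has to stay in hard-irq context. A sketch of the pattern; the IRQ number and name here are made-up examples:

#include <linux/interrupt.h>
#include <linux/irq.h>

static struct irqaction example_cascade_action = {
	.handler = no_action,	/* real demux happens in the chained handler */
	.name	 = "example cascade",
	.flags	 = IRQF_NO_THREAD,
};

static void __init example_cascade_init(void)
{
	setup_irq(2 /* hypothetical cascade line */, &example_cascade_action);
}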
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index cea6021cb8d7..162e11b4ed75 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -222,6 +222,7 @@ static struct irq_chip bcm63xx_external_irq_chip = {
 static struct irqaction cpu_ip2_cascade_action = {
 	.handler = no_action,
 	.name = "cascade_ip2",
+	.flags = IRQF_NO_THREAD,
 };

 void __init arch_init_irq(void)
diff --git a/arch/mips/cobalt/irq.c b/arch/mips/cobalt/irq.c
index cb9bf820fe53..965c777d3561 100644
--- a/arch/mips/cobalt/irq.c
+++ b/arch/mips/cobalt/irq.c
@@ -48,6 +48,7 @@ asmlinkage void plat_irq_dispatch(void)
 static struct irqaction cascade = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };

 void __init arch_init_irq(void)
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index fa45e924be05..f7b7ba6d5c45 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -101,20 +101,24 @@ int cpu_fpu_mask = DEC_CPU_IRQ_MASK(DEC_CPU_INR_FPU);
 static struct irqaction ioirq = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };
 static struct irqaction fpuirq = {
 	.handler = no_action,
 	.name = "fpu",
+	.flags = IRQF_NO_THREAD,
 };

 static struct irqaction busirq = {
 	.flags = IRQF_DISABLED,
 	.name = "bus error",
+	.flags = IRQF_NO_THREAD,
 };

 static struct irqaction haltirq = {
 	.handler = dec_intr_halt,
 	.name = "halt",
+	.flags = IRQF_NO_THREAD,
 };


diff --git a/arch/mips/emma/markeins/irq.c b/arch/mips/emma/markeins/irq.c
index 3dbd7a5a6ad3..7798887a1288 100644
--- a/arch/mips/emma/markeins/irq.c
+++ b/arch/mips/emma/markeins/irq.c
@@ -169,7 +169,7 @@ void emma2rh_gpio_irq_init(void)

 static struct irqaction irq_cascade = {
 	.handler = no_action,
-	.flags = 0,
+	.flags = IRQF_NO_THREAD,
 	.name = "cascade",
 	.dev_id = NULL,
 	.next = NULL,
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index 0d5a42b5f47a..a58addb98cfd 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -54,7 +54,6 @@
 #define cpu_has_mips_r2_exec_hazard 0
 #define cpu_has_dsp		0
 #define cpu_has_mipsmt		0
-#define cpu_has_userlocal	0
 #define cpu_has_vint		0
 #define cpu_has_veic		0
 #define cpu_hwrena_impl_bits	0xc0000000
diff --git a/arch/mips/include/asm/mach-powertv/dma-coherence.h b/arch/mips/include/asm/mach-powertv/dma-coherence.h
index 62c094085947..35371641575d 100644
--- a/arch/mips/include/asm/mach-powertv/dma-coherence.h
+++ b/arch/mips/include/asm/mach-powertv/dma-coherence.h
@@ -13,7 +13,6 @@
 #define __ASM_MACH_POWERTV_DMA_COHERENCE_H

 #include <linux/sched.h>
-#include <linux/version.h>
 #include <linux/device.h>
 #include <asm/mach-powertv/asic.h>

diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index b4ba2449444b..cb41af5f3406 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -195,9 +195,9 @@
		 * to cover the pipeline delay.
		 */
		.set	mips32
-		mfc0	v1, CP0_TCSTATUS
+		mfc0	k0, CP0_TCSTATUS
		.set	mips0
-		LONG_S	v1, PT_TCSTATUS(sp)
+		LONG_S	k0, PT_TCSTATUS(sp)
 #endif /* CONFIG_MIPS_MT_SMTC */
		LONG_S	$4, PT_R4(sp)
		LONG_S	$5, PT_R5(sp)
diff --git a/arch/mips/jz4740/gpio.c b/arch/mips/jz4740/gpio.c
index 73031f7fc827..4397972949fa 100644
--- a/arch/mips/jz4740/gpio.c
+++ b/arch/mips/jz4740/gpio.c
@@ -18,7 +18,7 @@
 #include <linux/init.h>

 #include <linux/spinlock.h>
-#include <linux/sysdev.h>
+#include <linux/syscore_ops.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
@@ -86,7 +86,6 @@ struct jz_gpio_chip {
 	spinlock_t lock;

 	struct gpio_chip gpio_chip;
-	struct sys_device sysdev;
 };

 static struct jz_gpio_chip jz4740_gpio_chips[];
@@ -459,49 +458,47 @@ static struct jz_gpio_chip jz4740_gpio_chips[] = {
 	JZ4740_GPIO_CHIP(D),
 };

-static inline struct jz_gpio_chip *sysdev_to_chip(struct sys_device *dev)
+static void jz4740_gpio_suspend_chip(struct jz_gpio_chip *chip)
 {
-	return container_of(dev, struct jz_gpio_chip, sysdev);
+	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
+	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
+	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
 }

-static int jz4740_gpio_suspend(struct sys_device *dev, pm_message_t state)
+static int jz4740_gpio_suspend(void)
 {
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
+	int i;

-	chip->suspend_mask = readl(chip->base + JZ_REG_GPIO_MASK);
-	writel(~(chip->wakeup), chip->base + JZ_REG_GPIO_MASK_SET);
-	writel(chip->wakeup, chip->base + JZ_REG_GPIO_MASK_CLEAR);
+	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); i++)
+		jz4740_gpio_suspend_chip(&jz4740_gpio_chips[i]);

 	return 0;
 }

-static int jz4740_gpio_resume(struct sys_device *dev)
+static void jz4740_gpio_resume_chip(struct jz_gpio_chip *chip)
 {
-	struct jz_gpio_chip *chip = sysdev_to_chip(dev);
 	uint32_t mask = chip->suspend_mask;

 	writel(~mask, chip->base + JZ_REG_GPIO_MASK_CLEAR);
 	writel(mask, chip->base + JZ_REG_GPIO_MASK_SET);
+}

-	return 0;
+static void jz4740_gpio_resume(void)
+{
+	int i;
+
+	for (i = ARRAY_SIZE(jz4740_gpio_chips) - 1; i >= 0 ; i--)
+		jz4740_gpio_resume_chip(&jz4740_gpio_chips[i]);
 }

-static struct sysdev_class jz4740_gpio_sysdev_class = {
-	.name = "gpio",
+static struct syscore_ops jz4740_gpio_syscore_ops = {
 	.suspend = jz4740_gpio_suspend,
 	.resume = jz4740_gpio_resume,
 };

-static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
+static void jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 {
-	int ret, irq;
-
-	chip->sysdev.id = id;
-	chip->sysdev.cls = &jz4740_gpio_sysdev_class;
-	ret = sysdev_register(&chip->sysdev);
-
-	if (ret)
-		return ret;
+	int irq;

 	spin_lock_init(&chip->lock);

@@ -519,22 +516,17 @@ static int jz4740_gpio_chip_init(struct jz_gpio_chip *chip, unsigned int id)
 		irq_set_chip_and_handler(irq, &jz_gpio_irq_chip,
			handle_level_irq);
 	}
-
-	return 0;
 }

 static int __init jz4740_gpio_init(void)
 {
 	unsigned int i;
-	int ret;
-
-	ret = sysdev_class_register(&jz4740_gpio_sysdev_class);
-	if (ret)
-		return ret;

 	for (i = 0; i < ARRAY_SIZE(jz4740_gpio_chips); ++i)
		jz4740_gpio_chip_init(&jz4740_gpio_chips[i], i);

+	register_syscore_ops(&jz4740_gpio_syscore_ops);
+
 	printk(KERN_INFO "JZ4740 GPIO initialized\n");

 	return 0;
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index feb8021a305f..6a2d758dd8e9 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -19,6 +19,26 @@

 #include <asm-generic/sections.h>

+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+/*
+ * Check if the address is in kernel space
+ *
+ * Clone core_kernel_text() from kernel/extable.c, but doesn't call
+ * init_kernel_text() for Ftrace doesn't trace functions in init sections.
+ */
+static inline int in_kernel_space(unsigned long ip)
+{
+	if (ip >= (unsigned long)_stext &&
+	    ip <= (unsigned long)_etext)
+		return 1;
+	return 0;
+}
+
 #ifdef CONFIG_DYNAMIC_FTRACE

 #define JAL 0x0c000000		/* jump & link: ip --> ra, jump to target */
@@ -54,20 +74,6 @@ static inline void ftrace_dyn_arch_init_insns(void)
 #endif
 }

-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-	if (ip >= (unsigned long)_stext &&
-	    ip <= (unsigned long)_etext)
-		return 1;
-	return 0;
-}
-
 static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
 {
 	int faulted;
@@ -112,11 +118,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
  * 1: offset = 4 instructions
  */

-#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
-#define MCOUNT_OFFSET_INSNS 5
-#else
-#define MCOUNT_OFFSET_INSNS 4
-#endif
 #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)

 int ftrace_make_nop(struct module *mod,
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 5c74eb797f08..32b397b646ee 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -229,7 +229,7 @@ static void i8259A_shutdown(void)
	 */
 	if (i8259A_auto_eoi >= 0) {
 		outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
-		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
+		outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 	}
 }

@@ -295,6 +295,7 @@ static void init_8259A(int auto_eoi)
 static struct irqaction irq2 = {
 	.handler = no_action,
 	.name = "cascade",
+	.flags = IRQF_NO_THREAD,
 };

 static struct resource pic1_io_resource = {
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 876a75cc376f..922a554cd108 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -349,3 +349,10 @@ SYSCALL_DEFINE6(32_fanotify_mark, int, fanotify_fd, unsigned int, flags,
 	return sys_fanotify_mark(fanotify_fd, flags, merge_64(a3, a4),
				 dfd, pathname);
 }
+
+SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
+		struct compat_timespec __user *, utime, u32 __user *, uaddr2,
+		u32, val3)
+{
+	return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
+}
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index f9296e894e46..6de1f598346e 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -315,7 +315,7 @@ EXPORT(sysn32_call_table) | |||
315 | PTR sys_fremovexattr | 315 | PTR sys_fremovexattr |
316 | PTR sys_tkill | 316 | PTR sys_tkill |
317 | PTR sys_ni_syscall | 317 | PTR sys_ni_syscall |
318 | PTR compat_sys_futex | 318 | PTR sys_32_futex |
319 | PTR compat_sys_sched_setaffinity /* 6195 */ | 319 | PTR compat_sys_sched_setaffinity /* 6195 */ |
320 | PTR compat_sys_sched_getaffinity | 320 | PTR compat_sys_sched_getaffinity |
321 | PTR sys_cacheflush | 321 | PTR sys_cacheflush |
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 4d7c9827706f..1d813169e453 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -441,7 +441,7 @@ sys_call_table: | |||
441 | PTR sys_fremovexattr /* 4235 */ | 441 | PTR sys_fremovexattr /* 4235 */ |
442 | PTR sys_tkill | 442 | PTR sys_tkill |
443 | PTR sys_sendfile64 | 443 | PTR sys_sendfile64 |
444 | PTR compat_sys_futex | 444 | PTR sys_32_futex |
445 | PTR compat_sys_sched_setaffinity | 445 | PTR compat_sys_sched_setaffinity |
446 | PTR compat_sys_sched_getaffinity /* 4240 */ | 446 | PTR compat_sys_sched_getaffinity /* 4240 */ |
447 | PTR compat_sys_io_setup | 447 | PTR compat_sys_io_setup |
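Note: together with the wrapper added in linux32.c above, the n32 and o32 tables now reach the compat futex code through an arch-level SYSCALL_DEFINE6 wrapper instead of pointing at compat_sys_futex directly; the rationale is assumed to be MIPS argument handling for six-argument compat syscalls. Wrapper body as in the patch, with the table usage noted:

	SYSCALL_DEFINE6(32_futex, u32 __user *, uaddr, int, op, u32, val,
		struct compat_timespec __user *, utime, u32 __user *, uaddr2,
		u32, val3)
	{
		return compat_sys_futex(uaddr, op, val, utime, uaddr2, val3);
	}
	/* scall64-n32.S and scall64-o32.S then carry PTR sys_32_futex. */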
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c index dbbe0ce48d89..f8524003676a 100644 --- a/arch/mips/kernel/signal.c +++ b/arch/mips/kernel/signal.c | |||
@@ -8,6 +8,7 @@ | |||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | 8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. |
9 | */ | 9 | */ |
10 | #include <linux/cache.h> | 10 | #include <linux/cache.h> |
11 | #include <linux/irqflags.h> | ||
11 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
12 | #include <linux/mm.h> | 13 | #include <linux/mm.h> |
13 | #include <linux/personality.h> | 14 | #include <linux/personality.h> |
@@ -658,6 +659,8 @@ static void do_signal(struct pt_regs *regs) | |||
658 | asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | 659 | asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, |
659 | __u32 thread_info_flags) | 660 | __u32 thread_info_flags) |
660 | { | 661 | { |
662 | local_irq_enable(); | ||
663 | |||
661 | /* deal with pending signal delivery */ | 664 | /* deal with pending signal delivery */ |
662 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) | 665 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) |
663 | do_signal(regs); | 666 | do_signal(regs); |
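Note: a condensed sketch of the patched entry point. do_notify_resume() can be reached from return paths with interrupts still disabled, and the added local_irq_enable() turns them back on before signal delivery:

	asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
		__u32 thread_info_flags)
	{
		local_irq_enable();	/* may arrive here with IRQs disabled */

		/* deal with pending signal delivery */
		if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK))
			do_signal(regs);
		/* remainder of the function is unchanged */
	}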
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index b7517e3abc85..cbea618af0b4 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/bug.h> | 14 | #include <linux/bug.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
17 | #include <linux/kernel.h> | ||
17 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
18 | #include <linux/module.h> | 19 | #include <linux/module.h> |
19 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
@@ -364,21 +365,26 @@ static int regs_to_trapnr(struct pt_regs *regs) | |||
364 | return (regs->cp0_cause >> 2) & 0x1f; | 365 | return (regs->cp0_cause >> 2) & 0x1f; |
365 | } | 366 | } |
366 | 367 | ||
367 | static DEFINE_SPINLOCK(die_lock); | 368 | static DEFINE_RAW_SPINLOCK(die_lock); |
368 | 369 | ||
369 | void __noreturn die(const char *str, struct pt_regs *regs) | 370 | void __noreturn die(const char *str, struct pt_regs *regs) |
370 | { | 371 | { |
371 | static int die_counter; | 372 | static int die_counter; |
372 | int sig = SIGSEGV; | 373 | int sig = SIGSEGV; |
373 | #ifdef CONFIG_MIPS_MT_SMTC | 374 | #ifdef CONFIG_MIPS_MT_SMTC |
374 | unsigned long dvpret = dvpe(); | 375 | unsigned long dvpret; |
375 | #endif /* CONFIG_MIPS_MT_SMTC */ | 376 | #endif /* CONFIG_MIPS_MT_SMTC */ |
376 | 377 | ||
378 | oops_enter(); | ||
379 | |||
377 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) | 380 | if (notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV) == NOTIFY_STOP) |
378 | sig = 0; | 381 | sig = 0; |
379 | 382 | ||
380 | console_verbose(); | 383 | console_verbose(); |
381 | spin_lock_irq(&die_lock); | 384 | raw_spin_lock_irq(&die_lock); |
385 | #ifdef CONFIG_MIPS_MT_SMTC | ||
386 | dvpret = dvpe(); | ||
387 | #endif /* CONFIG_MIPS_MT_SMTC */ | ||
382 | bust_spinlocks(1); | 388 | bust_spinlocks(1); |
383 | #ifdef CONFIG_MIPS_MT_SMTC | 389 | #ifdef CONFIG_MIPS_MT_SMTC |
384 | mips_mt_regdump(dvpret); | 390 | mips_mt_regdump(dvpret); |
@@ -387,7 +393,9 @@ void __noreturn die(const char *str, struct pt_regs *regs) | |||
387 | printk("%s[#%d]:\n", str, ++die_counter); | 393 | printk("%s[#%d]:\n", str, ++die_counter); |
388 | show_registers(regs); | 394 | show_registers(regs); |
389 | add_taint(TAINT_DIE); | 395 | add_taint(TAINT_DIE); |
390 | spin_unlock_irq(&die_lock); | 396 | raw_spin_unlock_irq(&die_lock); |
397 | |||
398 | oops_exit(); | ||
391 | 399 | ||
392 | if (in_interrupt()) | 400 | if (in_interrupt()) |
393 | panic("Fatal exception in interrupt"); | 401 | panic("Fatal exception in interrupt"); |
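Note: the die() hunk swaps the spinlock for a raw spinlock (presumably so the oops path never depends on a sleeping lock), brackets the body with oops_enter()/oops_exit(), and defers the SMTC dvpe() call until the lock is held. Condensed flow, locals and SMTC details elided:

	void __noreturn die(const char *str, struct pt_regs *regs)
	{
		oops_enter();
		notify_die(DIE_OOPS, str, regs, 0, regs_to_trapnr(regs), SIGSEGV);

		console_verbose();
		raw_spin_lock_irq(&die_lock);	/* raw lock: safe in the oops path */
		/* ... dump registers, add_taint(TAINT_DIE) ... */
		raw_spin_unlock_irq(&die_lock);

		oops_exit();
		/* ... panic()/exit handling as before ... */
	}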
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index 2cd50ad0d5c6..3efcb065f78a 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -192,7 +192,7 @@ static struct tc *get_tc(int index) | |||
192 | } | 192 | } |
193 | spin_unlock(&vpecontrol.tc_list_lock); | 193 | spin_unlock(&vpecontrol.tc_list_lock); |
194 | 194 | ||
195 | return NULL; | 195 | return res; |
196 | } | 196 | } |
197 | 197 | ||
198 | /* allocate a vpe and associate it with this minor (or index) */ | 198 | /* allocate a vpe and associate it with this minor (or index) */ |
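Note: the vpe.c one-liner makes get_tc() actually return a successful lookup; before the change it always returned NULL. Reconstruction of the intended shape, with the loop body outside the hunk assumed rather than quoted:

	static struct tc *get_tc(int index)
	{
		struct tc *res = NULL;
		struct tc *t;

		spin_lock(&vpecontrol.tc_list_lock);
		list_for_each_entry(t, &vpecontrol.tc_list, list) {
			if (t->index == index) {
				res = t;
				break;
			}
		}
		spin_unlock(&vpecontrol.tc_list_lock);

		return res;	/* was: return NULL, discarding the result */
	}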
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c index fc89795cafdb..f9737bb3c5ab 100644 --- a/arch/mips/lantiq/irq.c +++ b/arch/mips/lantiq/irq.c | |||
@@ -123,11 +123,10 @@ void ltq_enable_irq(struct irq_data *d) | |||
123 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | 123 | static unsigned int ltq_startup_eiu_irq(struct irq_data *d) |
124 | { | 124 | { |
125 | int i; | 125 | int i; |
126 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
127 | 126 | ||
128 | ltq_enable_irq(d); | 127 | ltq_enable_irq(d); |
129 | for (i = 0; i < MAX_EIU; i++) { | 128 | for (i = 0; i < MAX_EIU; i++) { |
130 | if (irq_nr == ltq_eiu_irq[i]) { | 129 | if (d->irq == ltq_eiu_irq[i]) { |
131 | /* low level - we should really handle set_type */ | 130 | /* low level - we should really handle set_type */ |
132 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | | 131 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | |
133 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); | 132 | (0x6 << (i * 4)), LTQ_EIU_EXIN_C); |
@@ -147,11 +146,10 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d) | |||
147 | static void ltq_shutdown_eiu_irq(struct irq_data *d) | 146 | static void ltq_shutdown_eiu_irq(struct irq_data *d) |
148 | { | 147 | { |
149 | int i; | 148 | int i; |
150 | int irq_nr = d->irq - INT_NUM_IRQ0; | ||
151 | 149 | ||
152 | ltq_disable_irq(d); | 150 | ltq_disable_irq(d); |
153 | for (i = 0; i < MAX_EIU; i++) { | 151 | for (i = 0; i < MAX_EIU; i++) { |
154 | if (irq_nr == ltq_eiu_irq[i]) { | 152 | if (d->irq == ltq_eiu_irq[i]) { |
155 | /* disable */ | 153 | /* disable */ |
156 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), | 154 | ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), |
157 | LTQ_EIU_EXIN_INEN); | 155 | LTQ_EIU_EXIN_INEN); |
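Note: both EIU hunks drop the INT_NUM_IRQ0 offset and compare the table entries against d->irq directly, which suggests ltq_eiu_irq[] holds absolute Linux IRQ numbers rather than offsets. The lookup then reduces to:

	for (i = 0; i < MAX_EIU; i++) {
		if (d->irq == ltq_eiu_irq[i]) {
			/* configure or mask EIU line i for this irq */
		}
	}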
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c index 66eb52fa50a1..033b3184c7a7 100644 --- a/arch/mips/lantiq/xway/ebu.c +++ b/arch/mips/lantiq/xway/ebu.c | |||
@@ -10,7 +10,6 @@ | |||
10 | 10 | ||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/version.h> | ||
14 | #include <linux/ioport.h> | 13 | #include <linux/ioport.h> |
15 | 14 | ||
16 | #include <lantiq_soc.h> | 15 | #include <lantiq_soc.h> |
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c index 9d69f01e352b..39f0d2641cbf 100644 --- a/arch/mips/lantiq/xway/pmu.c +++ b/arch/mips/lantiq/xway/pmu.c | |||
@@ -8,7 +8,6 @@ | |||
8 | 8 | ||
9 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
10 | #include <linux/module.h> | 10 | #include <linux/module.h> |
11 | #include <linux/version.h> | ||
12 | #include <linux/ioport.h> | 11 | #include <linux/ioport.h> |
13 | 12 | ||
14 | #include <lantiq_soc.h> | 13 | #include <lantiq_soc.h> |
diff --git a/arch/mips/lasat/interrupt.c b/arch/mips/lasat/interrupt.c index de4c165515d7..d608b6ef0edd 100644 --- a/arch/mips/lasat/interrupt.c +++ b/arch/mips/lasat/interrupt.c | |||
@@ -105,6 +105,7 @@ asmlinkage void plat_irq_dispatch(void) | |||
105 | static struct irqaction cascade = { | 105 | static struct irqaction cascade = { |
106 | .handler = no_action, | 106 | .handler = no_action, |
107 | .name = "cascade", | 107 | .name = "cascade", |
108 | .flags = IRQF_NO_THREAD, | ||
108 | }; | 109 | }; |
109 | 110 | ||
110 | void __init arch_init_irq(void) | 111 | void __init arch_init_irq(void) |
diff --git a/arch/mips/loongson/fuloong-2e/irq.c b/arch/mips/loongson/fuloong-2e/irq.c index d61a04222b87..3cf1fef29f0e 100644 --- a/arch/mips/loongson/fuloong-2e/irq.c +++ b/arch/mips/loongson/fuloong-2e/irq.c | |||
@@ -42,6 +42,7 @@ asmlinkage void mach_irq_dispatch(unsigned int pending) | |||
42 | static struct irqaction cascade_irqaction = { | 42 | static struct irqaction cascade_irqaction = { |
43 | .handler = no_action, | 43 | .handler = no_action, |
44 | .name = "cascade", | 44 | .name = "cascade", |
45 | .flags = IRQF_NO_THREAD, | ||
45 | }; | 46 | }; |
46 | 47 | ||
47 | void __init mach_init_irq(void) | 48 | void __init mach_init_irq(void) |
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c index 081db102bb98..14b081841b6b 100644 --- a/arch/mips/loongson/lemote-2f/irq.c +++ b/arch/mips/loongson/lemote-2f/irq.c | |||
@@ -96,12 +96,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id) | |||
96 | struct irqaction ip6_irqaction = { | 96 | struct irqaction ip6_irqaction = { |
97 | .handler = ip6_action, | 97 | .handler = ip6_action, |
98 | .name = "cascade", | 98 | .name = "cascade", |
99 | .flags = IRQF_SHARED, | 99 | .flags = IRQF_SHARED | IRQF_NO_THREAD, |
100 | }; | 100 | }; |
101 | 101 | ||
102 | struct irqaction cascade_irqaction = { | 102 | struct irqaction cascade_irqaction = { |
103 | .handler = no_action, | 103 | .handler = no_action, |
104 | .name = "cascade", | 104 | .name = "cascade", |
105 | .flags = IRQF_NO_THREAD, | ||
105 | }; | 106 | }; |
106 | 107 | ||
107 | void __init mach_init_irq(void) | 108 | void __init mach_init_irq(void) |
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c index 9ff5d0fac556..302d779d5b0d 100644 --- a/arch/mips/mm/mmap.c +++ b/arch/mips/mm/mmap.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * Copyright (C) 2011 Wind River Systems, | 6 | * Copyright (C) 2011 Wind River Systems, |
7 | * written by Ralf Baechle <ralf@linux-mips.org> | 7 | * written by Ralf Baechle <ralf@linux-mips.org> |
8 | */ | 8 | */ |
9 | #include <linux/compiler.h> | ||
9 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
10 | #include <linux/mm.h> | 11 | #include <linux/mm.h> |
11 | #include <linux/mman.h> | 12 | #include <linux/mman.h> |
@@ -15,12 +16,11 @@ | |||
15 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
16 | 17 | ||
17 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ | 18 | unsigned long shm_align_mask = PAGE_SIZE - 1; /* Sane caches */ |
18 | |||
19 | EXPORT_SYMBOL(shm_align_mask); | 19 | EXPORT_SYMBOL(shm_align_mask); |
20 | 20 | ||
21 | /* gap between mmap and stack */ | 21 | /* gap between mmap and stack */ |
22 | #define MIN_GAP (128*1024*1024UL) | 22 | #define MIN_GAP (128*1024*1024UL) |
23 | #define MAX_GAP ((TASK_SIZE)/6*5) | 23 | #define MAX_GAP ((TASK_SIZE)/6*5) |
24 | 24 | ||
25 | static int mmap_is_legacy(void) | 25 | static int mmap_is_legacy(void) |
26 | { | 26 | { |
@@ -57,13 +57,13 @@ static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, | |||
57 | return base - off; | 57 | return base - off; |
58 | } | 58 | } |
59 | 59 | ||
60 | #define COLOUR_ALIGN(addr,pgoff) \ | 60 | #define COLOUR_ALIGN(addr, pgoff) \ |
61 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ | 61 | ((((addr) + shm_align_mask) & ~shm_align_mask) + \ |
62 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) | 62 | (((pgoff) << PAGE_SHIFT) & shm_align_mask)) |
63 | 63 | ||
64 | enum mmap_allocation_direction {UP, DOWN}; | 64 | enum mmap_allocation_direction {UP, DOWN}; |
65 | 65 | ||
66 | static unsigned long arch_get_unmapped_area_foo(struct file *filp, | 66 | static unsigned long arch_get_unmapped_area_common(struct file *filp, |
67 | unsigned long addr0, unsigned long len, unsigned long pgoff, | 67 | unsigned long addr0, unsigned long len, unsigned long pgoff, |
68 | unsigned long flags, enum mmap_allocation_direction dir) | 68 | unsigned long flags, enum mmap_allocation_direction dir) |
69 | { | 69 | { |
@@ -103,16 +103,16 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, | |||
103 | 103 | ||
104 | vma = find_vma(mm, addr); | 104 | vma = find_vma(mm, addr); |
105 | if (TASK_SIZE - len >= addr && | 105 | if (TASK_SIZE - len >= addr && |
106 | (!vma || addr + len <= vma->vm_start)) | 106 | (!vma || addr + len <= vma->vm_start)) |
107 | return addr; | 107 | return addr; |
108 | } | 108 | } |
109 | 109 | ||
110 | if (dir == UP) { | 110 | if (dir == UP) { |
111 | addr = mm->mmap_base; | 111 | addr = mm->mmap_base; |
112 | if (do_color_align) | 112 | if (do_color_align) |
113 | addr = COLOUR_ALIGN(addr, pgoff); | 113 | addr = COLOUR_ALIGN(addr, pgoff); |
114 | else | 114 | else |
115 | addr = PAGE_ALIGN(addr); | 115 | addr = PAGE_ALIGN(addr); |
116 | 116 | ||
117 | for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { | 117 | for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { |
118 | /* At this point: (!vma || addr < vma->vm_end). */ | 118 | /* At this point: (!vma || addr < vma->vm_end). */ |
@@ -131,28 +131,30 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, | |||
131 | mm->free_area_cache = mm->mmap_base; | 131 | mm->free_area_cache = mm->mmap_base; |
132 | } | 132 | } |
133 | 133 | ||
134 | /* either no address requested or can't fit in requested address hole */ | 134 | /* |
135 | * either no address requested, or the mapping can't fit into | ||
136 | * the requested address hole | ||
137 | */ | ||
135 | addr = mm->free_area_cache; | 138 | addr = mm->free_area_cache; |
136 | if (do_color_align) { | 139 | if (do_color_align) { |
137 | unsigned long base = | 140 | unsigned long base = |
138 | COLOUR_ALIGN_DOWN(addr - len, pgoff); | 141 | COLOUR_ALIGN_DOWN(addr - len, pgoff); |
139 | |||
140 | addr = base + len; | 142 | addr = base + len; |
141 | } | 143 | } |
142 | 144 | ||
143 | /* make sure it can fit in the remaining address space */ | 145 | /* make sure it can fit in the remaining address space */ |
144 | if (likely(addr > len)) { | 146 | if (likely(addr > len)) { |
145 | vma = find_vma(mm, addr - len); | 147 | vma = find_vma(mm, addr - len); |
146 | if (!vma || addr <= vma->vm_start) { | 148 | if (!vma || addr <= vma->vm_start) { |
147 | /* remember the address as a hint for next time */ | 149 | /* cache the address as a hint for next time */ |
148 | return mm->free_area_cache = addr-len; | 150 | return mm->free_area_cache = addr - len; |
149 | } | 151 | } |
150 | } | 152 | } |
151 | 153 | ||
152 | if (unlikely(mm->mmap_base < len)) | 154 | if (unlikely(mm->mmap_base < len)) |
153 | goto bottomup; | 155 | goto bottomup; |
154 | 156 | ||
155 | addr = mm->mmap_base-len; | 157 | addr = mm->mmap_base - len; |
156 | if (do_color_align) | 158 | if (do_color_align) |
157 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | 159 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); |
158 | 160 | ||
@@ -163,8 +165,8 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, | |||
163 | * return with success: | 165 | * return with success: |
164 | */ | 166 | */ |
165 | vma = find_vma(mm, addr); | 167 | vma = find_vma(mm, addr); |
166 | if (likely(!vma || addr+len <= vma->vm_start)) { | 168 | if (likely(!vma || addr + len <= vma->vm_start)) { |
167 | /* remember the address as a hint for next time */ | 169 | /* cache the address as a hint for next time */ |
168 | return mm->free_area_cache = addr; | 170 | return mm->free_area_cache = addr; |
169 | } | 171 | } |
170 | 172 | ||
@@ -173,7 +175,7 @@ static unsigned long arch_get_unmapped_area_foo(struct file *filp, | |||
173 | mm->cached_hole_size = vma->vm_start - addr; | 175 | mm->cached_hole_size = vma->vm_start - addr; |
174 | 176 | ||
175 | /* try just below the current vma->vm_start */ | 177 | /* try just below the current vma->vm_start */ |
176 | addr = vma->vm_start-len; | 178 | addr = vma->vm_start - len; |
177 | if (do_color_align) | 179 | if (do_color_align) |
178 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); | 180 | addr = COLOUR_ALIGN_DOWN(addr, pgoff); |
179 | } while (likely(len < vma->vm_start)); | 181 | } while (likely(len < vma->vm_start)); |
@@ -201,7 +203,7 @@ bottomup: | |||
201 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, | 203 | unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0, |
202 | unsigned long len, unsigned long pgoff, unsigned long flags) | 204 | unsigned long len, unsigned long pgoff, unsigned long flags) |
203 | { | 205 | { |
204 | return arch_get_unmapped_area_foo(filp, | 206 | return arch_get_unmapped_area_common(filp, |
205 | addr0, len, pgoff, flags, UP); | 207 | addr0, len, pgoff, flags, UP); |
206 | } | 208 | } |
207 | 209 | ||
@@ -213,7 +215,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp, | |||
213 | unsigned long addr0, unsigned long len, unsigned long pgoff, | 215 | unsigned long addr0, unsigned long len, unsigned long pgoff, |
214 | unsigned long flags) | 216 | unsigned long flags) |
215 | { | 217 | { |
216 | return arch_get_unmapped_area_foo(filp, | 218 | return arch_get_unmapped_area_common(filp, |
217 | addr0, len, pgoff, flags, DOWN); | 219 | addr0, len, pgoff, flags, DOWN); |
218 | } | 220 | } |
219 | 221 | ||
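Note: for readers unfamiliar with the colour alignment done in this file, a worked example of the COLOUR_ALIGN() macro above, using hypothetical values (64 KB alias boundary, 4 KB pages):

	/*
	 * Assume shm_align_mask == 0xffff and PAGE_SHIFT == 12:
	 *
	 *   addr = 0x20001234, pgoff = 3
	 *   ((addr + 0xffff) & ~0xffff)       = 0x20010000
	 *   ((pgoff << PAGE_SHIFT) & 0xffff)  = 0x00003000
	 *   COLOUR_ALIGN(addr, pgoff)         = 0x20013000
	 *
	 * i.e. the mapping is rounded up to the alias boundary and then offset
	 * so it shares a cache colour with the backing file offset.
	 */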
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c index b6e1cff50667..e06370f58ef3 100644 --- a/arch/mips/mm/tlbex.c +++ b/arch/mips/mm/tlbex.c | |||
@@ -1759,14 +1759,13 @@ static void __cpuinit build_r3000_tlb_modify_handler(void) | |||
1759 | u32 *p = handle_tlbm; | 1759 | u32 *p = handle_tlbm; |
1760 | struct uasm_label *l = labels; | 1760 | struct uasm_label *l = labels; |
1761 | struct uasm_reloc *r = relocs; | 1761 | struct uasm_reloc *r = relocs; |
1762 | struct work_registers wr; | ||
1763 | 1762 | ||
1764 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); | 1763 | memset(handle_tlbm, 0, sizeof(handle_tlbm)); |
1765 | memset(labels, 0, sizeof(labels)); | 1764 | memset(labels, 0, sizeof(labels)); |
1766 | memset(relocs, 0, sizeof(relocs)); | 1765 | memset(relocs, 0, sizeof(relocs)); |
1767 | 1766 | ||
1768 | build_r3000_tlbchange_handler_head(&p, K0, K1); | 1767 | build_r3000_tlbchange_handler_head(&p, K0, K1); |
1769 | build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm); | 1768 | build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm); |
1770 | uasm_i_nop(&p); /* load delay */ | 1769 | uasm_i_nop(&p); /* load delay */ |
1771 | build_make_write(&p, &r, K0, K1); | 1770 | build_make_write(&p, &r, K0, K1); |
1772 | build_r3000_pte_reload_tlbwi(&p, K0, K1); | 1771 | build_r3000_pte_reload_tlbwi(&p, K0, K1); |
@@ -1963,7 +1962,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void) | |||
1963 | uasm_i_andi(&p, wr.r3, wr.r3, 2); | 1962 | uasm_i_andi(&p, wr.r3, wr.r3, 2); |
1964 | uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); | 1963 | uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2); |
1965 | } | 1964 | } |
1966 | 1965 | if (PM_DEFAULT_MASK == 0) | |
1966 | uasm_i_nop(&p); | ||
1967 | /* | 1967 | /* |
1968 | * We clobbered C0_PAGEMASK, restore it. On the other branch | 1968 | * We clobbered C0_PAGEMASK, restore it. On the other branch |
1969 | * it is restored in build_huge_tlb_write_entry. | 1969 | * it is restored in build_huge_tlb_write_entry. |
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c index 1d36c511a7a5..d53ff91b277c 100644 --- a/arch/mips/mti-malta/malta-int.c +++ b/arch/mips/mti-malta/malta-int.c | |||
@@ -350,12 +350,14 @@ unsigned int plat_ipi_resched_int_xlate(unsigned int cpu) | |||
350 | 350 | ||
351 | static struct irqaction i8259irq = { | 351 | static struct irqaction i8259irq = { |
352 | .handler = no_action, | 352 | .handler = no_action, |
353 | .name = "XT-PIC cascade" | 353 | .name = "XT-PIC cascade", |
354 | .flags = IRQF_NO_THREAD, | ||
354 | }; | 355 | }; |
355 | 356 | ||
356 | static struct irqaction corehi_irqaction = { | 357 | static struct irqaction corehi_irqaction = { |
357 | .handler = no_action, | 358 | .handler = no_action, |
358 | .name = "CoreHi" | 359 | .name = "CoreHi", |
360 | .flags = IRQF_NO_THREAD, | ||
359 | }; | 361 | }; |
360 | 362 | ||
361 | static msc_irqmap_t __initdata msc_irqmap[] = { | 363 | static msc_irqmap_t __initdata msc_irqmap[] = { |
diff --git a/arch/mips/netlogic/xlr/Makefile b/arch/mips/netlogic/xlr/Makefile index 9bd3f731f62e..2dca585dd2f7 100644 --- a/arch/mips/netlogic/xlr/Makefile +++ b/arch/mips/netlogic/xlr/Makefile | |||
@@ -2,4 +2,4 @@ obj-y += setup.o platform.o irq.o setup.o time.o | |||
2 | obj-$(CONFIG_SMP) += smp.o smpboot.o | 2 | obj-$(CONFIG_SMP) += smp.o smpboot.o |
3 | obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o | 3 | obj-$(CONFIG_EARLY_PRINTK) += xlr_console.o |
4 | 4 | ||
5 | EXTRA_CFLAGS += -Werror | 5 | ccflags-y += -Werror |
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c index 603d7493e966..8656388b34bd 100644 --- a/arch/mips/pci/pci-lantiq.c +++ b/arch/mips/pci/pci-lantiq.c | |||
@@ -171,8 +171,13 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf) | |||
171 | u32 temp_buffer; | 171 | u32 temp_buffer; |
172 | 172 | ||
173 | /* set clock to 33Mhz */ | 173 | /* set clock to 33Mhz */ |
174 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); | 174 | if (ltq_is_ar9()) { |
175 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); | 175 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR); |
176 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR); | ||
177 | } else { | ||
178 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR); | ||
179 | ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR); | ||
180 | } | ||
176 | 181 | ||
177 | /* external or internal clock ? */ | 182 | /* external or internal clock ? */ |
178 | if (conf->clock) { | 183 | if (conf->clock) { |
diff --git a/arch/mips/pci/pci-rc32434.c b/arch/mips/pci/pci-rc32434.c index 764362ce5e40..5f3a69cebad1 100644 --- a/arch/mips/pci/pci-rc32434.c +++ b/arch/mips/pci/pci-rc32434.c | |||
@@ -215,7 +215,7 @@ static int __init rc32434_pci_init(void) | |||
215 | rc32434_pcibridge_init(); | 215 | rc32434_pcibridge_init(); |
216 | 216 | ||
217 | io_map_base = ioremap(rc32434_res_pci_io1.start, | 217 | io_map_base = ioremap(rc32434_res_pci_io1.start, |
218 | resource_size(&rcrc32434_res_pci_io1)); | 218 | resource_size(&rc32434_res_pci_io1)); |
219 | 219 | ||
220 | if (!io_map_base) | 220 | if (!io_map_base) |
221 | return -ENOMEM; | 221 | return -ENOMEM; |
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_irq.c b/arch/mips/pmc-sierra/msp71xx/msp_irq.c index 4531c4a514bc..d3c3d81757a5 100644 --- a/arch/mips/pmc-sierra/msp71xx/msp_irq.c +++ b/arch/mips/pmc-sierra/msp71xx/msp_irq.c | |||
@@ -108,12 +108,14 @@ asmlinkage void plat_irq_dispatch(struct pt_regs *regs) | |||
108 | 108 | ||
109 | static struct irqaction cic_cascade_msp = { | 109 | static struct irqaction cic_cascade_msp = { |
110 | .handler = no_action, | 110 | .handler = no_action, |
111 | .name = "MSP CIC cascade" | 111 | .name = "MSP CIC cascade", |
112 | .flags = IRQF_NO_THREAD, | ||
112 | }; | 113 | }; |
113 | 114 | ||
114 | static struct irqaction per_cascade_msp = { | 115 | static struct irqaction per_cascade_msp = { |
115 | .handler = no_action, | 116 | .handler = no_action, |
116 | .name = "MSP PER cascade" | 117 | .name = "MSP PER cascade", |
118 | .flags = IRQF_NO_THREAD, | ||
117 | }; | 119 | }; |
118 | 120 | ||
119 | void __init arch_init_irq(void) | 121 | void __init arch_init_irq(void) |
diff --git a/arch/mips/pnx8550/common/int.c b/arch/mips/pnx8550/common/int.c index 6b93c81779c1..1ebe22bdadc8 100644 --- a/arch/mips/pnx8550/common/int.c +++ b/arch/mips/pnx8550/common/int.c | |||
@@ -167,7 +167,7 @@ static struct irq_chip level_irq_type = { | |||
167 | 167 | ||
168 | static struct irqaction gic_action = { | 168 | static struct irqaction gic_action = { |
169 | .handler = no_action, | 169 | .handler = no_action, |
170 | .flags = IRQF_DISABLED, | 170 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
171 | .name = "GIC", | 171 | .name = "GIC", |
172 | }; | 172 | }; |
173 | 173 | ||
diff --git a/arch/mips/sgi-ip22/ip22-int.c b/arch/mips/sgi-ip22/ip22-int.c index b4d08e4d2ea9..f72c336ea27b 100644 --- a/arch/mips/sgi-ip22/ip22-int.c +++ b/arch/mips/sgi-ip22/ip22-int.c | |||
@@ -155,32 +155,32 @@ static void __irq_entry indy_buserror_irq(void) | |||
155 | 155 | ||
156 | static struct irqaction local0_cascade = { | 156 | static struct irqaction local0_cascade = { |
157 | .handler = no_action, | 157 | .handler = no_action, |
158 | .flags = IRQF_DISABLED, | 158 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
159 | .name = "local0 cascade", | 159 | .name = "local0 cascade", |
160 | }; | 160 | }; |
161 | 161 | ||
162 | static struct irqaction local1_cascade = { | 162 | static struct irqaction local1_cascade = { |
163 | .handler = no_action, | 163 | .handler = no_action, |
164 | .flags = IRQF_DISABLED, | 164 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
165 | .name = "local1 cascade", | 165 | .name = "local1 cascade", |
166 | }; | 166 | }; |
167 | 167 | ||
168 | static struct irqaction buserr = { | 168 | static struct irqaction buserr = { |
169 | .handler = no_action, | 169 | .handler = no_action, |
170 | .flags = IRQF_DISABLED, | 170 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
171 | .name = "Bus Error", | 171 | .name = "Bus Error", |
172 | }; | 172 | }; |
173 | 173 | ||
174 | static struct irqaction map0_cascade = { | 174 | static struct irqaction map0_cascade = { |
175 | .handler = no_action, | 175 | .handler = no_action, |
176 | .flags = IRQF_DISABLED, | 176 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
177 | .name = "mapable0 cascade", | 177 | .name = "mapable0 cascade", |
178 | }; | 178 | }; |
179 | 179 | ||
180 | #ifdef USE_LIO3_IRQ | 180 | #ifdef USE_LIO3_IRQ |
181 | static struct irqaction map1_cascade = { | 181 | static struct irqaction map1_cascade = { |
182 | .handler = no_action, | 182 | .handler = no_action, |
183 | .flags = IRQF_DISABLED, | 183 | .flags = IRQF_DISABLED | IRQF_NO_THREAD, |
184 | .name = "mapable1 cascade", | 184 | .name = "mapable1 cascade", |
185 | }; | 185 | }; |
186 | #define SGI_INTERRUPTS SGINT_END | 186 | #define SGI_INTERRUPTS SGINT_END |
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c index a7e5a6d917b1..3ab5b5d25b0a 100644 --- a/arch/mips/sni/rm200.c +++ b/arch/mips/sni/rm200.c | |||
@@ -359,6 +359,7 @@ void sni_rm200_init_8259A(void) | |||
359 | static struct irqaction sni_rm200_irq2 = { | 359 | static struct irqaction sni_rm200_irq2 = { |
360 | .handler = no_action, | 360 | .handler = no_action, |
361 | .name = "cascade", | 361 | .name = "cascade", |
362 | .flags = IRQF_NO_THREAD, | ||
362 | }; | 363 | }; |
363 | 364 | ||
364 | static struct resource sni_rm200_pic1_resource = { | 365 | static struct resource sni_rm200_pic1_resource = { |
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c index 70a3b85f3757..fad2bef432cd 100644 --- a/arch/mips/vr41xx/common/irq.c +++ b/arch/mips/vr41xx/common/irq.c | |||
@@ -34,6 +34,7 @@ static irq_cascade_t irq_cascade[NR_IRQS] __cacheline_aligned; | |||
34 | static struct irqaction cascade_irqaction = { | 34 | static struct irqaction cascade_irqaction = { |
35 | .handler = no_action, | 35 | .handler = no_action, |
36 | .name = "cascade", | 36 | .name = "cascade", |
37 | .flags = IRQF_NO_THREAD, | ||
37 | }; | 38 | }; |
38 | 39 | ||
39 | int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)) | 40 | int cascade_irq(unsigned int irq, int (*get_irq)(unsigned int)) |
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 5cc83851ad06..31a7d3a7ce25 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops = | |||
561 | .write = u4_pcie_write_config, | 561 | .write = u4_pcie_write_config, |
562 | }; | 562 | }; |
563 | 563 | ||
564 | static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev) | ||
565 | { | ||
566 | /* Apple's device-tree "hides" the root complex virtual P2P bridge | ||
567 | * on U4. However, Linux sees it, causing the PCI <-> OF matching | ||
568 | * code to fail to properly match devices below it. This works around | ||
569 | * it by setting the node of the bridge to point to the PHB node, | ||
570 | * which is not entirely correct but fixes the matching code and | ||
571 | * doesn't break anything else. It's also the simplest possible fix. | ||
572 | */ | ||
573 | if (dev->dev.of_node == NULL) | ||
574 | dev->dev.of_node = pcibios_get_phb_of_node(dev->bus); | ||
575 | } | ||
576 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node); | ||
577 | |||
564 | #endif /* CONFIG_PPC64 */ | 578 | #endif /* CONFIG_PPC64 */ |
565 | 579 | ||
566 | #ifdef CONFIG_PPC32 | 580 | #ifdef CONFIG_PPC32 |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 64b61bf72e93..547f1a6a35d4 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
@@ -188,7 +188,8 @@ extern char elf_platform[]; | |||
188 | #define SET_PERSONALITY(ex) \ | 188 | #define SET_PERSONALITY(ex) \ |
189 | do { \ | 189 | do { \ |
190 | if (personality(current->personality) != PER_LINUX32) \ | 190 | if (personality(current->personality) != PER_LINUX32) \ |
191 | set_personality(PER_LINUX); \ | 191 | set_personality(PER_LINUX | \ |
192 | (current->personality & ~PER_MASK)); \ | ||
192 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | 193 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ |
193 | set_thread_flag(TIF_31BIT); \ | 194 | set_thread_flag(TIF_31BIT); \ |
194 | else \ | 195 | else \ |
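Note: the SET_PERSONALITY change stops clobbering the upper personality flag bits. Illustrative note, not from the patch:

	/*
	 * personality(p) is (p & PER_MASK), so
	 *
	 *   set_personality(PER_LINUX | (current->personality & ~PER_MASK));
	 *
	 * switches the persona to PER_LINUX while keeping flag bits the task
	 * may already carry, e.g. ADDR_NO_RANDOMIZE set via personality(2).
	 */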
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index f69ff3c13496..5d56c2b95b14 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -303,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
303 | /* Walk the guest addr space page table */ | 303 | /* Walk the guest addr space page table */ |
304 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 304 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
305 | if (*table & _REGION_ENTRY_INV) | 305 | if (*table & _REGION_ENTRY_INV) |
306 | return 0; | 306 | goto out; |
307 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 307 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
308 | table = table + (((to + off) >> 42) & 0x7ff); | 308 | table = table + (((to + off) >> 42) & 0x7ff); |
309 | if (*table & _REGION_ENTRY_INV) | 309 | if (*table & _REGION_ENTRY_INV) |
310 | return 0; | 310 | goto out; |
311 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 311 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
312 | table = table + (((to + off) >> 31) & 0x7ff); | 312 | table = table + (((to + off) >> 31) & 0x7ff); |
313 | if (*table & _REGION_ENTRY_INV) | 313 | if (*table & _REGION_ENTRY_INV) |
314 | return 0; | 314 | goto out; |
315 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 315 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
316 | table = table + (((to + off) >> 20) & 0x7ff); | 316 | table = table + (((to + off) >> 20) & 0x7ff); |
317 | 317 | ||
@@ -319,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
319 | flush |= gmap_unlink_segment(gmap, table); | 319 | flush |= gmap_unlink_segment(gmap, table); |
320 | *table = _SEGMENT_ENTRY_INV; | 320 | *table = _SEGMENT_ENTRY_INV; |
321 | } | 321 | } |
322 | out: | ||
322 | up_read(&gmap->mm->mmap_sem); | 323 | up_read(&gmap->mm->mmap_sem); |
323 | if (flush) | 324 | if (flush) |
324 | gmap_flush_tlb(gmap); | 325 | gmap_flush_tlb(gmap); |
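Note: the gmap_unmap_segment() hunks replace the early "return 0" exits with "goto out" so mmap_sem is always dropped; the matching down_read() sits outside the hunk and is assumed here. Condensed exit path:

	/* ... guest page-table walk; an invalid region entry jumps to out ... */
	out:
		up_read(&gmap->mm->mmap_sem);	/* released even on early exit */
		if (flush)
			gmap_flush_tlb(gmap);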
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h index 1407c07bdade..f6ae2b2b6870 100644 --- a/arch/sparc/include/asm/pgtsrmmu.h +++ b/arch/sparc/include/asm/pgtsrmmu.h | |||
@@ -280,7 +280,7 @@ static inline unsigned long srmmu_hwprobe(unsigned long vaddr) | |||
280 | return retval; | 280 | return retval; |
281 | } | 281 | } |
282 | #else | 282 | #else |
283 | #define srmmu_hwprobe(addr) (srmmu_swprobe(addr, 0) & SRMMU_PTE_PMASK) | 283 | #define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0) |
284 | #endif | 284 | #endif |
285 | 285 | ||
286 | static inline int | 286 | static inline int |
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index 55a17c6efeb8..d06a26601753 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #define SUN4V_CHIP_NIAGARA1 0x01 | 43 | #define SUN4V_CHIP_NIAGARA1 0x01 |
44 | #define SUN4V_CHIP_NIAGARA2 0x02 | 44 | #define SUN4V_CHIP_NIAGARA2 0x02 |
45 | #define SUN4V_CHIP_NIAGARA3 0x03 | 45 | #define SUN4V_CHIP_NIAGARA3 0x03 |
46 | #define SUN4V_CHIP_NIAGARA4 0x04 | ||
47 | #define SUN4V_CHIP_NIAGARA5 0x05 | ||
46 | #define SUN4V_CHIP_UNKNOWN 0xff | 48 | #define SUN4V_CHIP_UNKNOWN 0xff |
47 | 49 | ||
48 | #ifndef __ASSEMBLY__ | 50 | #ifndef __ASSEMBLY__ |
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h index 9ed6ff679ab7..ee8edc68423e 100644 --- a/arch/sparc/include/asm/xor_64.h +++ b/arch/sparc/include/asm/xor_64.h | |||
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = { | |||
66 | ((tlb_type == hypervisor && \ | 66 | ((tlb_type == hypervisor && \ |
67 | (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ | 67 | (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ |
68 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ | 68 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ |
69 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ | 69 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \ |
70 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \ | ||
71 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \ | ||
70 | &xor_block_niagara : \ | 72 | &xor_block_niagara : \ |
71 | &xor_block_VIS) | 73 | &xor_block_VIS) |
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 9810fd881058..ba9b1cec4e6b 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void) | |||
481 | sparc_pmu_type = "niagara3"; | 481 | sparc_pmu_type = "niagara3"; |
482 | break; | 482 | break; |
483 | 483 | ||
484 | case SUN4V_CHIP_NIAGARA4: | ||
485 | sparc_cpu_type = "UltraSparc T4 (Niagara4)"; | ||
486 | sparc_fpu_type = "UltraSparc T4 integrated FPU"; | ||
487 | sparc_pmu_type = "niagara4"; | ||
488 | break; | ||
489 | |||
490 | case SUN4V_CHIP_NIAGARA5: | ||
491 | sparc_cpu_type = "UltraSparc T5 (Niagara5)"; | ||
492 | sparc_fpu_type = "UltraSparc T5 integrated FPU"; | ||
493 | sparc_pmu_type = "niagara5"; | ||
494 | break; | ||
495 | |||
484 | default: | 496 | default: |
485 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", | 497 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", |
486 | prom_cpu_compatible); | 498 | prom_cpu_compatible); |
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index 4197e8d62d4c..9323eafccb93 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c | |||
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) | |||
325 | case SUN4V_CHIP_NIAGARA1: | 325 | case SUN4V_CHIP_NIAGARA1: |
326 | case SUN4V_CHIP_NIAGARA2: | 326 | case SUN4V_CHIP_NIAGARA2: |
327 | case SUN4V_CHIP_NIAGARA3: | 327 | case SUN4V_CHIP_NIAGARA3: |
328 | case SUN4V_CHIP_NIAGARA4: | ||
329 | case SUN4V_CHIP_NIAGARA5: | ||
328 | rover_inc_table = niagara_iterate_method; | 330 | rover_inc_table = niagara_iterate_method; |
329 | break; | 331 | break; |
330 | default: | 332 | default: |
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 0eac1b2fc53d..0d810c2f1d00 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -133,7 +133,7 @@ prom_sun4v_name: | |||
133 | prom_niagara_prefix: | 133 | prom_niagara_prefix: |
134 | .asciz "SUNW,UltraSPARC-T" | 134 | .asciz "SUNW,UltraSPARC-T" |
135 | prom_sparc_prefix: | 135 | prom_sparc_prefix: |
136 | .asciz "SPARC-T" | 136 | .asciz "SPARC-" |
137 | .align 4 | 137 | .align 4 |
138 | prom_root_compatible: | 138 | prom_root_compatible: |
139 | .skip 64 | 139 | .skip 64 |
@@ -396,7 +396,7 @@ sun4v_chip_type: | |||
396 | or %g1, %lo(prom_cpu_compatible), %g1 | 396 | or %g1, %lo(prom_cpu_compatible), %g1 |
397 | sethi %hi(prom_sparc_prefix), %g7 | 397 | sethi %hi(prom_sparc_prefix), %g7 |
398 | or %g7, %lo(prom_sparc_prefix), %g7 | 398 | or %g7, %lo(prom_sparc_prefix), %g7 |
399 | mov 7, %g3 | 399 | mov 6, %g3 |
400 | 90: ldub [%g7], %g2 | 400 | 90: ldub [%g7], %g2 |
401 | ldub [%g1], %g4 | 401 | ldub [%g1], %g4 |
402 | cmp %g2, %g4 | 402 | cmp %g2, %g4 |
@@ -408,10 +408,23 @@ sun4v_chip_type: | |||
408 | 408 | ||
409 | sethi %hi(prom_cpu_compatible), %g1 | 409 | sethi %hi(prom_cpu_compatible), %g1 |
410 | or %g1, %lo(prom_cpu_compatible), %g1 | 410 | or %g1, %lo(prom_cpu_compatible), %g1 |
411 | ldub [%g1 + 7], %g2 | 411 | ldub [%g1 + 6], %g2 |
412 | cmp %g2, 'T' | ||
413 | be,pt %xcc, 70f | ||
414 | cmp %g2, 'M' | ||
415 | bne,pn %xcc, 4f | ||
416 | nop | ||
417 | |||
418 | 70: ldub [%g1 + 7], %g2 | ||
412 | cmp %g2, '3' | 419 | cmp %g2, '3' |
413 | be,pt %xcc, 5f | 420 | be,pt %xcc, 5f |
414 | mov SUN4V_CHIP_NIAGARA3, %g4 | 421 | mov SUN4V_CHIP_NIAGARA3, %g4 |
422 | cmp %g2, '4' | ||
423 | be,pt %xcc, 5f | ||
424 | mov SUN4V_CHIP_NIAGARA4, %g4 | ||
425 | cmp %g2, '5' | ||
426 | be,pt %xcc, 5f | ||
427 | mov SUN4V_CHIP_NIAGARA5, %g4 | ||
415 | ba,pt %xcc, 4f | 428 | ba,pt %xcc, 4f |
416 | nop | 429 | nop |
417 | 430 | ||
@@ -545,6 +558,12 @@ niagara_tlb_fixup: | |||
545 | cmp %g1, SUN4V_CHIP_NIAGARA3 | 558 | cmp %g1, SUN4V_CHIP_NIAGARA3 |
546 | be,pt %xcc, niagara2_patch | 559 | be,pt %xcc, niagara2_patch |
547 | nop | 560 | nop |
561 | cmp %g1, SUN4V_CHIP_NIAGARA4 | ||
562 | be,pt %xcc, niagara2_patch | ||
563 | nop | ||
564 | cmp %g1, SUN4V_CHIP_NIAGARA5 | ||
565 | be,pt %xcc, niagara2_patch | ||
566 | nop | ||
548 | 567 | ||
549 | call generic_patch_copyops | 568 | call generic_patch_copyops |
550 | nop | 569 | nop |
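Note: a C rendering of the updated head_64.S probe may help. The prefix match is now the six bytes "SPARC-", byte 6 must be 'T' or 'M', and byte 7 selects the chip; illustrative only, the real code stays in assembly:

	if (!strncmp(prom_cpu_compatible, "SPARC-", 6) &&
	    (prom_cpu_compatible[6] == 'T' || prom_cpu_compatible[6] == 'M')) {
		switch (prom_cpu_compatible[7]) {
		case '3': sun4v_chip_type = SUN4V_CHIP_NIAGARA3; break;
		case '4': sun4v_chip_type = SUN4V_CHIP_NIAGARA4; break;
		case '5': sun4v_chip_type = SUN4V_CHIP_NIAGARA5; break;
		}
	}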
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 1e94f946570e..8aa0d4408586 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c | |||
@@ -230,7 +230,8 @@ static void pci_parse_of_addrs(struct platform_device *op, | |||
230 | res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; | 230 | res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; |
231 | } else if (i == dev->rom_base_reg) { | 231 | } else if (i == dev->rom_base_reg) { |
232 | res = &dev->resource[PCI_ROM_RESOURCE]; | 232 | res = &dev->resource[PCI_ROM_RESOURCE]; |
233 | flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; | 233 | flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
234 | | IORESOURCE_SIZEALIGN; | ||
234 | } else { | 235 | } else { |
235 | printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); | 236 | printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); |
236 | continue; | 237 | continue; |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index c8cc461ff75f..f793742eec2b 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -380,8 +380,7 @@ void flush_thread(void) | |||
380 | #endif | 380 | #endif |
381 | } | 381 | } |
382 | 382 | ||
383 | /* Now, this task is no longer a kernel thread. */ | 383 | /* This task is no longer a kernel thread. */ |
384 | current->thread.current_ds = USER_DS; | ||
385 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { | 384 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { |
386 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; | 385 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; |
387 | 386 | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c158a95ec664..d959cd0a4aa4 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -368,9 +368,6 @@ void flush_thread(void) | |||
368 | 368 | ||
369 | /* Clear FPU register state. */ | 369 | /* Clear FPU register state. */ |
370 | t->fpsaved[0] = 0; | 370 | t->fpsaved[0] = 0; |
371 | |||
372 | if (get_thread_current_ds() != ASI_AIUS) | ||
373 | set_fs(USER_DS); | ||
374 | } | 371 | } |
375 | 372 | ||
376 | /* It's a bit more tricky when 64-bit tasks are involved... */ | 373 | /* It's a bit more tricky when 64-bit tasks are involved... */ |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index d26e1f6c717a..3e3e2914c70b 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -137,7 +137,7 @@ static void __init process_switch(char c) | |||
137 | prom_halt(); | 137 | prom_halt(); |
138 | break; | 138 | break; |
139 | case 'p': | 139 | case 'p': |
140 | /* Just ignore, this behavior is now the default. */ | 140 | prom_early_console.flags &= ~CON_BOOT; |
141 | break; | 141 | break; |
142 | default: | 142 | default: |
143 | printk("Unknown boot switch (-%c)\n", c); | 143 | printk("Unknown boot switch (-%c)\n", c); |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 3c5bb784214f..c965595aa7e9 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -106,7 +106,7 @@ static void __init process_switch(char c) | |||
106 | prom_halt(); | 106 | prom_halt(); |
107 | break; | 107 | break; |
108 | case 'p': | 108 | case 'p': |
109 | /* Just ignore, this behavior is now the default. */ | 109 | prom_early_console.flags &= ~CON_BOOT; |
110 | break; | 110 | break; |
111 | case 'P': | 111 | case 'P': |
112 | /* Force UltraSPARC-III P-Cache on. */ | 112 | /* Force UltraSPARC-III P-Cache on. */ |
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void) | |||
425 | else if (tlb_type == hypervisor) { | 425 | else if (tlb_type == hypervisor) { |
426 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || | 426 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || |
427 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 427 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
428 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 428 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
429 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
430 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
429 | cap |= HWCAP_SPARC_BLKINIT; | 431 | cap |= HWCAP_SPARC_BLKINIT; |
430 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 432 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
431 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 433 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
434 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
435 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
432 | cap |= HWCAP_SPARC_N2; | 436 | cap |= HWCAP_SPARC_N2; |
433 | } | 437 | } |
434 | 438 | ||
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void) | |||
452 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) | 456 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) |
453 | cap |= AV_SPARC_ASI_BLK_INIT; | 457 | cap |= AV_SPARC_ASI_BLK_INIT; |
454 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 458 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
455 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 459 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
460 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
461 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
456 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | | 462 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | |
457 | AV_SPARC_ASI_BLK_INIT | | 463 | AV_SPARC_ASI_BLK_INIT | |
458 | AV_SPARC_POPC); | 464 | AV_SPARC_POPC); |
459 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 465 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
466 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
467 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
460 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | | 468 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | |
461 | AV_SPARC_FMAF); | 469 | AV_SPARC_FMAF); |
462 | } | 470 | } |
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 1ba95aff5d59..2caa556db86d 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c | |||
@@ -273,10 +273,7 @@ void do_sigreturn32(struct pt_regs *regs) | |||
273 | case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); | 273 | case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); |
274 | } | 274 | } |
275 | sigdelsetmask(&set, ~_BLOCKABLE); | 275 | sigdelsetmask(&set, ~_BLOCKABLE); |
276 | spin_lock_irq(¤t->sighand->siglock); | 276 | set_current_blocked(&set); |
277 | current->blocked = set; | ||
278 | recalc_sigpending(); | ||
279 | spin_unlock_irq(¤t->sighand->siglock); | ||
280 | return; | 277 | return; |
281 | 278 | ||
282 | segv: | 279 | segv: |
@@ -377,10 +374,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) | |||
377 | case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); | 374 | case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); |
378 | } | 375 | } |
379 | sigdelsetmask(&set, ~_BLOCKABLE); | 376 | sigdelsetmask(&set, ~_BLOCKABLE); |
380 | spin_lock_irq(¤t->sighand->siglock); | 377 | set_current_blocked(&set); |
381 | current->blocked = set; | ||
382 | recalc_sigpending(); | ||
383 | spin_unlock_irq(¤t->sighand->siglock); | ||
384 | return; | 378 | return; |
385 | segv: | 379 | segv: |
386 | force_sig(SIGSEGV, current); | 380 | force_sig(SIGSEGV, current); |
@@ -782,6 +776,7 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, | |||
782 | siginfo_t *info, | 776 | siginfo_t *info, |
783 | sigset_t *oldset, struct pt_regs *regs) | 777 | sigset_t *oldset, struct pt_regs *regs) |
784 | { | 778 | { |
779 | sigset_t blocked; | ||
785 | int err; | 780 | int err; |
786 | 781 | ||
787 | if (ka->sa.sa_flags & SA_SIGINFO) | 782 | if (ka->sa.sa_flags & SA_SIGINFO) |
@@ -792,12 +787,10 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, | |||
792 | if (err) | 787 | if (err) |
793 | return err; | 788 | return err; |
794 | 789 | ||
795 | spin_lock_irq(¤t->sighand->siglock); | 790 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); |
796 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
797 | if (!(ka->sa.sa_flags & SA_NOMASK)) | 791 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
798 | sigaddset(¤t->blocked,signr); | 792 | sigaddset(&blocked, signr); |
799 | recalc_sigpending(); | 793 | set_current_blocked(&blocked); |
800 | spin_unlock_irq(¤t->sighand->siglock); | ||
801 | 794 | ||
802 | tracehook_signal_handler(signr, info, ka, regs, 0); | 795 | tracehook_signal_handler(signr, info, ka, regs, 0); |
803 | 796 | ||
@@ -881,7 +874,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs, | |||
881 | */ | 874 | */ |
882 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | 875 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { |
883 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | 876 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
884 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 877 | set_current_blocked(¤t->saved_sigmask); |
885 | } | 878 | } |
886 | } | 879 | } |
887 | 880 | ||
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 04ede8f04add..8ce247ac04cc 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c | |||
@@ -62,12 +62,13 @@ struct rt_signal_frame { | |||
62 | 62 | ||
63 | static int _sigpause_common(old_sigset_t set) | 63 | static int _sigpause_common(old_sigset_t set) |
64 | { | 64 | { |
65 | set &= _BLOCKABLE; | 65 | sigset_t blocked; |
66 | spin_lock_irq(¤t->sighand->siglock); | 66 | |
67 | current->saved_sigmask = current->blocked; | 67 | current->saved_sigmask = current->blocked; |
68 | siginitset(¤t->blocked, set); | 68 | |
69 | recalc_sigpending(); | 69 | set &= _BLOCKABLE; |
70 | spin_unlock_irq(¤t->sighand->siglock); | 70 | siginitset(&blocked, set); |
71 | set_current_blocked(&blocked); | ||
71 | 72 | ||
72 | current->state = TASK_INTERRUPTIBLE; | 73 | current->state = TASK_INTERRUPTIBLE; |
73 | schedule(); | 74 | schedule(); |
@@ -139,10 +140,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) | |||
139 | goto segv_and_exit; | 140 | goto segv_and_exit; |
140 | 141 | ||
141 | sigdelsetmask(&set, ~_BLOCKABLE); | 142 | sigdelsetmask(&set, ~_BLOCKABLE); |
142 | spin_lock_irq(¤t->sighand->siglock); | 143 | set_current_blocked(&set); |
143 | current->blocked = set; | ||
144 | recalc_sigpending(); | ||
145 | spin_unlock_irq(¤t->sighand->siglock); | ||
146 | return; | 144 | return; |
147 | 145 | ||
148 | segv_and_exit: | 146 | segv_and_exit: |
@@ -209,10 +207,7 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) | |||
209 | } | 207 | } |
210 | 208 | ||
211 | sigdelsetmask(&set, ~_BLOCKABLE); | 209 | sigdelsetmask(&set, ~_BLOCKABLE); |
212 | spin_lock_irq(¤t->sighand->siglock); | 210 | set_current_blocked(&set); |
213 | current->blocked = set; | ||
214 | recalc_sigpending(); | ||
215 | spin_unlock_irq(¤t->sighand->siglock); | ||
216 | return; | 211 | return; |
217 | segv: | 212 | segv: |
218 | force_sig(SIGSEGV, current); | 213 | force_sig(SIGSEGV, current); |
@@ -470,6 +465,7 @@ static inline int | |||
470 | handle_signal(unsigned long signr, struct k_sigaction *ka, | 465 | handle_signal(unsigned long signr, struct k_sigaction *ka, |
471 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) | 466 | siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) |
472 | { | 467 | { |
468 | sigset_t blocked; | ||
473 | int err; | 469 | int err; |
474 | 470 | ||
475 | if (ka->sa.sa_flags & SA_SIGINFO) | 471 | if (ka->sa.sa_flags & SA_SIGINFO) |
@@ -480,12 +476,10 @@ handle_signal(unsigned long signr, struct k_sigaction *ka, | |||
480 | if (err) | 476 | if (err) |
481 | return err; | 477 | return err; |
482 | 478 | ||
483 | spin_lock_irq(¤t->sighand->siglock); | 479 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); |
484 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
485 | if (!(ka->sa.sa_flags & SA_NOMASK)) | 480 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
486 | sigaddset(¤t->blocked, signr); | 481 | sigaddset(&blocked, signr); |
487 | recalc_sigpending(); | 482 | set_current_blocked(&blocked); |
488 | spin_unlock_irq(¤t->sighand->siglock); | ||
489 | 483 | ||
490 | tracehook_signal_handler(signr, info, ka, regs, 0); | 484 | tracehook_signal_handler(signr, info, ka, regs, 0); |
491 | 485 | ||
@@ -581,7 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
581 | */ | 575 | */ |
582 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { | 576 | if (test_thread_flag(TIF_RESTORE_SIGMASK)) { |
583 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 577 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
584 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 578 | set_current_blocked(¤t->saved_sigmask); |
585 | } | 579 | } |
586 | } | 580 | } |
587 | 581 | ||
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 47509df3b893..a2b81598d905 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c | |||
@@ -70,10 +70,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) | |||
70 | goto do_sigsegv; | 70 | goto do_sigsegv; |
71 | } | 71 | } |
72 | sigdelsetmask(&set, ~_BLOCKABLE); | 72 | sigdelsetmask(&set, ~_BLOCKABLE); |
73 | spin_lock_irq(¤t->sighand->siglock); | 73 | set_current_blocked(&set); |
74 | current->blocked = set; | ||
75 | recalc_sigpending(); | ||
76 | spin_unlock_irq(¤t->sighand->siglock); | ||
77 | } | 74 | } |
78 | if (test_thread_flag(TIF_32BIT)) { | 75 | if (test_thread_flag(TIF_32BIT)) { |
79 | pc &= 0xffffffff; | 76 | pc &= 0xffffffff; |
@@ -242,12 +239,13 @@ struct rt_signal_frame { | |||
242 | 239 | ||
243 | static long _sigpause_common(old_sigset_t set) | 240 | static long _sigpause_common(old_sigset_t set) |
244 | { | 241 | { |
245 | set &= _BLOCKABLE; | 242 | sigset_t blocked; |
246 | spin_lock_irq(¤t->sighand->siglock); | 243 | |
247 | current->saved_sigmask = current->blocked; | 244 | current->saved_sigmask = current->blocked; |
248 | siginitset(¤t->blocked, set); | 245 | |
249 | recalc_sigpending(); | 246 | set &= _BLOCKABLE; |
250 | spin_unlock_irq(¤t->sighand->siglock); | 247 | siginitset(&blocked, set); |
248 | set_current_blocked(&blocked); | ||
251 | 249 | ||
252 | current->state = TASK_INTERRUPTIBLE; | 250 | current->state = TASK_INTERRUPTIBLE; |
253 | schedule(); | 251 | schedule(); |
@@ -327,10 +325,7 @@ void do_rt_sigreturn(struct pt_regs *regs) | |||
327 | pt_regs_clear_syscall(regs); | 325 | pt_regs_clear_syscall(regs); |
328 | 326 | ||
329 | sigdelsetmask(&set, ~_BLOCKABLE); | 327 | sigdelsetmask(&set, ~_BLOCKABLE); |
330 | spin_lock_irq(¤t->sighand->siglock); | 328 | set_current_blocked(&set); |
331 | current->blocked = set; | ||
332 | recalc_sigpending(); | ||
333 | spin_unlock_irq(¤t->sighand->siglock); | ||
334 | return; | 329 | return; |
335 | segv: | 330 | segv: |
336 | force_sig(SIGSEGV, current); | 331 | force_sig(SIGSEGV, current); |
@@ -484,18 +479,17 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, | |||
484 | siginfo_t *info, | 479 | siginfo_t *info, |
485 | sigset_t *oldset, struct pt_regs *regs) | 480 | sigset_t *oldset, struct pt_regs *regs) |
486 | { | 481 | { |
482 | sigset_t blocked; | ||
487 | int err; | 483 | int err; |
488 | 484 | ||
489 | err = setup_rt_frame(ka, regs, signr, oldset, | 485 | err = setup_rt_frame(ka, regs, signr, oldset, |
490 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); | 486 | (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); |
491 | if (err) | 487 | if (err) |
492 | return err; | 488 | return err; |
493 | spin_lock_irq(¤t->sighand->siglock); | 489 | sigorsets(&blocked, ¤t->blocked, &ka->sa.sa_mask); |
494 | sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); | ||
495 | if (!(ka->sa.sa_flags & SA_NOMASK)) | 490 | if (!(ka->sa.sa_flags & SA_NOMASK)) |
496 | sigaddset(¤t->blocked,signr); | 491 | sigaddset(&blocked, signr); |
497 | recalc_sigpending(); | 492 | set_current_blocked(&blocked); |
498 | spin_unlock_irq(¤t->sighand->siglock); | ||
499 | 493 | ||
500 | tracehook_signal_handler(signr, info, ka, regs, 0); | 494 | tracehook_signal_handler(signr, info, ka, regs, 0); |
501 | 495 | ||
@@ -601,7 +595,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) | |||
601 | */ | 595 | */ |
602 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { | 596 | if (current_thread_info()->status & TS_RESTORE_SIGMASK) { |
603 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; | 597 | current_thread_info()->status &= ~TS_RESTORE_SIGMASK; |
604 | sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); | 598 | set_current_blocked(¤t->saved_sigmask); |
605 | } | 599 | } |
606 | } | 600 | } |
607 | 601 | ||
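Note: all of the sparc signal hunks follow the same conversion: build the new mask in a local sigset_t, then hand it to set_current_blocked(), which takes sighand->siglock and recalculates pending signals itself. The repeated pattern, condensed:

	sigset_t blocked;

	sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
	if (!(ka->sa.sa_flags & SA_NOMASK))
		sigaddset(&blocked, signr);
	set_current_blocked(&blocked);	/* replaces open-coded siglock + recalc_sigpending() */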
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 581531dbc8b5..8e073d802139 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void) | |||
511 | for (i = 0; i < prom_trans_ents; i++) | 511 | for (i = 0; i < prom_trans_ents; i++) |
512 | prom_trans[i].data &= ~0x0003fe0000000000UL; | 512 | prom_trans[i].data &= ~0x0003fe0000000000UL; |
513 | } | 513 | } |
514 | |||
515 | /* Force execute bit on. */ | ||
516 | for (i = 0; i < prom_trans_ents; i++) | ||
517 | prom_trans[i].data |= (tlb_type == hypervisor ? | ||
518 | _PAGE_EXEC_4V : _PAGE_EXEC_4U); | ||
514 | } | 519 | } |
515 | 520 | ||
516 | static void __init hypervisor_tlb_lock(unsigned long vaddr, | 521 | static void __init hypervisor_tlb_lock(unsigned long vaddr, |
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c index e485a6804998..13c2169822a8 100644 --- a/arch/sparc/mm/leon_mm.c +++ b/arch/sparc/mm/leon_mm.c | |||
@@ -162,7 +162,7 @@ ready: | |||
162 | printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); | 162 | printk(KERN_INFO "swprobe: padde %x\n", paddr_calc); |
163 | if (paddr) | 163 | if (paddr) |
164 | *paddr = paddr_calc; | 164 | *paddr = paddr_calc; |
165 | return paddrbase; | 165 | return pte; |
166 | } | 166 | } |
167 | 167 | ||
168 | void leon_flush_icache_all(void) | 168 | void leon_flush_icache_all(void) |
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S index fc94607f0bd5..aecc8ed5f39b 100644 --- a/arch/tile/kernel/intvec_32.S +++ b/arch/tile/kernel/intvec_32.S | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <asm/ptrace.h> | 21 | #include <asm/ptrace.h> |
22 | #include <asm/thread_info.h> | 22 | #include <asm/thread_info.h> |
23 | #include <asm/irqflags.h> | 23 | #include <asm/irqflags.h> |
24 | #include <linux/atomic.h> | 24 | #include <asm/atomic_32.h> |
25 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
26 | #include <hv/hypervisor.h> | 26 | #include <hv/hypervisor.h> |
27 | #include <arch/abi.h> | 27 | #include <arch/abi.h> |
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S index 1f75a2a56101..30638042691d 100644 --- a/arch/tile/lib/atomic_asm_32.S +++ b/arch/tile/lib/atomic_asm_32.S | |||
@@ -70,7 +70,7 @@ | |||
70 | */ | 70 | */ |
71 | 71 | ||
72 | #include <linux/linkage.h> | 72 | #include <linux/linkage.h> |
73 | #include <linux/atomic.h> | 73 | #include <asm/atomic_32.h> |
74 | #include <asm/page.h> | 74 | #include <asm/page.h> |
75 | #include <asm/processor.h> | 75 | #include <asm/processor.h> |
76 | 76 | ||
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 3f2ad2640d85..ccdbc16b8941 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
42 | { | 42 | { |
43 | int real_seconds, real_minutes, cmos_minutes; | 43 | int real_seconds, real_minutes, cmos_minutes; |
44 | unsigned char save_control, save_freq_select; | 44 | unsigned char save_control, save_freq_select; |
45 | unsigned long flags; | ||
45 | int retval = 0; | 46 | int retval = 0; |
46 | 47 | ||
48 | spin_lock_irqsave(&rtc_lock, flags); | ||
49 | |||
47 | /* tell the clock it's being set */ | 50 | /* tell the clock it's being set */ |
48 | save_control = CMOS_READ(RTC_CONTROL); | 51 | save_control = CMOS_READ(RTC_CONTROL); |
49 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); | 52 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); |
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
93 | CMOS_WRITE(save_control, RTC_CONTROL); | 96 | CMOS_WRITE(save_control, RTC_CONTROL); |
94 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | 97 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); |
95 | 98 | ||
99 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
100 | |||
96 | return retval; | 101 | return retval; |
97 | } | 102 | } |
98 | 103 | ||
99 | unsigned long mach_get_cmos_time(void) | 104 | unsigned long mach_get_cmos_time(void) |
100 | { | 105 | { |
101 | unsigned int status, year, mon, day, hour, min, sec, century = 0; | 106 | unsigned int status, year, mon, day, hour, min, sec, century = 0; |
107 | unsigned long flags; | ||
108 | |||
109 | spin_lock_irqsave(&rtc_lock, flags); | ||
102 | 110 | ||
103 | /* | 111 | /* |
104 | * If UIP is clear, then we have >= 244 microseconds before | 112 | * If UIP is clear, then we have >= 244 microseconds before |
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void) | |||
125 | status = CMOS_READ(RTC_CONTROL); | 133 | status = CMOS_READ(RTC_CONTROL); |
126 | WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); | 134 | WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); |
127 | 135 | ||
136 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
137 | |||
128 | if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { | 138 | if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { |
129 | sec = bcd2bin(sec); | 139 | sec = bcd2bin(sec); |
130 | min = bcd2bin(min); | 140 | min = bcd2bin(min); |
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write); | |||
169 | 179 | ||
170 | int update_persistent_clock(struct timespec now) | 180 | int update_persistent_clock(struct timespec now) |
171 | { | 181 | { |
172 | unsigned long flags; | 182 | return x86_platform.set_wallclock(now.tv_sec); |
173 | int retval; | ||
174 | |||
175 | spin_lock_irqsave(&rtc_lock, flags); | ||
176 | retval = x86_platform.set_wallclock(now.tv_sec); | ||
177 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
178 | |||
179 | return retval; | ||
180 | } | 183 | } |
181 | 184 | ||
182 | /* not static: needed by APM */ | 185 | /* not static: needed by APM */ |
183 | void read_persistent_clock(struct timespec *ts) | 186 | void read_persistent_clock(struct timespec *ts) |
184 | { | 187 | { |
185 | unsigned long retval, flags; | 188 | unsigned long retval; |
186 | 189 | ||
187 | spin_lock_irqsave(&rtc_lock, flags); | ||
188 | retval = x86_platform.get_wallclock(); | 190 | retval = x86_platform.get_wallclock(); |
189 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
190 | 191 | ||
191 | ts->tv_sec = retval; | 192 | ts->tv_sec = retval; |
192 | ts->tv_nsec = 0; | 193 | ts->tv_nsec = 0; |
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 18ae83dd1cd7..b56c65de384d 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -56,7 +56,7 @@ DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = | |||
56 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), | 56 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE; | 59 | static enum { EMULATE, NATIVE, NONE } vsyscall_mode = NATIVE; |
60 | 60 | ||
61 | static int __init vsyscall_setup(char *str) | 61 | static int __init vsyscall_setup(char *str) |
62 | { | 62 | { |
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 30326443ab81..87488b93a65c 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c | |||
@@ -63,9 +63,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse, | |||
63 | #ifdef CONFIG_X86_32 | 63 | #ifdef CONFIG_X86_32 |
64 | /* for fixmap */ | 64 | /* for fixmap */ |
65 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); | 65 | tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE); |
66 | |||
67 | good_end = max_pfn_mapped << PAGE_SHIFT; | ||
68 | #endif | 66 | #endif |
67 | good_end = max_pfn_mapped << PAGE_SHIFT; | ||
69 | 68 | ||
70 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); | 69 | base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE); |
71 | if (base == MEMBLOCK_ERROR) | 70 | if (base == MEMBLOCK_ERROR) |
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c index 039d91315bc5..404f21a3ff9e 100644 --- a/arch/x86/pci/acpi.c +++ b/arch/x86/pci/acpi.c | |||
@@ -43,6 +43,17 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = { | |||
43 | DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), | 43 | DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"), |
44 | }, | 44 | }, |
45 | }, | 45 | }, |
46 | /* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */ | ||
47 | /* 2006 AMD HT/VIA system with two host bridges */ | ||
48 | { | ||
49 | .callback = set_use_crs, | ||
50 | .ident = "ASUS M2V-MX SE", | ||
51 | .matches = { | ||
52 | DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), | ||
53 | DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"), | ||
54 | DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."), | ||
55 | }, | ||
56 | }, | ||
46 | {} | 57 | {} |
47 | }; | 58 | }; |
48 | 59 | ||
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index 58425adc22c6..fe73276e026b 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -678,38 +678,40 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) | |||
678 | pentry = (struct sfi_device_table_entry *)sb->pentry; | 678 | pentry = (struct sfi_device_table_entry *)sb->pentry; |
679 | 679 | ||
680 | for (i = 0; i < num; i++, pentry++) { | 680 | for (i = 0; i < num; i++, pentry++) { |
681 | if (pentry->irq != (u8)0xff) { /* native RTE case */ | 681 | int irq = pentry->irq; |
682 | |||
683 | if (irq != (u8)0xff) { /* native RTE case */ | ||
682 | /* these SPI2 devices are not exposed to system as PCI | 684 | /* these SPI2 devices are not exposed to system as PCI |
683 | * devices, but they have separate RTE entry in IOAPIC | 685 | * devices, but they have separate RTE entry in IOAPIC |
684 | * so we have to enable them one by one here | 686 | * so we have to enable them one by one here |
685 | */ | 687 | */ |
686 | ioapic = mp_find_ioapic(pentry->irq); | 688 | ioapic = mp_find_ioapic(irq); |
687 | irq_attr.ioapic = ioapic; | 689 | irq_attr.ioapic = ioapic; |
688 | irq_attr.ioapic_pin = pentry->irq; | 690 | irq_attr.ioapic_pin = irq; |
689 | irq_attr.trigger = 1; | 691 | irq_attr.trigger = 1; |
690 | irq_attr.polarity = 1; | 692 | irq_attr.polarity = 1; |
691 | io_apic_set_pci_routing(NULL, pentry->irq, &irq_attr); | 693 | io_apic_set_pci_routing(NULL, irq, &irq_attr); |
692 | } else | 694 | } else |
693 | pentry->irq = 0; /* No irq */ | 695 | irq = 0; /* No irq */ |
694 | 696 | ||
695 | switch (pentry->type) { | 697 | switch (pentry->type) { |
696 | case SFI_DEV_TYPE_IPC: | 698 | case SFI_DEV_TYPE_IPC: |
697 | /* ID as IRQ is a hack that will go away */ | 699 | /* ID as IRQ is a hack that will go away */ |
698 | pdev = platform_device_alloc(pentry->name, pentry->irq); | 700 | pdev = platform_device_alloc(pentry->name, irq); |
699 | if (pdev == NULL) { | 701 | if (pdev == NULL) { |
700 | pr_err("out of memory for SFI platform device '%s'.\n", | 702 | pr_err("out of memory for SFI platform device '%s'.\n", |
701 | pentry->name); | 703 | pentry->name); |
702 | continue; | 704 | continue; |
703 | } | 705 | } |
704 | install_irq_resource(pdev, pentry->irq); | 706 | install_irq_resource(pdev, irq); |
705 | pr_debug("info[%2d]: IPC bus, name = %16.16s, " | 707 | pr_debug("info[%2d]: IPC bus, name = %16.16s, " |
706 | "irq = 0x%2x\n", i, pentry->name, pentry->irq); | 708 | "irq = 0x%2x\n", i, pentry->name, irq); |
707 | sfi_handle_ipc_dev(pdev); | 709 | sfi_handle_ipc_dev(pdev); |
708 | break; | 710 | break; |
709 | case SFI_DEV_TYPE_SPI: | 711 | case SFI_DEV_TYPE_SPI: |
710 | memset(&spi_info, 0, sizeof(spi_info)); | 712 | memset(&spi_info, 0, sizeof(spi_info)); |
711 | strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); | 713 | strncpy(spi_info.modalias, pentry->name, SFI_NAME_LEN); |
712 | spi_info.irq = pentry->irq; | 714 | spi_info.irq = irq; |
713 | spi_info.bus_num = pentry->host_num; | 715 | spi_info.bus_num = pentry->host_num; |
714 | spi_info.chip_select = pentry->addr; | 716 | spi_info.chip_select = pentry->addr; |
715 | spi_info.max_speed_hz = pentry->max_freq; | 717 | spi_info.max_speed_hz = pentry->max_freq; |
@@ -726,7 +728,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table) | |||
726 | memset(&i2c_info, 0, sizeof(i2c_info)); | 728 | memset(&i2c_info, 0, sizeof(i2c_info)); |
727 | bus = pentry->host_num; | 729 | bus = pentry->host_num; |
728 | strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); | 730 | strncpy(i2c_info.type, pentry->name, SFI_NAME_LEN); |
729 | i2c_info.irq = pentry->irq; | 731 | i2c_info.irq = irq; |
730 | i2c_info.addr = pentry->addr; | 732 | i2c_info.addr = pentry->addr; |
731 | pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, " | 733 | pr_debug("info[%2d]: I2C bus = %d, name = %16.16s, " |
732 | "irq = 0x%2x, addr = 0x%x\n", i, bus, | 734 | "irq = 0x%2x, addr = 0x%x\n", i, bus, |
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 73d70d65e76e..6d5dbcdd444a 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c | |||
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write); | |||
58 | unsigned long vrtc_get_time(void) | 58 | unsigned long vrtc_get_time(void) |
59 | { | 59 | { |
60 | u8 sec, min, hour, mday, mon; | 60 | u8 sec, min, hour, mday, mon; |
61 | unsigned long flags; | ||
61 | u32 year; | 62 | u32 year; |
62 | 63 | ||
64 | spin_lock_irqsave(&rtc_lock, flags); | ||
65 | |||
63 | while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) | 66 | while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) |
64 | cpu_relax(); | 67 | cpu_relax(); |
65 | 68 | ||
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void) | |||
70 | mon = vrtc_cmos_read(RTC_MONTH); | 73 | mon = vrtc_cmos_read(RTC_MONTH); |
71 | year = vrtc_cmos_read(RTC_YEAR); | 74 | year = vrtc_cmos_read(RTC_YEAR); |
72 | 75 | ||
76 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
77 | |||
73 | /* vRTC YEAR reg contains the offset to 1960 */ | 78 | /* vRTC YEAR reg contains the offset to 1960 */ |
74 | year += 1960; | 79 | year += 1960; |
75 | 80 | ||
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void) | |||
83 | int vrtc_set_mmss(unsigned long nowtime) | 88 | int vrtc_set_mmss(unsigned long nowtime) |
84 | { | 89 | { |
85 | int real_sec, real_min; | 90 | int real_sec, real_min; |
91 | unsigned long flags; | ||
86 | int vrtc_min; | 92 | int vrtc_min; |
87 | 93 | ||
94 | spin_lock_irqsave(&rtc_lock, flags); | ||
88 | vrtc_min = vrtc_cmos_read(RTC_MINUTES); | 95 | vrtc_min = vrtc_cmos_read(RTC_MINUTES); |
89 | 96 | ||
90 | real_sec = nowtime % 60; | 97 | real_sec = nowtime % 60; |
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime) | |||
95 | 102 | ||
96 | vrtc_cmos_write(real_sec, RTC_SECONDS); | 103 | vrtc_cmos_write(real_sec, RTC_SECONDS); |
97 | vrtc_cmos_write(real_min, RTC_MINUTES); | 104 | vrtc_cmos_write(real_min, RTC_MINUTES); |
105 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
106 | |||
98 | return 0; | 107 | return 0; |
99 | } | 108 | } |
100 | 109 | ||
diff --git a/block/blk-core.c b/block/blk-core.c index b2ed78afd9f0..d34433ae7917 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q) | |||
348 | EXPORT_SYMBOL(blk_put_queue); | 348 | EXPORT_SYMBOL(blk_put_queue); |
349 | 349 | ||
350 | /* | 350 | /* |
351 | * Note: If a driver supplied the queue lock, it should not zap that lock | 351 | * Note: If a driver supplied the queue lock, it is disconnected |
352 | * unexpectedly as some queue cleanup components like elevator_exit() and | 352 | * by this function. The actual state of the lock doesn't matter |
353 | * blk_throtl_exit() need queue lock. | 353 | * here as the request_queue isn't accessible after this point |
354 | * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. | ||
354 | */ | 355 | */ |
355 | void blk_cleanup_queue(struct request_queue *q) | 356 | void blk_cleanup_queue(struct request_queue *q) |
356 | { | 357 | { |
@@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
367 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); | 368 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); |
368 | mutex_unlock(&q->sysfs_lock); | 369 | mutex_unlock(&q->sysfs_lock); |
369 | 370 | ||
370 | if (q->elevator) | 371 | if (q->queue_lock != &q->__queue_lock) |
371 | elevator_exit(q->elevator); | 372 | q->queue_lock = &q->__queue_lock; |
372 | |||
373 | blk_throtl_exit(q); | ||
374 | 373 | ||
375 | blk_put_queue(q); | 374 | blk_put_queue(q); |
376 | } | 375 | } |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e681805cdb47..60fda88c57f0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -479,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj) | |||
479 | 479 | ||
480 | blk_sync_queue(q); | 480 | blk_sync_queue(q); |
481 | 481 | ||
482 | if (q->elevator) | ||
483 | elevator_exit(q->elevator); | ||
484 | |||
485 | blk_throtl_exit(q); | ||
486 | |||
482 | if (rl->rq_pool) | 487 | if (rl->rq_pool) |
483 | mempool_destroy(rl->rq_pool); | 488 | mempool_destroy(rl->rq_pool); |
484 | 489 | ||
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index be4425616931..7835b8fc94db 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c | |||
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc, | |||
67 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | 67 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
68 | u8 *dst = dctx->buffer; | 68 | u8 *dst = dctx->buffer; |
69 | 69 | ||
70 | if (!ctx->gf128) | ||
71 | return -ENOKEY; | ||
72 | |||
70 | if (dctx->bytes) { | 73 | if (dctx->bytes) { |
71 | int n = min(srclen, dctx->bytes); | 74 | int n = min(srclen, dctx->bytes); |
72 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); | 75 | u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes); |
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst) | |||
119 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); | 122 | struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); |
120 | u8 *buf = dctx->buffer; | 123 | u8 *buf = dctx->buffer; |
121 | 124 | ||
125 | if (!ctx->gf128) | ||
126 | return -ENOKEY; | ||
127 | |||
122 | ghash_flush(ctx, dctx); | 128 | ghash_flush(ctx, dctx); |
123 | memcpy(dst, buf, GHASH_BLOCK_SIZE); | 129 | memcpy(dst, buf, GHASH_BLOCK_SIZE); |
124 | 130 | ||
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 0599854e2217..118ec12d2d5f 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c | |||
@@ -34,8 +34,8 @@ struct gpio_bank { | |||
34 | u16 irq; | 34 | u16 irq; |
35 | u16 virtual_irq_start; | 35 | u16 virtual_irq_start; |
36 | int method; | 36 | int method; |
37 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | ||
38 | u32 suspend_wakeup; | 37 | u32 suspend_wakeup; |
38 | #if defined(CONFIG_ARCH_OMAP16XX) || defined(CONFIG_ARCH_OMAP2PLUS) | ||
39 | u32 saved_wakeup; | 39 | u32 saved_wakeup; |
40 | #endif | 40 | #endif |
41 | u32 non_wakeup_gpios; | 41 | u32 non_wakeup_gpios; |
diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index c43b8ff626a7..0550dcb85814 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c | |||
@@ -577,6 +577,7 @@ pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) | |||
577 | void | 577 | void |
578 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) | 578 | pca953x_get_alt_pdata(struct i2c_client *client, int *gpio_base, int *invert) |
579 | { | 579 | { |
580 | *gpio_base = -1; | ||
580 | } | 581 | } |
581 | #endif | 582 | #endif |
582 | 583 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ce045a8cf82c..f07e4252b708 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | |||
67 | MODULE_PARM_DESC(i915_enable_rc6, | 67 | MODULE_PARM_DESC(i915_enable_rc6, |
68 | "Enable power-saving render C-state 6 (default: true)"); | 68 | "Enable power-saving render C-state 6 (default: true)"); |
69 | 69 | ||
70 | unsigned int i915_enable_fbc __read_mostly = 1; | 70 | unsigned int i915_enable_fbc __read_mostly = -1; |
71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
72 | MODULE_PARM_DESC(i915_enable_fbc, | 72 | MODULE_PARM_DESC(i915_enable_fbc, |
73 | "Enable frame buffer compression for power savings " | 73 | "Enable frame buffer compression for power savings " |
74 | "(default: false)"); | 74 | "(default: -1 (use per-chip default))"); |
75 | 75 | ||
76 | unsigned int i915_lvds_downclock __read_mostly = 0; | 76 | unsigned int i915_lvds_downclock __read_mostly = 0; |
77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56a8554d9039..04411ad2e779 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1799 | struct drm_framebuffer *fb; | 1799 | struct drm_framebuffer *fb; |
1800 | struct intel_framebuffer *intel_fb; | 1800 | struct intel_framebuffer *intel_fb; |
1801 | struct drm_i915_gem_object *obj; | 1801 | struct drm_i915_gem_object *obj; |
1802 | int enable_fbc; | ||
1802 | 1803 | ||
1803 | DRM_DEBUG_KMS("\n"); | 1804 | DRM_DEBUG_KMS("\n"); |
1804 | 1805 | ||
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1839 | intel_fb = to_intel_framebuffer(fb); | 1840 | intel_fb = to_intel_framebuffer(fb); |
1840 | obj = intel_fb->obj; | 1841 | obj = intel_fb->obj; |
1841 | 1842 | ||
1842 | if (!i915_enable_fbc) { | 1843 | enable_fbc = i915_enable_fbc; |
1843 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | 1844 | if (enable_fbc < 0) { |
1845 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); | ||
1846 | enable_fbc = 1; | ||
1847 | if (INTEL_INFO(dev)->gen <= 5) | ||
1848 | enable_fbc = 0; | ||
1849 | } | ||
1850 | if (!enable_fbc) { | ||
1851 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
1844 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | 1852 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1845 | goto out_disable; | 1853 | goto out_disable; |
1846 | } | 1854 | } |
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4687 | bpc = 6; /* min is 18bpp */ | 4695 | bpc = 6; /* min is 18bpp */ |
4688 | break; | 4696 | break; |
4689 | case 24: | 4697 | case 24: |
4690 | bpc = min((unsigned int)8, display_bpc); | 4698 | bpc = 8; |
4691 | break; | 4699 | break; |
4692 | case 30: | 4700 | case 30: |
4693 | bpc = min((unsigned int)10, display_bpc); | 4701 | bpc = 10; |
4694 | break; | 4702 | break; |
4695 | case 48: | 4703 | case 48: |
4696 | bpc = min((unsigned int)12, display_bpc); | 4704 | bpc = 12; |
4697 | break; | 4705 | break; |
4698 | default: | 4706 | default: |
4699 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); | 4707 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); |
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4701 | break; | 4709 | break; |
4702 | } | 4710 | } |
4703 | 4711 | ||
4712 | display_bpc = min(display_bpc, bpc); | ||
4713 | |||
4704 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", | 4714 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", |
4705 | bpc, display_bpc); | 4715 | bpc, display_bpc); |
4706 | 4716 | ||
4707 | *pipe_bpp = bpc * 3; | 4717 | *pipe_bpp = display_bpc * 3; |
4708 | 4718 | ||
4709 | return display_bpc != bpc; | 4719 | return display_bpc != bpc; |
4710 | } | 4720 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0b2ee9d39980..fe1099d8817e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
337 | struct drm_connector *connector, | 337 | struct drm_connector *connector, |
338 | struct intel_load_detect_pipe *old); | 338 | struct intel_load_detect_pipe *old); |
339 | 339 | ||
340 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | ||
341 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | ||
342 | extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); | ||
343 | extern void intelfb_restore(void); | 340 | extern void intelfb_restore(void); |
344 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 341 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
345 | u16 blue, int regno); | 342 | u16 blue, int regno); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d8936..6348c499616f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -92,6 +92,11 @@ struct intel_sdvo { | |||
92 | */ | 92 | */ |
93 | uint16_t attached_output; | 93 | uint16_t attached_output; |
94 | 94 | ||
95 | /* | ||
96 | * Hotplug activation bits for this device | ||
97 | */ | ||
98 | uint8_t hotplug_active[2]; | ||
99 | |||
95 | /** | 100 | /** |
96 | * This is used to select the color range of RBG outputs in HDMI mode. | 101 | * This is used to select the color range of RBG outputs in HDMI mode. |
97 | * It is only valid when using TMDS encoding and 8 bit per color mode. | 102 | * It is only valid when using TMDS encoding and 8 bit per color mode. |
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in | |||
1208 | return true; | 1213 | return true; |
1209 | } | 1214 | } |
1210 | 1215 | ||
1211 | /* No use! */ | 1216 | static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) |
1212 | #if 0 | ||
1213 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | ||
1214 | { | ||
1215 | struct drm_connector *connector = NULL; | ||
1216 | struct intel_sdvo *iout = NULL; | ||
1217 | struct intel_sdvo *sdvo; | ||
1218 | |||
1219 | /* find the sdvo connector */ | ||
1220 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1221 | iout = to_intel_sdvo(connector); | ||
1222 | |||
1223 | if (iout->type != INTEL_OUTPUT_SDVO) | ||
1224 | continue; | ||
1225 | |||
1226 | sdvo = iout->dev_priv; | ||
1227 | |||
1228 | if (sdvo->sdvo_reg == SDVOB && sdvoB) | ||
1229 | return connector; | ||
1230 | |||
1231 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) | ||
1232 | return connector; | ||
1233 | |||
1234 | } | ||
1235 | |||
1236 | return NULL; | ||
1237 | } | ||
1238 | |||
1239 | int intel_sdvo_supports_hotplug(struct drm_connector *connector) | ||
1240 | { | 1217 | { |
1241 | u8 response[2]; | 1218 | u8 response[2]; |
1242 | u8 status; | ||
1243 | struct intel_sdvo *intel_sdvo; | ||
1244 | DRM_DEBUG_KMS("\n"); | ||
1245 | |||
1246 | if (!connector) | ||
1247 | return 0; | ||
1248 | |||
1249 | intel_sdvo = to_intel_sdvo(connector); | ||
1250 | 1219 | ||
1251 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, | 1220 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, |
1252 | &response, 2) && response[0]; | 1221 | &response, 2) && response[0]; |
1253 | } | 1222 | } |
1254 | 1223 | ||
1255 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | 1224 | static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) |
1256 | { | 1225 | { |
1257 | u8 response[2]; | 1226 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); |
1258 | u8 status; | ||
1259 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); | ||
1260 | |||
1261 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1262 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1263 | |||
1264 | if (on) { | ||
1265 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1266 | status = intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1267 | |||
1268 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1269 | } else { | ||
1270 | response[0] = 0; | ||
1271 | response[1] = 0; | ||
1272 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1273 | } | ||
1274 | 1227 | ||
1275 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1228 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); |
1276 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1277 | } | 1229 | } |
1278 | #endif | ||
1279 | 1230 | ||
1280 | static bool | 1231 | static bool |
1281 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2045 | { | 1996 | { |
2046 | struct drm_encoder *encoder = &intel_sdvo->base.base; | 1997 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2047 | struct drm_connector *connector; | 1998 | struct drm_connector *connector; |
1999 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
2048 | struct intel_connector *intel_connector; | 2000 | struct intel_connector *intel_connector; |
2049 | struct intel_sdvo_connector *intel_sdvo_connector; | 2001 | struct intel_sdvo_connector *intel_sdvo_connector; |
2050 | 2002 | ||
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2062 | 2014 | ||
2063 | intel_connector = &intel_sdvo_connector->base; | 2015 | intel_connector = &intel_sdvo_connector->base; |
2064 | connector = &intel_connector->base; | 2016 | connector = &intel_connector->base; |
2065 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2017 | if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { |
2018 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2019 | intel_sdvo->hotplug_active[0] |= 1 << device; | ||
2020 | /* Some SDVO devices have one-shot hotplug interrupts. | ||
2021 | * Ensure that they get re-enabled when an interrupt happens. | ||
2022 | */ | ||
2023 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; | ||
2024 | intel_sdvo_enable_hotplug(intel_encoder); | ||
2025 | } | ||
2026 | else | ||
2027 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
2066 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2028 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2067 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2029 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2068 | 2030 | ||
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2569 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) | 2531 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2570 | goto err; | 2532 | goto err; |
2571 | 2533 | ||
2534 | /* Set up hotplug command - note paranoia about contents of reply. | ||
2535 | * We assume that the hardware is in a sane state, and only touch | ||
2536 | * the bits we think we understand. | ||
2537 | */ | ||
2538 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, | ||
2539 | &intel_sdvo->hotplug_active, 2); | ||
2540 | intel_sdvo->hotplug_active[0] &= ~0x3; | ||
2541 | |||
2572 | if (intel_sdvo_output_setup(intel_sdvo, | 2542 | if (intel_sdvo_output_setup(intel_sdvo, |
2573 | intel_sdvo->caps.output_flags) != true) { | 2543 | intel_sdvo->caps.output_flags) != true) { |
2574 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2544 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index e88c64417a8a..14cc88aaf3a7 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c | |||
@@ -277,7 +277,12 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, | |||
277 | case ATOM_ARG_FB: | 277 | case ATOM_ARG_FB: |
278 | idx = U8(*ptr); | 278 | idx = U8(*ptr); |
279 | (*ptr)++; | 279 | (*ptr)++; |
280 | val = gctx->scratch[((gctx->fb_base + idx) / 4)]; | 280 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
281 | DRM_ERROR("ATOM: fb read beyond scratch region: %d vs. %d\n", | ||
282 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
283 | val = 0; | ||
284 | } else | ||
285 | val = gctx->scratch[(gctx->fb_base / 4) + idx]; | ||
281 | if (print) | 286 | if (print) |
282 | DEBUG("FB[0x%02X]", idx); | 287 | DEBUG("FB[0x%02X]", idx); |
283 | break; | 288 | break; |
@@ -531,7 +536,11 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, | |||
531 | case ATOM_ARG_FB: | 536 | case ATOM_ARG_FB: |
532 | idx = U8(*ptr); | 537 | idx = U8(*ptr); |
533 | (*ptr)++; | 538 | (*ptr)++; |
534 | gctx->scratch[((gctx->fb_base + idx) / 4)] = val; | 539 | if ((gctx->fb_base + (idx * 4)) > gctx->scratch_size_bytes) { |
540 | DRM_ERROR("ATOM: fb write beyond scratch region: %d vs. %d\n", | ||
541 | gctx->fb_base + (idx * 4), gctx->scratch_size_bytes); | ||
542 | } else | ||
543 | gctx->scratch[(gctx->fb_base / 4) + idx] = val; | ||
535 | DEBUG("FB[0x%02X]", idx); | 544 | DEBUG("FB[0x%02X]", idx); |
536 | break; | 545 | break; |
537 | case ATOM_ARG_PLL: | 546 | case ATOM_ARG_PLL: |
@@ -1370,11 +1379,13 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) | |||
1370 | 1379 | ||
1371 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; | 1380 | usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; |
1372 | } | 1381 | } |
1382 | ctx->scratch_size_bytes = 0; | ||
1373 | if (usage_bytes == 0) | 1383 | if (usage_bytes == 0) |
1374 | usage_bytes = 20 * 1024; | 1384 | usage_bytes = 20 * 1024; |
1375 | /* allocate some scratch memory */ | 1385 | /* allocate some scratch memory */ |
1376 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); | 1386 | ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); |
1377 | if (!ctx->scratch) | 1387 | if (!ctx->scratch) |
1378 | return -ENOMEM; | 1388 | return -ENOMEM; |
1389 | ctx->scratch_size_bytes = usage_bytes; | ||
1379 | return 0; | 1390 | return 0; |
1380 | } | 1391 | } |
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h index a589a55b223e..93cfe2086ba0 100644 --- a/drivers/gpu/drm/radeon/atom.h +++ b/drivers/gpu/drm/radeon/atom.h | |||
@@ -137,6 +137,7 @@ struct atom_context { | |||
137 | int cs_equal, cs_above; | 137 | int cs_equal, cs_above; |
138 | int io_mode; | 138 | int io_mode; |
139 | uint32_t *scratch; | 139 | uint32_t *scratch; |
140 | int scratch_size_bytes; | ||
140 | }; | 141 | }; |
141 | 142 | ||
142 | extern int atom_debug; | 143 | extern int atom_debug; |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index c742944d3805..a515b2a09d85 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -466,7 +466,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc, | |||
466 | return; | 466 | return; |
467 | } | 467 | } |
468 | args.v2.ucEnable = enable; | 468 | args.v2.ucEnable = enable; |
469 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK)) | 469 | if ((ss->percentage == 0) || (ss->type & ATOM_EXTERNAL_SS_MASK) || ASIC_IS_DCE41(rdev)) |
470 | args.v2.ucEnable = ATOM_DISABLE; | 470 | args.v2.ucEnable = ATOM_DISABLE; |
471 | } else if (ASIC_IS_DCE3(rdev)) { | 471 | } else if (ASIC_IS_DCE3(rdev)) { |
472 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); | 472 | args.v1.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage); |
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7ad43c6b1db7..79e8ebc05307 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
115 | u8 msg[20]; | 115 | u8 msg[20]; |
116 | int msg_bytes = send_bytes + 4; | 116 | int msg_bytes = send_bytes + 4; |
117 | u8 ack; | 117 | u8 ack; |
118 | unsigned retry; | ||
118 | 119 | ||
119 | if (send_bytes > 16) | 120 | if (send_bytes > 16) |
120 | return -1; | 121 | return -1; |
@@ -125,20 +126,22 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
125 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | 126 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); |
126 | memcpy(&msg[4], send, send_bytes); | 127 | memcpy(&msg[4], send, send_bytes); |
127 | 128 | ||
128 | while (1) { | 129 | for (retry = 0; retry < 4; retry++) { |
129 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 130 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
130 | msg, msg_bytes, NULL, 0, delay, &ack); | 131 | msg, msg_bytes, NULL, 0, delay, &ack); |
131 | if (ret < 0) | 132 | if (ret == -EBUSY) |
133 | continue; | ||
134 | else if (ret < 0) | ||
132 | return ret; | 135 | return ret; |
133 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 136 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
134 | break; | 137 | return send_bytes; |
135 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 138 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
136 | udelay(400); | 139 | udelay(400); |
137 | else | 140 | else |
138 | return -EIO; | 141 | return -EIO; |
139 | } | 142 | } |
140 | 143 | ||
141 | return send_bytes; | 144 | return -EIO; |
142 | } | 145 | } |
143 | 146 | ||
144 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | 147 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, |
@@ -149,26 +152,31 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | |||
149 | int msg_bytes = 4; | 152 | int msg_bytes = 4; |
150 | u8 ack; | 153 | u8 ack; |
151 | int ret; | 154 | int ret; |
155 | unsigned retry; | ||
152 | 156 | ||
153 | msg[0] = address; | 157 | msg[0] = address; |
154 | msg[1] = address >> 8; | 158 | msg[1] = address >> 8; |
155 | msg[2] = AUX_NATIVE_READ << 4; | 159 | msg[2] = AUX_NATIVE_READ << 4; |
156 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | 160 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); |
157 | 161 | ||
158 | while (1) { | 162 | for (retry = 0; retry < 4; retry++) { |
159 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 163 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
160 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | 164 | msg, msg_bytes, recv, recv_bytes, delay, &ack); |
161 | if (ret == 0) | 165 | if (ret == -EBUSY) |
162 | return -EPROTO; | 166 | continue; |
163 | if (ret < 0) | 167 | else if (ret < 0) |
164 | return ret; | 168 | return ret; |
165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 169 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
166 | return ret; | 170 | return ret; |
167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 171 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
168 | udelay(400); | 172 | udelay(400); |
173 | else if (ret == 0) | ||
174 | return -EPROTO; | ||
169 | else | 175 | else |
170 | return -EIO; | 176 | return -EIO; |
171 | } | 177 | } |
178 | |||
179 | return -EIO; | ||
172 | } | 180 | } |
173 | 181 | ||
174 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, | 182 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, |
@@ -232,7 +240,9 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
232 | for (retry = 0; retry < 4; retry++) { | 240 | for (retry = 0; retry < 4; retry++) { |
233 | ret = radeon_process_aux_ch(auxch, | 241 | ret = radeon_process_aux_ch(auxch, |
234 | msg, msg_bytes, reply, reply_bytes, 0, &ack); | 242 | msg, msg_bytes, reply, reply_bytes, 0, &ack); |
235 | if (ret < 0) { | 243 | if (ret == -EBUSY) |
244 | continue; | ||
245 | else if (ret < 0) { | ||
236 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | 246 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
237 | return ret; | 247 | return ret; |
238 | } | 248 | } |
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e8a746712b5b..c4ffa14fb2f4 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1590 | return backend_map; | 1590 | return backend_map; |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1594 | { | ||
1595 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1596 | |||
1597 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1598 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1599 | case 0: | ||
1600 | case 1: | ||
1601 | case 2: | ||
1602 | case 3: | ||
1603 | default: | ||
1604 | /* default mapping */ | ||
1605 | mc_shared_chremap = 0x00fac688; | ||
1606 | break; | ||
1607 | } | ||
1608 | |||
1609 | switch (rdev->family) { | ||
1610 | case CHIP_HEMLOCK: | ||
1611 | case CHIP_CYPRESS: | ||
1612 | case CHIP_BARTS: | ||
1613 | tcp_chan_steer_lo = 0x54763210; | ||
1614 | tcp_chan_steer_hi = 0x0000ba98; | ||
1615 | break; | ||
1616 | case CHIP_JUNIPER: | ||
1617 | case CHIP_REDWOOD: | ||
1618 | case CHIP_CEDAR: | ||
1619 | case CHIP_PALM: | ||
1620 | case CHIP_SUMO: | ||
1621 | case CHIP_SUMO2: | ||
1622 | case CHIP_TURKS: | ||
1623 | case CHIP_CAICOS: | ||
1624 | default: | ||
1625 | tcp_chan_steer_lo = 0x76543210; | ||
1626 | tcp_chan_steer_hi = 0x0000ba98; | ||
1627 | break; | ||
1628 | } | ||
1629 | |||
1630 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1631 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1632 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1633 | } | ||
1634 | |||
1635 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1593 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1636 | { | 1594 | { |
1637 | u32 cc_rb_backend_disable = 0; | 1595 | u32 cc_rb_backend_disable = 0; |
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2078 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 2036 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2079 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 2037 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2080 | 2038 | ||
2081 | evergreen_program_channel_remap(rdev); | ||
2082 | |||
2083 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 2039 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
2084 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 2040 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
2085 | 2041 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 99fbd793c08c..8c79ca97753d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
569 | return backend_map; | 569 | return backend_map; |
570 | } | 570 | } |
571 | 571 | ||
572 | static void cayman_program_channel_remap(struct radeon_device *rdev) | ||
573 | { | ||
574 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
575 | |||
576 | tmp = RREG32(MC_SHARED_CHMAP); | ||
577 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
578 | case 0: | ||
579 | case 1: | ||
580 | case 2: | ||
581 | case 3: | ||
582 | default: | ||
583 | /* default mapping */ | ||
584 | mc_shared_chremap = 0x00fac688; | ||
585 | break; | ||
586 | } | ||
587 | |||
588 | switch (rdev->family) { | ||
589 | case CHIP_CAYMAN: | ||
590 | default: | ||
591 | //tcp_chan_steer_lo = 0x54763210 | ||
592 | tcp_chan_steer_lo = 0x76543210; | ||
593 | tcp_chan_steer_hi = 0x0000ba98; | ||
594 | break; | ||
595 | } | ||
596 | |||
597 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
598 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
599 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
600 | } | ||
601 | |||
602 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | 572 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, |
603 | u32 disable_mask_per_se, | 573 | u32 disable_mask_per_se, |
604 | u32 max_disable_mask_per_se, | 574 | u32 max_disable_mask_per_se, |
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
842 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 812 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
843 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 813 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
844 | 814 | ||
845 | cayman_program_channel_remap(rdev); | ||
846 | |||
847 | /* primary versions */ | 815 | /* primary versions */ |
848 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 816 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
849 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 817 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c4b8741dbf58..449c3d8c6836 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
69 | int saved_dpms = connector->dpms; | 69 | int saved_dpms = connector->dpms; |
70 | 70 | ||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && | 71 | /* Only turn off the display if it's physically disconnected */ |
72 | radeon_dp_needs_link_train(radeon_connector)) | 72 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) |
73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
74 | else | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
74 | else if (radeon_dp_needs_link_train(radeon_connector)) | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
76 | connector->dpms = saved_dpms; | 76 | connector->dpms = saved_dpms; |
77 | } | 77 | } |
78 | } | 78 | } |
@@ -1303,23 +1303,14 @@ radeon_dp_detect(struct drm_connector *connector, bool force) | |||
1303 | /* get the DPCD from the bridge */ | 1303 | /* get the DPCD from the bridge */ |
1304 | radeon_dp_getdpcd(radeon_connector); | 1304 | radeon_dp_getdpcd(radeon_connector); |
1305 | 1305 | ||
1306 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) | 1306 | if (encoder) { |
1307 | ret = connector_status_connected; | 1307 | /* setup ddc on the bridge */ |
1308 | else { | 1308 | radeon_atom_ext_encoder_setup_ddc(encoder); |
1309 | /* need to setup ddc on the bridge */ | ||
1310 | if (encoder) | ||
1311 | radeon_atom_ext_encoder_setup_ddc(encoder); | ||
1312 | if (radeon_ddc_probe(radeon_connector, | 1309 | if (radeon_ddc_probe(radeon_connector, |
1313 | radeon_connector->requires_extended_probe)) | 1310 | radeon_connector->requires_extended_probe)) /* try DDC */ |
1314 | ret = connector_status_connected; | 1311 | ret = connector_status_connected; |
1315 | } | 1312 | else if (radeon_connector->dac_load_detect) { /* try load detection */ |
1316 | 1313 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
1317 | if ((ret == connector_status_disconnected) && | ||
1318 | radeon_connector->dac_load_detect) { | ||
1319 | struct drm_encoder *encoder = radeon_best_single_encoder(connector); | ||
1320 | struct drm_encoder_helper_funcs *encoder_funcs; | ||
1321 | if (encoder) { | ||
1322 | encoder_funcs = encoder->helper_private; | ||
1323 | ret = encoder_funcs->detect(encoder, connector); | 1314 | ret = encoder_funcs->detect(encoder, connector); |
1324 | } | 1315 | } |
1325 | } | 1316 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e9..fde25c0d65a0 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
208 | int xorigin = 0, yorigin = 0; | 208 | int xorigin = 0, yorigin = 0; |
209 | int w = radeon_crtc->cursor_width; | 209 | int w = radeon_crtc->cursor_width; |
210 | 210 | ||
211 | if (x < 0) | ||
212 | xorigin = -x + 1; | ||
213 | if (y < 0) | ||
214 | yorigin = -y + 1; | ||
215 | if (xorigin >= CURSOR_WIDTH) | ||
216 | xorigin = CURSOR_WIDTH - 1; | ||
217 | if (yorigin >= CURSOR_HEIGHT) | ||
218 | yorigin = CURSOR_HEIGHT - 1; | ||
219 | |||
220 | if (ASIC_IS_AVIVO(rdev)) { | 211 | if (ASIC_IS_AVIVO(rdev)) { |
221 | int i = 0; | ||
222 | struct drm_crtc *crtc_p; | ||
223 | |||
224 | /* avivo cursor are offset into the total surface */ | 212 | /* avivo cursor are offset into the total surface */ |
225 | x += crtc->x; | 213 | x += crtc->x; |
226 | y += crtc->y; | 214 | y += crtc->y; |
227 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | 215 | } |
216 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
217 | |||
218 | if (x < 0) { | ||
219 | xorigin = min(-x, CURSOR_WIDTH - 1); | ||
220 | x = 0; | ||
221 | } | ||
222 | if (y < 0) { | ||
223 | yorigin = min(-y, CURSOR_HEIGHT - 1); | ||
224 | y = 0; | ||
225 | } | ||
226 | |||
227 | if (ASIC_IS_AVIVO(rdev)) { | ||
228 | int i = 0; | ||
229 | struct drm_crtc *crtc_p; | ||
228 | 230 | ||
229 | /* avivo cursor image can't end on 128 pixel boundary or | 231 | /* avivo cursor image can't end on 128 pixel boundary or |
230 | * go past the end of the frame if both crtcs are enabled | 232 | * go past the end of the frame if both crtcs are enabled |
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
253 | 255 | ||
254 | radeon_lock_cursor(crtc, true); | 256 | radeon_lock_cursor(crtc, true); |
255 | if (ASIC_IS_DCE4(rdev)) { | 257 | if (ASIC_IS_DCE4(rdev)) { |
256 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | 258 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
257 | ((xorigin ? 0 : x) << 16) | | ||
258 | (yorigin ? 0 : y)); | ||
259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | 260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, |
261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
262 | } else if (ASIC_IS_AVIVO(rdev)) { | 262 | } else if (ASIC_IS_AVIVO(rdev)) { |
263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, | 263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
264 | ((xorigin ? 0 : x) << 16) | | ||
265 | (yorigin ? 0 : y)); | ||
266 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 264 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
267 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, | 265 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, |
268 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 266 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
276 | | yorigin)); | 274 | | yorigin)); |
277 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, | 275 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, |
278 | (RADEON_CUR_LOCK | 276 | (RADEON_CUR_LOCK |
279 | | ((xorigin ? 0 : x) << 16) | 277 | | (x << 16) |
280 | | (yorigin ? 0 : y))); | 278 | | y)); |
281 | /* offset is from DISP(2)_BASE_ADDRESS */ | 279 | /* offset is from DISP(2)_BASE_ADDRESS */ |
282 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | 280 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + |
283 | (yorigin * 256))); | 281 | (yorigin * 256))); |
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 13690f3eb4a4..eb3f6dc6df83 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1638,7 +1638,17 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder) | |||
1638 | break; | 1638 | break; |
1639 | case 2: | 1639 | case 2: |
1640 | args.v2.ucCRTC = radeon_crtc->crtc_id; | 1640 | args.v2.ucCRTC = radeon_crtc->crtc_id; |
1641 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | 1641 | if (radeon_encoder_is_dp_bridge(encoder)) { |
1642 | struct drm_connector *connector = radeon_get_connector_for_encoder(encoder); | ||
1643 | |||
1644 | if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS) | ||
1645 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_LVDS; | ||
1646 | else if (connector->connector_type == DRM_MODE_CONNECTOR_VGA) | ||
1647 | args.v2.ucEncodeMode = ATOM_ENCODER_MODE_CRT; | ||
1648 | else | ||
1649 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1650 | } else | ||
1651 | args.v2.ucEncodeMode = atombios_get_encoder_mode(encoder); | ||
1642 | switch (radeon_encoder->encoder_id) { | 1652 | switch (radeon_encoder->encoder_id) { |
1643 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1653 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1644 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: | 1654 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1: |
@@ -1755,9 +1765,17 @@ static int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder) | |||
1755 | /* DCE4/5 */ | 1765 | /* DCE4/5 */ |
1756 | if (ASIC_IS_DCE4(rdev)) { | 1766 | if (ASIC_IS_DCE4(rdev)) { |
1757 | dig = radeon_encoder->enc_priv; | 1767 | dig = radeon_encoder->enc_priv; |
1758 | if (ASIC_IS_DCE41(rdev)) | 1768 | if (ASIC_IS_DCE41(rdev)) { |
1759 | return radeon_crtc->crtc_id; | 1769 | /* ontario follows DCE4 */ |
1760 | else { | 1770 | if (rdev->family == CHIP_PALM) { |
1771 | if (dig->linkb) | ||
1772 | return 1; | ||
1773 | else | ||
1774 | return 0; | ||
1775 | } else | ||
1776 | /* llano follows DCE3.2 */ | ||
1777 | return radeon_crtc->crtc_id; | ||
1778 | } else { | ||
1761 | switch (radeon_encoder->encoder_id) { | 1779 | switch (radeon_encoder->encoder_id) { |
1762 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: | 1780 | case ENCODER_OBJECT_ID_INTERNAL_UNIPHY: |
1763 | if (dig->linkb) | 1781 | if (dig->linkb) |
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4720d000d440..b13c2eedc321 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
536 | return backend_map; | 536 | return backend_map; |
537 | } | 537 | } |
538 | 538 | ||
539 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
540 | { | ||
541 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
542 | bool force_no_swizzle; | ||
543 | |||
544 | switch (rdev->family) { | ||
545 | case CHIP_RV770: | ||
546 | case CHIP_RV730: | ||
547 | force_no_swizzle = false; | ||
548 | break; | ||
549 | case CHIP_RV710: | ||
550 | case CHIP_RV740: | ||
551 | default: | ||
552 | force_no_swizzle = true; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | tmp = RREG32(MC_SHARED_CHMAP); | ||
557 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
558 | case 0: | ||
559 | case 1: | ||
560 | default: | ||
561 | /* default mapping */ | ||
562 | mc_shared_chremap = 0x00fac688; | ||
563 | break; | ||
564 | case 2: | ||
565 | case 3: | ||
566 | if (force_no_swizzle) | ||
567 | mc_shared_chremap = 0x00fac688; | ||
568 | else | ||
569 | mc_shared_chremap = 0x00bbc298; | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (rdev->family == CHIP_RV740) | ||
574 | tcp_chan_steer = 0x00ef2a60; | ||
575 | else | ||
576 | tcp_chan_steer = 0x00fac688; | ||
577 | |||
578 | /* RV770 CE has special chremap setup */ | ||
579 | if (rdev->pdev->device == 0x944e) { | ||
580 | tcp_chan_steer = 0x00b08b08; | ||
581 | mc_shared_chremap = 0x00b08b08; | ||
582 | } | ||
583 | |||
584 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
585 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
586 | } | ||
587 | |||
588 | static void rv770_gpu_init(struct radeon_device *rdev) | 539 | static void rv770_gpu_init(struct radeon_device *rdev) |
589 | { | 540 | { |
590 | int i, j, num_qd_pipes; | 541 | int i, j, num_qd_pipes; |
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
785 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 736 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
786 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 737 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
787 | 738 | ||
788 | rv770_program_channel_remap(rdev); | ||
789 | |||
790 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 739 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
791 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 740 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
792 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 741 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index ae3c6f5dd2b7..082fcaea583f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c | |||
@@ -321,7 +321,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, | |||
321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; | 321 | struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type]; |
322 | struct ttm_tt *ttm = bo->ttm; | 322 | struct ttm_tt *ttm = bo->ttm; |
323 | struct ttm_mem_reg *old_mem = &bo->mem; | 323 | struct ttm_mem_reg *old_mem = &bo->mem; |
324 | struct ttm_mem_reg old_copy; | 324 | struct ttm_mem_reg old_copy = *old_mem; |
325 | void *old_iomap; | 325 | void *old_iomap; |
326 | void *new_iomap; | 326 | void *new_iomap; |
327 | int ret; | 327 | int ret; |
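The one-line ttm_bo_move_memcpy() change above initializes the local old_copy from *old_mem up front, so the cleanup path never works on an indeterminate struct if the move fails early. A minimal userspace sketch of the same snapshot-before-failure pattern, with made-up types:

/* Take the snapshot of the old state before anything can fail, so the
 * error path never reads an uninitialized local. */
#include <stdio.h>
#include <stddef.h>

struct mem_reg { int placement; size_t size; };

static int risky_move(struct mem_reg *cur, int new_placement)
{
	struct mem_reg old_copy = *cur;    /* snapshot first (the fix) */

	cur->placement = new_placement;
	if (new_placement < 0) {           /* pretend the move failed */
		*cur = old_copy;           /* roll back from a valid copy */
		return -1;
	}
	return 0;
}

int main(void)
{
	struct mem_reg reg = { .placement = 1, .size = 4096 };

	if (risky_move(&reg, -5))
		printf("move failed, placement restored to %d\n", reg.placement);
	return 0;
}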
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 44b23917d4cc..932383786642 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -377,9 +377,9 @@ exit_free: | |||
377 | } | 377 | } |
378 | 378 | ||
379 | 379 | ||
380 | static int __devinit chk_ucode_version(struct platform_device *pdev) | 380 | static int __cpuinit chk_ucode_version(unsigned int cpu) |
381 | { | 381 | { |
382 | struct cpuinfo_x86 *c = &cpu_data(pdev->id); | 382 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
383 | int err; | 383 | int err; |
384 | u32 edx; | 384 | u32 edx; |
385 | 385 | ||
@@ -390,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev) | |||
390 | */ | 390 | */ |
391 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { | 391 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { |
392 | /* check for microcode update */ | 392 | /* check for microcode update */ |
393 | err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, | 393 | err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, |
394 | &edx, 1); | 394 | &edx, 1); |
395 | if (err) { | 395 | if (err) { |
396 | dev_err(&pdev->dev, | 396 | pr_err("Cannot determine microcode revision of " |
397 | "Cannot determine microcode revision of " | 397 | "CPU#%u (%d)!\n", cpu, err); |
398 | "CPU#%u (%d)!\n", pdev->id, err); | ||
399 | return -ENODEV; | 398 | return -ENODEV; |
400 | } else if (edx < 0x39) { | 399 | } else if (edx < 0x39) { |
401 | dev_err(&pdev->dev, | 400 | pr_err("Errata AE18 not fixed, update BIOS or " |
402 | "Errata AE18 not fixed, update BIOS or " | 401 | "microcode of the CPU!\n"); |
403 | "microcode of the CPU!\n"); | ||
404 | return -ENODEV; | 402 | return -ENODEV; |
405 | } | 403 | } |
406 | } | 404 | } |
@@ -508,6 +506,7 @@ static int create_core_data(struct platform_device *pdev, | |||
508 | 506 | ||
509 | return 0; | 507 | return 0; |
510 | exit_free: | 508 | exit_free: |
509 | pdata->core_data[attr_no] = NULL; | ||
511 | kfree(tdata); | 510 | kfree(tdata); |
512 | return err; | 511 | return err; |
513 | } | 512 | } |
@@ -544,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev) | |||
544 | struct platform_data *pdata; | 543 | struct platform_data *pdata; |
545 | int err; | 544 | int err; |
546 | 545 | ||
547 | /* Check the microcode version of the CPU */ | ||
548 | err = chk_ucode_version(pdev); | ||
549 | if (err) | ||
550 | return err; | ||
551 | |||
552 | /* Initialize the per-package data structures */ | 546 | /* Initialize the per-package data structures */ |
553 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); | 547 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); |
554 | if (!pdata) | 548 | if (!pdata) |
@@ -630,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
630 | } | 624 | } |
631 | 625 | ||
632 | pdev_entry->pdev = pdev; | 626 | pdev_entry->pdev = pdev; |
633 | pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); | 627 | pdev_entry->phys_proc_id = pdev->id; |
634 | 628 | ||
635 | list_add_tail(&pdev_entry->list, &pdev_list); | 629 | list_add_tail(&pdev_entry->list, &pdev_list); |
636 | mutex_unlock(&pdev_list_mutex); | 630 | mutex_unlock(&pdev_list_mutex); |
@@ -691,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu) | |||
691 | return; | 685 | return; |
692 | 686 | ||
693 | if (!pdev) { | 687 | if (!pdev) { |
688 | /* Check the microcode version of the CPU */ | ||
689 | if (chk_ucode_version(cpu)) | ||
690 | return; | ||
691 | |||
694 | /* | 692 | /* |
695 | * Alright, we have DTS support. | 693 | * Alright, we have DTS support. |
696 | * We are bringing the _first_ core in this pkg | 694 | * We are bringing the _first_ core in this pkg |
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index f2b377c56a3a..36d7f270b14d 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c | |||
@@ -390,7 +390,7 @@ temp_from_reg(u16 reg, s16 regval) | |||
390 | { | 390 | { |
391 | if (is_word_sized(reg)) | 391 | if (is_word_sized(reg)) |
392 | return LM75_TEMP_FROM_REG(regval); | 392 | return LM75_TEMP_FROM_REG(regval); |
393 | return regval * 1000; | 393 | return ((s8)regval) * 1000; |
394 | } | 394 | } |
395 | 395 | ||
396 | static inline u16 | 396 | static inline u16 |
@@ -398,7 +398,8 @@ temp_to_reg(u16 reg, long temp) | |||
398 | { | 398 | { |
399 | if (is_word_sized(reg)) | 399 | if (is_word_sized(reg)) |
400 | return LM75_TEMP_TO_REG(temp); | 400 | return LM75_TEMP_TO_REG(temp); |
401 | return DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), 1000); | 401 | return (s8)DIV_ROUND_CLOSEST(SENSORS_LIMIT(temp, -127000, 128000), |
402 | 1000); | ||
402 | } | 403 | } |
403 | 404 | ||
404 | /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ | 405 | /* Some of analog inputs have internal scaling (2x), 8mV is ADC LSB */ |
@@ -1715,7 +1716,8 @@ static void w83627ehf_device_remove_files(struct device *dev) | |||
1715 | } | 1716 | } |
1716 | 1717 | ||
1717 | /* Get the monitoring functions started */ | 1718 | /* Get the monitoring functions started */ |
1718 | static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) | 1719 | static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data, |
1720 | enum kinds kind) | ||
1719 | { | 1721 | { |
1720 | int i; | 1722 | int i; |
1721 | u8 tmp, diode; | 1723 | u8 tmp, diode; |
@@ -1746,10 +1748,16 @@ static inline void __devinit w83627ehf_init_device(struct w83627ehf_data *data) | |||
1746 | w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01); | 1748 | w83627ehf_write_value(data, W83627EHF_REG_VBAT, tmp | 0x01); |
1747 | 1749 | ||
1748 | /* Get thermal sensor types */ | 1750 | /* Get thermal sensor types */ |
1749 | diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); | 1751 | switch (kind) { |
1752 | case w83627ehf: | ||
1753 | diode = w83627ehf_read_value(data, W83627EHF_REG_DIODE); | ||
1754 | break; | ||
1755 | default: | ||
1756 | diode = 0x70; | ||
1757 | } | ||
1750 | for (i = 0; i < 3; i++) { | 1758 | for (i = 0; i < 3; i++) { |
1751 | if ((tmp & (0x02 << i))) | 1759 | if ((tmp & (0x02 << i))) |
1752 | data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 2; | 1760 | data->temp_type[i] = (diode & (0x10 << i)) ? 1 : 3; |
1753 | else | 1761 | else |
1754 | data->temp_type[i] = 4; /* thermistor */ | 1762 | data->temp_type[i] = 4; /* thermistor */ |
1755 | } | 1763 | } |
@@ -2016,7 +2024,7 @@ static int __devinit w83627ehf_probe(struct platform_device *pdev) | |||
2016 | } | 2024 | } |
2017 | 2025 | ||
2018 | /* Initialize the chip */ | 2026 | /* Initialize the chip */ |
2019 | w83627ehf_init_device(data); | 2027 | w83627ehf_init_device(data, sio_data->kind); |
2020 | 2028 | ||
2021 | data->vrm = vid_which_vrm(); | 2029 | data->vrm = vid_which_vrm(); |
2022 | superio_enter(sio_data->sioreg); | 2030 | superio_enter(sio_data->sioreg); |
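The temp_from_reg()/temp_to_reg() changes above are sign-extension fixes: the non-word-sized temperature registers hold 8-bit two's-complement values, so a raw reading such as 0xF6 must become -10 °C rather than 246 °C. A minimal demonstration of the cast:

/* Why the (s8) cast matters for 8-bit temperature registers. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t regval = 0xF6;                 /* as read from the chip */

	long wrong = regval * 1000;             /* old code: 246000 */
	long right = (int8_t)regval * 1000;     /* new code: -10000 */

	printf("unsigned: %ld mC, sign-extended: %ld mC\n", wrong, right);
	return 0;
}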
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index 9827c5e686cb..811dbbd9306c 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig | |||
@@ -327,7 +327,7 @@ config BLK_DEV_OPTI621 | |||
327 | select BLK_DEV_IDEPCI | 327 | select BLK_DEV_IDEPCI |
328 | help | 328 | help |
329 | This is a driver for the OPTi 82C621 EIDE controller. | 329 | This is a driver for the OPTi 82C621 EIDE controller. |
330 | Please read the comments at the top of <file:drivers/ide/pci/opti621.c>. | 330 | Please read the comments at the top of <file:drivers/ide/opti621.c>. |
331 | 331 | ||
332 | config BLK_DEV_RZ1000 | 332 | config BLK_DEV_RZ1000 |
333 | tristate "RZ1000 chipset bugfix/support" | 333 | tristate "RZ1000 chipset bugfix/support" |
@@ -365,7 +365,7 @@ config BLK_DEV_ALI15X3 | |||
365 | normal dual channel support. | 365 | normal dual channel support. |
366 | 366 | ||
367 | Please read the comments at the top of | 367 | Please read the comments at the top of |
368 | <file:drivers/ide/pci/alim15x3.c>. | 368 | <file:drivers/ide/alim15x3.c>. |
369 | 369 | ||
370 | If unsure, say N. | 370 | If unsure, say N. |
371 | 371 | ||
@@ -528,7 +528,7 @@ config BLK_DEV_NS87415 | |||
528 | This driver adds detection and support for the NS87415 chip | 528 | This driver adds detection and support for the NS87415 chip |
529 | (used mainly on SPARC64 and PA-RISC machines). | 529 | (used mainly on SPARC64 and PA-RISC machines). |
530 | 530 | ||
531 | Please read the comments at the top of <file:drivers/ide/pci/ns87415.c>. | 531 | Please read the comments at the top of <file:drivers/ide/ns87415.c>. |
532 | 532 | ||
533 | config BLK_DEV_PDC202XX_OLD | 533 | config BLK_DEV_PDC202XX_OLD |
534 | tristate "PROMISE PDC202{46|62|65|67} support" | 534 | tristate "PROMISE PDC202{46|62|65|67} support" |
@@ -547,7 +547,7 @@ config BLK_DEV_PDC202XX_OLD | |||
547 | for more than one card. | 547 | for more than one card. |
548 | 548 | ||
549 | Please read the comments at the top of | 549 | Please read the comments at the top of |
550 | <file:drivers/ide/pci/pdc202xx_old.c>. | 550 | <file:drivers/ide/pdc202xx_old.c>. |
551 | 551 | ||
552 | If unsure, say N. | 552 | If unsure, say N. |
553 | 553 | ||
@@ -593,7 +593,7 @@ config BLK_DEV_SIS5513 | |||
593 | ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740, | 593 | ATA100: SiS635, SiS645, SiS650, SiS730, SiS735, SiS740, |
594 | SiS745, SiS750 | 594 | SiS745, SiS750 |
595 | 595 | ||
596 | Please read the comments at the top of <file:drivers/ide/pci/sis5513.c>. | 596 | Please read the comments at the top of <file:drivers/ide/sis5513.c>. |
597 | 597 | ||
598 | config BLK_DEV_SL82C105 | 598 | config BLK_DEV_SL82C105 |
599 | tristate "Winbond SL82c105 support" | 599 | tristate "Winbond SL82c105 support" |
@@ -616,7 +616,7 @@ config BLK_DEV_SLC90E66 | |||
616 | look-a-like to the PIIX4 it should be a nice addition. | 616 | look-a-like to the PIIX4 it should be a nice addition. |
617 | 617 | ||
618 | Please read the comments at the top of | 618 | Please read the comments at the top of |
619 | <file:drivers/ide/pci/slc90e66.c>. | 619 | <file:drivers/ide/slc90e66.c>. |
620 | 620 | ||
621 | config BLK_DEV_TRM290 | 621 | config BLK_DEV_TRM290 |
622 | tristate "Tekram TRM290 chipset support" | 622 | tristate "Tekram TRM290 chipset support" |
@@ -625,7 +625,7 @@ config BLK_DEV_TRM290 | |||
625 | This driver adds support for bus master DMA transfers | 625 | This driver adds support for bus master DMA transfers |
626 | using the Tekram TRM290 PCI IDE chip. Volunteers are | 626 | using the Tekram TRM290 PCI IDE chip. Volunteers are |
627 | needed for further tweaking and development. | 627 | needed for further tweaking and development. |
628 | Please read the comments at the top of <file:drivers/ide/pci/trm290.c>. | 628 | Please read the comments at the top of <file:drivers/ide/trm290.c>. |
629 | 629 | ||
630 | config BLK_DEV_VIA82CXXX | 630 | config BLK_DEV_VIA82CXXX |
631 | tristate "VIA82CXXX chipset support" | 631 | tristate "VIA82CXXX chipset support" |
@@ -836,7 +836,7 @@ config BLK_DEV_ALI14XX | |||
836 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster | 836 | of the ALI M1439/1443/1445/1487/1489 chipsets, and permits faster |
837 | I/O speeds to be set as well. | 837 | I/O speeds to be set as well. |
838 | See the files <file:Documentation/ide/ide.txt> and | 838 | See the files <file:Documentation/ide/ide.txt> and |
839 | <file:drivers/ide/legacy/ali14xx.c> for more info. | 839 | <file:drivers/ide/ali14xx.c> for more info. |
840 | 840 | ||
841 | config BLK_DEV_DTC2278 | 841 | config BLK_DEV_DTC2278 |
842 | tristate "DTC-2278 support" | 842 | tristate "DTC-2278 support" |
@@ -847,7 +847,7 @@ config BLK_DEV_DTC2278 | |||
847 | boot parameter. It enables support for the secondary IDE interface | 847 | boot parameter. It enables support for the secondary IDE interface |
848 | of the DTC-2278 card, and permits faster I/O speeds to be set as | 848 | of the DTC-2278 card, and permits faster I/O speeds to be set as |
849 | well. See the <file:Documentation/ide/ide.txt> and | 849 | well. See the <file:Documentation/ide/ide.txt> and |
850 | <file:drivers/ide/legacy/dtc2278.c> files for more info. | 850 | <file:drivers/ide/dtc2278.c> files for more info. |
851 | 851 | ||
852 | config BLK_DEV_HT6560B | 852 | config BLK_DEV_HT6560B |
853 | tristate "Holtek HT6560B support" | 853 | tristate "Holtek HT6560B support" |
@@ -858,7 +858,7 @@ config BLK_DEV_HT6560B | |||
858 | boot parameter. It enables support for the secondary IDE interface | 858 | boot parameter. It enables support for the secondary IDE interface |
859 | of the Holtek card, and permits faster I/O speeds to be set as well. | 859 | of the Holtek card, and permits faster I/O speeds to be set as well. |
860 | See the <file:Documentation/ide/ide.txt> and | 860 | See the <file:Documentation/ide/ide.txt> and |
861 | <file:drivers/ide/legacy/ht6560b.c> files for more info. | 861 | <file:drivers/ide/ht6560b.c> files for more info. |
862 | 862 | ||
863 | config BLK_DEV_QD65XX | 863 | config BLK_DEV_QD65XX |
864 | tristate "QDI QD65xx support" | 864 | tristate "QDI QD65xx support" |
@@ -867,7 +867,7 @@ config BLK_DEV_QD65XX | |||
867 | help | 867 | help |
868 | This driver is enabled at runtime using the "qd65xx.probe" kernel | 868 | This driver is enabled at runtime using the "qd65xx.probe" kernel |
869 | boot parameter. It permits faster I/O speeds to be set. See the | 869 | boot parameter. It permits faster I/O speeds to be set. See the |
870 | <file:Documentation/ide/ide.txt> and <file:drivers/ide/legacy/qd65xx.c> | 870 | <file:Documentation/ide/ide.txt> and <file:drivers/ide/qd65xx.c> |
871 | for more info. | 871 | for more info. |
872 | 872 | ||
873 | config BLK_DEV_UMC8672 | 873 | config BLK_DEV_UMC8672 |
@@ -879,7 +879,7 @@ config BLK_DEV_UMC8672 | |||
879 | boot parameter. It enables support for the secondary IDE interface | 879 | boot parameter. It enables support for the secondary IDE interface |
880 | of the UMC-8672, and permits faster I/O speeds to be set as well. | 880 | of the UMC-8672, and permits faster I/O speeds to be set as well. |
881 | See the files <file:Documentation/ide/ide.txt> and | 881 | See the files <file:Documentation/ide/ide.txt> and |
882 | <file:drivers/ide/legacy/umc8672.c> for more info. | 882 | <file:drivers/ide/umc8672.c> for more info. |
883 | 883 | ||
884 | endif | 884 | endif |
885 | 885 | ||
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a54..16f69be820c7 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) | |||
435 | if (!(rq->cmd_flags & REQ_FLUSH)) | 435 | if (!(rq->cmd_flags & REQ_FLUSH)) |
436 | return BLKPREP_OK; | 436 | return BLKPREP_OK; |
437 | 437 | ||
438 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | 438 | if (rq->special) { |
439 | cmd = rq->special; | ||
440 | memset(cmd, 0, sizeof(*cmd)); | ||
441 | } else { | ||
442 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | ||
443 | } | ||
439 | 444 | ||
440 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ | 445 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ |
441 | BUG_ON(cmd == NULL); | 446 | BUG_ON(cmd == NULL); |
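The idedisk_prep_fn() hunk switches to reusing a command buffer already attached to the request (rq->special) and only allocating when none is present, which matters when a flush request comes back around. A userspace sketch of that reuse-or-allocate pattern, with stand-in types:

/* Reuse a buffer that is already attached, otherwise allocate one. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct cmd { int opcode; char payload[32]; };
struct request { struct cmd *special; };

static struct cmd *prep_cmd(struct request *rq)
{
	struct cmd *cmd;

	if (rq->special) {                     /* requeued: buffer already there */
		cmd = rq->special;
		memset(cmd, 0, sizeof(*cmd));  /* just reset it */
	} else {
		cmd = calloc(1, sizeof(*cmd)); /* first pass: allocate */
	}
	return cmd;
}

int main(void)
{
	struct request rq = { .special = NULL };
	struct cmd *first = prep_cmd(&rq);

	rq.special = first;                    /* pretend the request is requeued */
	printf("reused: %s\n", prep_cmd(&rq) == first ? "yes" : "no");
	free(first);
	return 0;
}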
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463c..6cd642aaa4de 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref) | |||
287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | 287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { |
288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | 288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); |
289 | dst_release(ep->dst); | 289 | dst_release(ep->dst); |
290 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 290 | l2t_release(ep->com.tdev, ep->l2t); |
291 | } | 291 | } |
292 | kfree(ep); | 292 | kfree(ep); |
293 | } | 293 | } |
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); | 1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); |
1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); | 1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); |
1180 | dst_release(ep->dst); | 1180 | dst_release(ep->dst); |
1181 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 1181 | l2t_release(ep->com.tdev, ep->l2t); |
1182 | put_ep(&ep->com); | 1182 | put_ep(&ep->com); |
1183 | return CPL_RET_BUF_DONE; | 1183 | return CPL_RET_BUF_DONE; |
1184 | } | 1184 | } |
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1377 | if (!child_ep) { | 1377 | if (!child_ep) { |
1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | 1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", |
1379 | __func__); | 1379 | __func__); |
1380 | l2t_release(L2DATA(tdev), l2t); | 1380 | l2t_release(tdev, l2t); |
1381 | dst_release(dst); | 1381 | dst_release(dst); |
1382 | goto reject; | 1382 | goto reject; |
1383 | } | 1383 | } |
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1956 | if (!err) | 1956 | if (!err) |
1957 | goto out; | 1957 | goto out; |
1958 | 1958 | ||
1959 | l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); | 1959 | l2t_release(h->rdev.t3cdev_p, ep->l2t); |
1960 | fail4: | 1960 | fail4: |
1961 | dst_release(ep->dst); | 1961 | dst_release(ep->dst); |
1962 | fail3: | 1962 | fail3: |
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, | |||
2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, | 2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, |
2128 | l2t); | 2128 | l2t); |
2129 | dst_hold(new); | 2129 | dst_hold(new); |
2130 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 2130 | l2t_release(ep->com.tdev, ep->l2t); |
2131 | ep->l2t = l2t; | 2131 | ep->l2t = l2t; |
2132 | dst_release(old); | 2132 | dst_release(old); |
2133 | ep->dst = new; | 2133 | ep->dst = new; |
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c index 0dc97ec15c28..9dea71849f40 100644 --- a/drivers/input/tablet/wacom_wac.c +++ b/drivers/input/tablet/wacom_wac.c | |||
@@ -1124,11 +1124,8 @@ void wacom_setup_input_capabilities(struct input_dev *input_dev, | |||
1124 | for (i = 0; i < 8; i++) | 1124 | for (i = 0; i < 8; i++) |
1125 | __set_bit(BTN_0 + i, input_dev->keybit); | 1125 | __set_bit(BTN_0 + i, input_dev->keybit); |
1126 | 1126 | ||
1127 | if (wacom_wac->features.type != WACOM_21UX2) { | 1127 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); |
1128 | input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); | 1128 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); |
1129 | input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); | ||
1130 | } | ||
1131 | |||
1132 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); | 1129 | input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); |
1133 | 1130 | ||
1134 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); | 1131 | __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99da..a88f3cbb100b 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte) | |||
306 | return (pte->val & 3) != 0; | 306 | return (pte->val & 3) != 0; |
307 | } | 307 | } |
308 | 308 | ||
309 | static inline bool dma_pte_superpage(struct dma_pte *pte) | ||
310 | { | ||
311 | return (pte->val & (1 << 7)); | ||
312 | } | ||
313 | |||
309 | static inline int first_pte_in_page(struct dma_pte *pte) | 314 | static inline int first_pte_in_page(struct dma_pte *pte) |
310 | { | 315 | { |
311 | return !((unsigned long)pte & ~VTD_PAGE_MASK); | 316 | return !((unsigned long)pte & ~VTD_PAGE_MASK); |
@@ -404,6 +409,9 @@ static int dmar_forcedac; | |||
404 | static int intel_iommu_strict; | 409 | static int intel_iommu_strict; |
405 | static int intel_iommu_superpage = 1; | 410 | static int intel_iommu_superpage = 1; |
406 | 411 | ||
412 | int intel_iommu_gfx_mapped; | ||
413 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | ||
414 | |||
407 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) | 415 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) |
408 | static DEFINE_SPINLOCK(device_domain_lock); | 416 | static DEFINE_SPINLOCK(device_domain_lock); |
409 | static LIST_HEAD(device_domain_list); | 417 | static LIST_HEAD(device_domain_list); |
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain) | |||
577 | 585 | ||
578 | static void domain_update_iommu_superpage(struct dmar_domain *domain) | 586 | static void domain_update_iommu_superpage(struct dmar_domain *domain) |
579 | { | 587 | { |
580 | int i, mask = 0xf; | 588 | struct dmar_drhd_unit *drhd; |
589 | struct intel_iommu *iommu = NULL; | ||
590 | int mask = 0xf; | ||
581 | 591 | ||
582 | if (!intel_iommu_superpage) { | 592 | if (!intel_iommu_superpage) { |
583 | domain->iommu_superpage = 0; | 593 | domain->iommu_superpage = 0; |
584 | return; | 594 | return; |
585 | } | 595 | } |
586 | 596 | ||
587 | domain->iommu_superpage = 4; /* 1TiB */ | 597 | /* set iommu_superpage to the smallest common denominator */ |
588 | 598 | for_each_active_iommu(iommu, drhd) { | |
589 | for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) { | 599 | mask &= cap_super_page_val(iommu->cap); |
590 | mask |= cap_super_page_val(g_iommus[i]->cap); | ||
591 | if (!mask) { | 600 | if (!mask) { |
592 | break; | 601 | break; |
593 | } | 602 | } |
@@ -730,29 +739,23 @@ out: | |||
730 | } | 739 | } |
731 | 740 | ||
732 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, | 741 | static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, |
733 | unsigned long pfn, int large_level) | 742 | unsigned long pfn, int target_level) |
734 | { | 743 | { |
735 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 744 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
736 | struct dma_pte *parent, *pte = NULL; | 745 | struct dma_pte *parent, *pte = NULL; |
737 | int level = agaw_to_level(domain->agaw); | 746 | int level = agaw_to_level(domain->agaw); |
738 | int offset, target_level; | 747 | int offset; |
739 | 748 | ||
740 | BUG_ON(!domain->pgd); | 749 | BUG_ON(!domain->pgd); |
741 | BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); | 750 | BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width); |
742 | parent = domain->pgd; | 751 | parent = domain->pgd; |
743 | 752 | ||
744 | /* Search pte */ | ||
745 | if (!large_level) | ||
746 | target_level = 1; | ||
747 | else | ||
748 | target_level = large_level; | ||
749 | |||
750 | while (level > 0) { | 753 | while (level > 0) { |
751 | void *tmp_page; | 754 | void *tmp_page; |
752 | 755 | ||
753 | offset = pfn_level_offset(pfn, level); | 756 | offset = pfn_level_offset(pfn, level); |
754 | pte = &parent[offset]; | 757 | pte = &parent[offset]; |
755 | if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE)) | 758 | if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte))) |
756 | break; | 759 | break; |
757 | if (level == target_level) | 760 | if (level == target_level) |
758 | break; | 761 | break; |
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain, | |||
816 | } | 819 | } |
817 | 820 | ||
818 | /* clear last level pte, a tlb flush should be followed */ | 821 | /* clear last level pte, a tlb flush should be followed */ |
819 | static void dma_pte_clear_range(struct dmar_domain *domain, | 822 | static int dma_pte_clear_range(struct dmar_domain *domain, |
820 | unsigned long start_pfn, | 823 | unsigned long start_pfn, |
821 | unsigned long last_pfn) | 824 | unsigned long last_pfn) |
822 | { | 825 | { |
823 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; | 826 | int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; |
824 | unsigned int large_page = 1; | 827 | unsigned int large_page = 1; |
825 | struct dma_pte *first_pte, *pte; | 828 | struct dma_pte *first_pte, *pte; |
829 | int order; | ||
826 | 830 | ||
827 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); | 831 | BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); |
828 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); | 832 | BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); |
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain, | |||
846 | (void *)pte - (void *)first_pte); | 850 | (void *)pte - (void *)first_pte); |
847 | 851 | ||
848 | } while (start_pfn && start_pfn <= last_pfn); | 852 | } while (start_pfn && start_pfn <= last_pfn); |
853 | |||
854 | order = (large_page - 1) * 9; | ||
855 | return order; | ||
849 | } | 856 | } |
850 | 857 | ||
851 | /* free page table pages. last level pte should already be cleared */ | 858 | /* free page table pages. last level pte should already be cleared */ |
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void) | |||
3226 | } | 3233 | } |
3227 | } | 3234 | } |
3228 | 3235 | ||
3229 | if (dmar_map_gfx) | ||
3230 | return; | ||
3231 | |||
3232 | for_each_drhd_unit(drhd) { | 3236 | for_each_drhd_unit(drhd) { |
3233 | int i; | 3237 | int i; |
3234 | if (drhd->ignored || drhd->include_all) | 3238 | if (drhd->ignored || drhd->include_all) |
@@ -3236,18 +3240,23 @@ static void __init init_no_remapping_devices(void) | |||
3236 | 3240 | ||
3237 | for (i = 0; i < drhd->devices_cnt; i++) | 3241 | for (i = 0; i < drhd->devices_cnt; i++) |
3238 | if (drhd->devices[i] && | 3242 | if (drhd->devices[i] && |
3239 | !IS_GFX_DEVICE(drhd->devices[i])) | 3243 | !IS_GFX_DEVICE(drhd->devices[i])) |
3240 | break; | 3244 | break; |
3241 | 3245 | ||
3242 | if (i < drhd->devices_cnt) | 3246 | if (i < drhd->devices_cnt) |
3243 | continue; | 3247 | continue; |
3244 | 3248 | ||
3245 | /* bypass IOMMU if it is just for gfx devices */ | 3249 | /* This IOMMU has *only* gfx devices. Either bypass it or |
3246 | drhd->ignored = 1; | 3250 | set the gfx_mapped flag, as appropriate */ |
3247 | for (i = 0; i < drhd->devices_cnt; i++) { | 3251 | if (dmar_map_gfx) { |
3248 | if (!drhd->devices[i]) | 3252 | intel_iommu_gfx_mapped = 1; |
3249 | continue; | 3253 | } else { |
3250 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | 3254 | drhd->ignored = 1; |
3255 | for (i = 0; i < drhd->devices_cnt; i++) { | ||
3256 | if (!drhd->devices[i]) | ||
3257 | continue; | ||
3258 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
3259 | } | ||
3251 | } | 3260 | } |
3252 | } | 3261 | } |
3253 | } | 3262 | } |
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3568 | found = 1; | 3577 | found = 1; |
3569 | } | 3578 | } |
3570 | 3579 | ||
3580 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
3581 | |||
3571 | if (found == 0) { | 3582 | if (found == 0) { |
3572 | unsigned long tmp_flags; | 3583 | unsigned long tmp_flags; |
3573 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); | 3584 | spin_lock_irqsave(&domain->iommu_lock, tmp_flags); |
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain, | |||
3584 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); | 3595 | spin_unlock_irqrestore(&iommu->lock, tmp_flags); |
3585 | } | 3596 | } |
3586 | } | 3597 | } |
3587 | |||
3588 | spin_unlock_irqrestore(&device_domain_lock, flags); | ||
3589 | } | 3598 | } |
3590 | 3599 | ||
3591 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) | 3600 | static void vm_domain_remove_all_dev_info(struct dmar_domain *domain) |
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) | |||
3739 | vm_domain_exit(dmar_domain); | 3748 | vm_domain_exit(dmar_domain); |
3740 | return -ENOMEM; | 3749 | return -ENOMEM; |
3741 | } | 3750 | } |
3751 | domain_update_iommu_cap(dmar_domain); | ||
3742 | domain->priv = dmar_domain; | 3752 | domain->priv = dmar_domain; |
3743 | 3753 | ||
3744 | return 0; | 3754 | return 0; |
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain, | |||
3864 | { | 3874 | { |
3865 | struct dmar_domain *dmar_domain = domain->priv; | 3875 | struct dmar_domain *dmar_domain = domain->priv; |
3866 | size_t size = PAGE_SIZE << gfp_order; | 3876 | size_t size = PAGE_SIZE << gfp_order; |
3877 | int order; | ||
3867 | 3878 | ||
3868 | dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, | 3879 | order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT, |
3869 | (iova + size - 1) >> VTD_PAGE_SHIFT); | 3880 | (iova + size - 1) >> VTD_PAGE_SHIFT); |
3870 | 3881 | ||
3871 | if (dmar_domain->max_addr == iova + size) | 3882 | if (dmar_domain->max_addr == iova + size) |
3872 | dmar_domain->max_addr = iova; | 3883 | dmar_domain->max_addr = iova; |
3873 | 3884 | ||
3874 | return gfp_order; | 3885 | return order; |
3875 | } | 3886 | } |
3876 | 3887 | ||
3877 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, | 3888 | static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain, |
@@ -3950,7 +3961,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) | |||
3950 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { | 3961 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { |
3951 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); | 3962 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); |
3952 | dmar_map_gfx = 0; | 3963 | dmar_map_gfx = 0; |
3953 | } | 3964 | } else if (dmar_map_gfx) { |
3965 | /* we have to ensure the gfx device is idle before we flush */ | ||
3966 | printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); | ||
3967 | intel_iommu_strict = 1; | ||
3968 | } | ||
3954 | } | 3969 | } |
3955 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); | 3970 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); |
3956 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); | 3971 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); |
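In the intel-iommu hunks above, dma_pte_clear_range() now reports the size of what it cleared as a page order derived from the page-table level: order = (large_page - 1) * 9, since each superpage level covers 512 (2^9) times the level below. A small program that tabulates the resulting sizes for 4 KiB base pages:

/* The returned order encodes which page-table level was cleared. */
#include <stdio.h>

int main(void)
{
	const unsigned page_shift = 12;              /* 4 KiB base pages */

	for (unsigned large_page = 1; large_page <= 3; large_page++) {
		unsigned order = (large_page - 1) * 9;
		unsigned long long bytes = 1ULL << (page_shift + order);

		printf("level %u -> order %2u -> %llu KiB per entry\n",
		       large_page, order, bytes >> 10);
	}
	return 0;
}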
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 49da55c1528a..8c2a000cf3f5 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -1698,6 +1698,8 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |||
1698 | } | 1698 | } |
1699 | 1699 | ||
1700 | ti->num_flush_requests = 1; | 1700 | ti->num_flush_requests = 1; |
1701 | ti->discard_zeroes_data_unsupported = 1; | ||
1702 | |||
1701 | return 0; | 1703 | return 0; |
1702 | 1704 | ||
1703 | bad: | 1705 | bad: |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index 89f73ca22cfa..f84c08029b21 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -81,8 +81,10 @@ static int parse_features(struct dm_arg_set *as, struct flakey_c *fc, | |||
81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> | 81 | * corrupt_bio_byte <Nth_byte> <direction> <value> <bio_flags> |
82 | */ | 82 | */ |
83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { | 83 | if (!strcasecmp(arg_name, "corrupt_bio_byte")) { |
84 | if (!argc) | 84 | if (!argc) { |
85 | ti->error = "Feature corrupt_bio_byte requires parameters"; | 85 | ti->error = "Feature corrupt_bio_byte requires parameters"; |
86 | return -EINVAL; | ||
87 | } | ||
86 | 88 | ||
87 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); | 89 | r = dm_read_arg(_args + 1, as, &fc->corrupt_bio_byte, &ti->error); |
88 | if (r) | 90 | if (r) |
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index f82147029636..32ac70861d66 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c | |||
@@ -628,6 +628,7 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc, | |||
628 | job->kc = kc; | 628 | job->kc = kc; |
629 | job->fn = fn; | 629 | job->fn = fn; |
630 | job->context = context; | 630 | job->context = context; |
631 | job->master_job = job; | ||
631 | 632 | ||
632 | atomic_inc(&kc->nr_jobs); | 633 | atomic_inc(&kc->nr_jobs); |
633 | 634 | ||
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index a002dd85db1e..86df8b2cf927 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c | |||
@@ -449,7 +449,7 @@ static int parse_raid_params(struct raid_set *rs, char **argv, | |||
449 | rs->ti->error = "write_mostly option is only valid for RAID1"; | 449 | rs->ti->error = "write_mostly option is only valid for RAID1"; |
450 | return -EINVAL; | 450 | return -EINVAL; |
451 | } | 451 | } |
452 | if (value > rs->md.raid_disks) { | 452 | if (value >= rs->md.raid_disks) { |
453 | rs->ti->error = "Invalid write_mostly drive index given"; | 453 | rs->ti->error = "Invalid write_mostly drive index given"; |
454 | return -EINVAL; | 454 | return -EINVAL; |
455 | } | 455 | } |
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 986b8754bb08..bc04518e9d8b 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -1238,14 +1238,15 @@ static void dm_table_set_integrity(struct dm_table *t) | |||
1238 | return; | 1238 | return; |
1239 | 1239 | ||
1240 | template_disk = dm_table_get_integrity_disk(t, true); | 1240 | template_disk = dm_table_get_integrity_disk(t, true); |
1241 | if (!template_disk && | 1241 | if (template_disk) |
1242 | blk_integrity_is_initialized(dm_disk(t->md))) { | 1242 | blk_integrity_register(dm_disk(t->md), |
1243 | blk_get_integrity(template_disk)); | ||
1244 | else if (blk_integrity_is_initialized(dm_disk(t->md))) | ||
1243 | DMWARN("%s: device no longer has a valid integrity profile", | 1245 | DMWARN("%s: device no longer has a valid integrity profile", |
1244 | dm_device_name(t->md)); | 1246 | dm_device_name(t->md)); |
1245 | return; | 1247 | else |
1246 | } | 1248 | DMWARN("%s: unable to establish an integrity profile", |
1247 | blk_integrity_register(dm_disk(t->md), | 1249 | dm_device_name(t->md)); |
1248 | blk_get_integrity(template_disk)); | ||
1249 | } | 1250 | } |
1250 | 1251 | ||
1251 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, | 1252 | static int device_flush_capable(struct dm_target *ti, struct dm_dev *dev, |
@@ -1282,6 +1283,22 @@ static bool dm_table_supports_flush(struct dm_table *t, unsigned flush) | |||
1282 | return 0; | 1283 | return 0; |
1283 | } | 1284 | } |
1284 | 1285 | ||
1286 | static bool dm_table_discard_zeroes_data(struct dm_table *t) | ||
1287 | { | ||
1288 | struct dm_target *ti; | ||
1289 | unsigned i = 0; | ||
1290 | |||
1291 | /* Ensure that all targets support discard_zeroes_data. */ | ||
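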
1292 | while (i < dm_table_get_num_targets(t)) { | ||
1293 | ti = dm_table_get_target(t, i++); | ||
1294 | |||
1295 | if (ti->discard_zeroes_data_unsupported) | ||
1296 | return 0; | ||
1297 | } | ||
1298 | |||
1299 | return 1; | ||
1300 | } | ||
1301 | |||
1285 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | 1302 | void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, |
1286 | struct queue_limits *limits) | 1303 | struct queue_limits *limits) |
1287 | { | 1304 | { |
@@ -1304,6 +1321,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, | |||
1304 | } | 1321 | } |
1305 | blk_queue_flush(q, flush); | 1322 | blk_queue_flush(q, flush); |
1306 | 1323 | ||
1324 | if (!dm_table_discard_zeroes_data(t)) | ||
1325 | q->limits.discard_zeroes_data = 0; | ||
1326 | |||
1307 | dm_table_set_integrity(t); | 1327 | dm_table_set_integrity(t); |
1308 | 1328 | ||
1309 | /* | 1329 | /* |
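dm_table_discard_zeroes_data() above makes the queue-level discard_zeroes_data flag the conjunction over all targets: one target that sets discard_zeroes_data_unsupported (as dm-crypt now does) clears it for the whole table. A userspace sketch of that rule with simplified stand-in types:

/* A stacked device may only advertise the capability if every target has it. */
#include <stdbool.h>
#include <stdio.h>

struct target { bool discard_zeroes_data_unsupported; };

static bool table_discard_zeroes_data(const struct target *t, unsigned n)
{
	for (unsigned i = 0; i < n; i++)
		if (t[i].discard_zeroes_data_unsupported)
			return false;      /* one opt-out clears the queue flag */
	return true;
}

int main(void)
{
	struct target tgts[] = { { false }, { true /* e.g. a crypt target */ } };

	printf("discard_zeroes_data: %d\n",
	       (int)table_discard_zeroes_data(tgts, 2));
	return 0;
}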
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5404b2295820..5c95ccb59500 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -61,6 +61,11 @@ | |||
61 | static void autostart_arrays(int part); | 61 | static void autostart_arrays(int part); |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | /* pers_list is a list of registered personalities protected | ||
65 | * by pers_lock. | ||
66 | * pers_lock does extra service to protect accesses to | ||
67 | * mddev->thread when the mutex cannot be held. | ||
68 | */ | ||
64 | static LIST_HEAD(pers_list); | 69 | static LIST_HEAD(pers_list); |
65 | static DEFINE_SPINLOCK(pers_lock); | 70 | static DEFINE_SPINLOCK(pers_lock); |
66 | 71 | ||
@@ -739,7 +744,12 @@ static void mddev_unlock(mddev_t * mddev) | |||
739 | } else | 744 | } else |
740 | mutex_unlock(&mddev->reconfig_mutex); | 745 | mutex_unlock(&mddev->reconfig_mutex); |
741 | 746 | ||
747 | /* as we've dropped the mutex we need a spinlock to | ||
748 | * make sure the thread doesn't disappear | ||
749 | */ | ||
750 | spin_lock(&pers_lock); | ||
742 | md_wakeup_thread(mddev->thread); | 751 | md_wakeup_thread(mddev->thread); |
752 | spin_unlock(&pers_lock); | ||
743 | } | 753 | } |
744 | 754 | ||
745 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) | 755 | static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) |
@@ -6429,11 +6439,18 @@ mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, | |||
6429 | return thread; | 6439 | return thread; |
6430 | } | 6440 | } |
6431 | 6441 | ||
6432 | void md_unregister_thread(mdk_thread_t *thread) | 6442 | void md_unregister_thread(mdk_thread_t **threadp) |
6433 | { | 6443 | { |
6444 | mdk_thread_t *thread = *threadp; | ||
6434 | if (!thread) | 6445 | if (!thread) |
6435 | return; | 6446 | return; |
6436 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); | 6447 | dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); |
6448 | /* Locking ensures that mddev_unlock does not wake_up a | ||
6449 | * non-existent thread | ||
6450 | */ | ||
6451 | spin_lock(&pers_lock); | ||
6452 | *threadp = NULL; | ||
6453 | spin_unlock(&pers_lock); | ||
6437 | 6454 | ||
6438 | kthread_stop(thread->tsk); | 6455 | kthread_stop(thread->tsk); |
6439 | kfree(thread); | 6456 | kfree(thread); |
@@ -7340,8 +7357,7 @@ static void reap_sync_thread(mddev_t *mddev) | |||
7340 | mdk_rdev_t *rdev; | 7357 | mdk_rdev_t *rdev; |
7341 | 7358 | ||
7342 | /* resync has finished, collect result */ | 7359 | /* resync has finished, collect result */ |
7343 | md_unregister_thread(mddev->sync_thread); | 7360 | md_unregister_thread(&mddev->sync_thread); |
7344 | mddev->sync_thread = NULL; | ||
7345 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && | 7361 | if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && |
7346 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { | 7362 | !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { |
7347 | /* success...*/ | 7363 | /* success...*/ |
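The md changes turn md_unregister_thread() into a function that clears the caller's thread pointer under pers_lock before tearing the thread down, while mddev_unlock() wakes the thread under the same lock; a late wakeup therefore only ever sees NULL. The pthread-based sketch below mirrors that locking order with simplified stand-ins for the md types; it is an illustration, not the kernel implementation (build with -lpthread).

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>

struct worker {
	pthread_t tsk;
	pthread_mutex_t lock;
	pthread_cond_t cond;
	bool stop;
};

static pthread_mutex_t reg_lock = PTHREAD_MUTEX_INITIALIZER; /* ~pers_lock */
static struct worker *the_worker;                            /* ~mddev->thread */

static void *worker_fn(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	while (!w->stop)
		pthread_cond_wait(&w->cond, &w->lock);
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

static void wake_worker(void)                    /* ~md_wakeup_thread() caller */
{
	pthread_mutex_lock(&reg_lock);
	if (the_worker)                          /* may already be unregistered */
		pthread_cond_signal(&the_worker->cond);
	pthread_mutex_unlock(&reg_lock);
}

static void unregister_worker(struct worker **wp) /* ~md_unregister_thread() */
{
	struct worker *w;

	pthread_mutex_lock(&reg_lock);
	w = *wp;
	*wp = NULL;                              /* published before teardown */
	pthread_mutex_unlock(&reg_lock);
	if (!w)
		return;

	pthread_mutex_lock(&w->lock);
	w->stop = true;
	pthread_cond_signal(&w->cond);
	pthread_mutex_unlock(&w->lock);
	pthread_join(w->tsk, NULL);
	free(w);
}

int main(void)
{
	the_worker = calloc(1, sizeof(*the_worker));
	pthread_mutex_init(&the_worker->lock, NULL);
	pthread_cond_init(&the_worker->cond, NULL);
	pthread_create(&the_worker->tsk, NULL, worker_fn, the_worker);

	wake_worker();                           /* safe while registered */
	unregister_worker(&the_worker);
	wake_worker();                           /* safe afterwards: sees NULL */
	puts("done");
	return 0;
}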
diff --git a/drivers/md/md.h b/drivers/md/md.h index 1e586bb4452e..0a309dc29b45 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -560,7 +560,7 @@ extern int register_md_personality(struct mdk_personality *p); | |||
560 | extern int unregister_md_personality(struct mdk_personality *p); | 560 | extern int unregister_md_personality(struct mdk_personality *p); |
561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), | 561 | extern mdk_thread_t * md_register_thread(void (*run) (mddev_t *mddev), |
562 | mddev_t *mddev, const char *name); | 562 | mddev_t *mddev, const char *name); |
563 | extern void md_unregister_thread(mdk_thread_t *thread); | 563 | extern void md_unregister_thread(mdk_thread_t **threadp); |
564 | extern void md_wakeup_thread(mdk_thread_t *thread); | 564 | extern void md_wakeup_thread(mdk_thread_t *thread); |
565 | extern void md_check_recovery(mddev_t *mddev); | 565 | extern void md_check_recovery(mddev_t *mddev); |
566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); | 566 | extern void md_write_start(mddev_t *mddev, struct bio *bi); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 3535c23af288..d5b5fb300171 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -514,8 +514,7 @@ static int multipath_stop (mddev_t *mddev) | |||
514 | { | 514 | { |
515 | multipath_conf_t *conf = mddev->private; | 515 | multipath_conf_t *conf = mddev->private; |
516 | 516 | ||
517 | md_unregister_thread(mddev->thread); | 517 | md_unregister_thread(&mddev->thread); |
518 | mddev->thread = NULL; | ||
519 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 518 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
520 | mempool_destroy(conf->pool); | 519 | mempool_destroy(conf->pool); |
521 | kfree(conf->multipaths); | 520 | kfree(conf->multipaths); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index f4622dd8fc59..d9587dffe533 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -2562,8 +2562,7 @@ static int stop(mddev_t *mddev) | |||
2562 | raise_barrier(conf); | 2562 | raise_barrier(conf); |
2563 | lower_barrier(conf); | 2563 | lower_barrier(conf); |
2564 | 2564 | ||
2565 | md_unregister_thread(mddev->thread); | 2565 | md_unregister_thread(&mddev->thread); |
2566 | mddev->thread = NULL; | ||
2567 | if (conf->r1bio_pool) | 2566 | if (conf->r1bio_pool) |
2568 | mempool_destroy(conf->r1bio_pool); | 2567 | mempool_destroy(conf->r1bio_pool); |
2569 | kfree(conf->mirrors); | 2568 | kfree(conf->mirrors); |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d7a8468ddeab..0cd9672cf9cb 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2955,7 +2955,7 @@ static int run(mddev_t *mddev) | |||
2955 | return 0; | 2955 | return 0; |
2956 | 2956 | ||
2957 | out_free_conf: | 2957 | out_free_conf: |
2958 | md_unregister_thread(mddev->thread); | 2958 | md_unregister_thread(&mddev->thread); |
2959 | if (conf->r10bio_pool) | 2959 | if (conf->r10bio_pool) |
2960 | mempool_destroy(conf->r10bio_pool); | 2960 | mempool_destroy(conf->r10bio_pool); |
2961 | safe_put_page(conf->tmppage); | 2961 | safe_put_page(conf->tmppage); |
@@ -2973,8 +2973,7 @@ static int stop(mddev_t *mddev) | |||
2973 | raise_barrier(conf, 0); | 2973 | raise_barrier(conf, 0); |
2974 | lower_barrier(conf); | 2974 | lower_barrier(conf); |
2975 | 2975 | ||
2976 | md_unregister_thread(mddev->thread); | 2976 | md_unregister_thread(&mddev->thread); |
2977 | mddev->thread = NULL; | ||
2978 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ | 2977 | blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ |
2979 | if (conf->r10bio_pool) | 2978 | if (conf->r10bio_pool) |
2980 | mempool_destroy(conf->r10bio_pool); | 2979 | mempool_destroy(conf->r10bio_pool); |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 43709fa6b6df..ac5e8b57e50f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4941,8 +4941,7 @@ static int run(mddev_t *mddev) | |||
4941 | 4941 | ||
4942 | return 0; | 4942 | return 0; |
4943 | abort: | 4943 | abort: |
4944 | md_unregister_thread(mddev->thread); | 4944 | md_unregister_thread(&mddev->thread); |
4945 | mddev->thread = NULL; | ||
4946 | if (conf) { | 4945 | if (conf) { |
4947 | print_raid5_conf(conf); | 4946 | print_raid5_conf(conf); |
4948 | free_conf(conf); | 4947 | free_conf(conf); |
@@ -4956,8 +4955,7 @@ static int stop(mddev_t *mddev) | |||
4956 | { | 4955 | { |
4957 | raid5_conf_t *conf = mddev->private; | 4956 | raid5_conf_t *conf = mddev->private; |
4958 | 4957 | ||
4959 | md_unregister_thread(mddev->thread); | 4958 | md_unregister_thread(&mddev->thread); |
4960 | mddev->thread = NULL; | ||
4961 | if (mddev->queue) | 4959 | if (mddev->queue) |
4962 | mddev->queue->backing_dev_info.congested_fn = NULL; | 4960 | mddev->queue->backing_dev_info.congested_fn = NULL; |
4963 | free_conf(conf); | 4961 | free_conf(conf); |
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef36222440..b3a5ecdb33ac 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c | |||
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) | |||
2194 | "'%s' Display already enabled\n", | 2194 | "'%s' Display already enabled\n", |
2195 | def_display->name); | 2195 | def_display->name); |
2196 | } | 2196 | } |
2197 | /* set the update mode */ | ||
2198 | if (def_display->caps & | ||
2199 | OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | ||
2200 | if (dssdrv->enable_te) | ||
2201 | dssdrv->enable_te(def_display, 0); | ||
2202 | if (dssdrv->set_update_mode) | ||
2203 | dssdrv->set_update_mode(def_display, | ||
2204 | OMAP_DSS_UPDATE_MANUAL); | ||
2205 | } else { | ||
2206 | if (dssdrv->set_update_mode) | ||
2207 | dssdrv->set_update_mode(def_display, | ||
2208 | OMAP_DSS_UPDATE_AUTO); | ||
2209 | } | ||
2210 | } | 2197 | } |
2211 | } | 2198 | } |
2212 | 2199 | ||
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b2..80796eb0c53e 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/slab.h> | ||
34 | #include <media/v4l2-event.h> | 35 | #include <media/v4l2-event.h> |
35 | 36 | ||
36 | #include "isp.h" | 37 | #include "isp.h" |
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d0854..e4100b1f68df 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) | |||
1961 | 1961 | ||
1962 | list_for_each_entry(stream, &dev->streams, list) { | 1962 | list_for_each_entry(stream, &dev->streams, list) { |
1963 | if (stream->intf == intf) | 1963 | if (stream->intf == intf) |
1964 | return uvc_video_resume(stream); | 1964 | return uvc_video_resume(stream, reset); |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " | 1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " |
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25a..29e239911d0e 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c | |||
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain, | |||
49 | if (remote == NULL) | 49 | if (remote == NULL) |
50 | return -EINVAL; | 50 | return -EINVAL; |
51 | 51 | ||
52 | source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) | 52 | source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) |
53 | ? (remote->vdev ? &remote->vdev->entity : NULL) | 53 | ? (remote->vdev ? &remote->vdev->entity : NULL) |
54 | : &remote->subdev.entity; | 54 | : &remote->subdev.entity; |
55 | if (source == NULL) | 55 | if (source == NULL) |
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c8915..ffd1158628b6 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c | |||
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) | |||
1104 | * buffers, making sure userspace applications are notified of the problem | 1104 | * buffers, making sure userspace applications are notified of the problem |
1105 | * instead of waiting forever. | 1105 | * instead of waiting forever. |
1106 | */ | 1106 | */ |
1107 | int uvc_video_resume(struct uvc_streaming *stream) | 1107 | int uvc_video_resume(struct uvc_streaming *stream, int reset) |
1108 | { | 1108 | { |
1109 | int ret; | 1109 | int ret; |
1110 | 1110 | ||
1111 | /* If the bus has been reset on resume, set the alternate setting to 0. | ||
1112 | * This should be the default value, but some devices crash or otherwise | ||
1113 | * misbehave if they don't receive a SET_INTERFACE request before any | ||
1114 | * other video control request. | ||
1115 | */ | ||
1116 | if (reset) | ||
1117 | usb_set_interface(stream->dev->udev, stream->intfnum, 0); | ||
1118 | |||
1111 | stream->frozen = 0; | 1119 | stream->frozen = 0; |
1112 | 1120 | ||
1113 | ret = uvc_commit_video(stream, &stream->ctrl); | 1121 | ret = uvc_commit_video(stream, &stream->ctrl); |
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86a..cbdd49bf8b67 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); | |||
638 | /* Video */ | 638 | /* Video */ |
639 | extern int uvc_video_init(struct uvc_streaming *stream); | 639 | extern int uvc_video_init(struct uvc_streaming *stream); |
640 | extern int uvc_video_suspend(struct uvc_streaming *stream); | 640 | extern int uvc_video_suspend(struct uvc_streaming *stream); |
641 | extern int uvc_video_resume(struct uvc_streaming *stream); | 641 | extern int uvc_video_resume(struct uvc_streaming *stream, int reset); |
642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); | 642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); |
643 | extern int uvc_probe_video(struct uvc_streaming *stream, | 643 | extern int uvc_probe_video(struct uvc_streaming *stream, |
644 | struct uvc_streaming_control *probe); | 644 | struct uvc_streaming_control *probe); |
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b346..a5c9ed128b97 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c | |||
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) | |||
173 | media_device_unregister_entity(&vdev->entity); | 173 | media_device_unregister_entity(&vdev->entity); |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | /* Do not call v4l2_device_put if there is no release callback set. | ||
177 | * Drivers that have no v4l2_device release callback might free the | ||
178 | * v4l2_dev instance in the video_device release callback below, so we | ||
179 | * must perform this check here. | ||
180 | * | ||
181 | * TODO: In the long run all drivers that use v4l2_device should use the | ||
182 | * v4l2_device release callback. This check will then be unnecessary. | ||
183 | */ | ||
184 | if (v4l2_dev && v4l2_dev->release == NULL) | ||
185 | v4l2_dev = NULL; | ||
186 | |||
176 | /* Release video_device and perform other | 187 | /* Release video_device and perform other |
177 | cleanups as needed. */ | 188 | cleanups as needed. */ |
178 | vdev->release(vdev); | 189 | vdev->release(vdev); |
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c41434..e6a2c3b302d4 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) | |||
38 | mutex_init(&v4l2_dev->ioctl_lock); | 38 | mutex_init(&v4l2_dev->ioctl_lock); |
39 | v4l2_prio_init(&v4l2_dev->prio); | 39 | v4l2_prio_init(&v4l2_dev->prio); |
40 | kref_init(&v4l2_dev->ref); | 40 | kref_init(&v4l2_dev->ref); |
41 | get_device(dev); | ||
41 | v4l2_dev->dev = dev; | 42 | v4l2_dev->dev = dev; |
42 | if (dev == NULL) { | 43 | if (dev == NULL) { |
43 | /* If dev == NULL, then name must be filled in by the caller */ | 44 | /* If dev == NULL, then name must be filled in by the caller */ |
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) | |||
93 | 94 | ||
94 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) | 95 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) |
95 | dev_set_drvdata(v4l2_dev->dev, NULL); | 96 | dev_set_drvdata(v4l2_dev->dev, NULL); |
97 | put_device(v4l2_dev->dev); | ||
96 | v4l2_dev->dev = NULL; | 98 | v4l2_dev->dev = NULL; |
97 | } | 99 | } |
98 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); | 100 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); |
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1e..563654c9b19e 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
273 | ct->regs.ack = JZ_REG_ADC_STATUS; | 273 | ct->regs.ack = JZ_REG_ADC_STATUS; |
274 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 274 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
276 | ct->chip.irq_ack = irq_gc_ack; | 276 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
277 | 277 | ||
278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); | 278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); |
279 | 279 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97b..8b51cd62d067 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c | |||
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) | |||
375 | * both have been read. So the value read will always be correct. | 375 | * both have been read. So the value read will always be correct. |
376 | * Set BOOT bit to refresh factory tuning values. | 376 | * Set BOOT bit to refresh factory tuning values. |
377 | */ | 377 | */ |
378 | lis3->read(lis3, CTRL_REG2, ®); | 378 | if (lis3->pdata) { |
379 | if (lis3->whoami == WAI_12B) | 379 | lis3->read(lis3, CTRL_REG2, ®); |
380 | reg |= CTRL2_BDU | CTRL2_BOOT; | 380 | if (lis3->whoami == WAI_12B) |
381 | else | 381 | reg |= CTRL2_BDU | CTRL2_BOOT; |
382 | reg |= CTRL2_BOOT_8B; | 382 | else |
383 | lis3->write(lis3, CTRL_REG2, reg); | 383 | reg |= CTRL2_BOOT_8B; |
384 | lis3->write(lis3, CTRL_REG2, reg); | ||
385 | } | ||
384 | 386 | ||
385 | /* LIS3 power on delay is quite long */ | 387 | /* LIS3 power on delay is quite long */ |
386 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); | 388 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); |
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index e46df5331c55..9a7eb3b36cf3 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h | |||
@@ -239,13 +239,19 @@ void bnx2x_int_disable(struct bnx2x *bp); | |||
239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X | 239 | * FUNC_N_CLID_X = N * NUM_SPECIAL_CLIENTS + FUNC_0_CLID_X |
240 | * | 240 | * |
241 | */ | 241 | */ |
242 | /* iSCSI L2 */ | 242 | enum { |
243 | #define BNX2X_ISCSI_ETH_CL_ID_IDX 1 | 243 | BNX2X_ISCSI_ETH_CL_ID_IDX, |
244 | #define BNX2X_ISCSI_ETH_CID 49 | 244 | BNX2X_FCOE_ETH_CL_ID_IDX, |
245 | BNX2X_MAX_CNIC_ETH_CL_ID_IDX, | ||
246 | }; | ||
245 | 247 | ||
246 | /* FCoE L2 */ | 248 | #define BNX2X_CNIC_START_ETH_CID 48 |
247 | #define BNX2X_FCOE_ETH_CL_ID_IDX 2 | 249 | enum { |
248 | #define BNX2X_FCOE_ETH_CID 50 | 250 | /* iSCSI L2 */ |
251 | BNX2X_ISCSI_ETH_CID = BNX2X_CNIC_START_ETH_CID, | ||
252 | /* FCoE L2 */ | ||
253 | BNX2X_FCOE_ETH_CID, | ||
254 | }; | ||
249 | 255 | ||
250 | /** Additional rings budgeting */ | 256 | /** Additional rings budgeting */ |
251 | #ifdef BCM_CNIC | 257 | #ifdef BCM_CNIC |
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h index 223bfeebc597..2dc1199239d0 100644 --- a/drivers/net/bnx2x/bnx2x_cmn.h +++ b/drivers/net/bnx2x/bnx2x_cmn.h | |||
@@ -1297,7 +1297,7 @@ static inline void bnx2x_init_txdata(struct bnx2x *bp, | |||
1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) | 1297 | static inline u8 bnx2x_cnic_eth_cl_id(struct bnx2x *bp, u8 cl_idx) |
1298 | { | 1298 | { |
1299 | return bp->cnic_base_cl_id + cl_idx + | 1299 | return bp->cnic_base_cl_id + cl_idx + |
1300 | (bp->pf_num >> 1) * NON_ETH_CONTEXT_USE; | 1300 | (bp->pf_num >> 1) * BNX2X_MAX_CNIC_ETH_CL_ID_IDX; |
1301 | } | 1301 | } |
1302 | 1302 | ||
1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) | 1303 | static inline u8 bnx2x_cnic_fw_sb_id(struct bnx2x *bp) |
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a1e004a82f7a..0b4acf67e0c6 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2120 | break; | 2120 | break; |
2121 | case DCB_CAP_ATTR_DCBX: | 2121 | case DCB_CAP_ATTR_DCBX: |
2122 | *cap = BNX2X_DCBX_CAPS; | 2122 | *cap = BNX2X_DCBX_CAPS; |
2123 | break; | ||
2123 | default: | 2124 | default: |
2124 | rval = -EINVAL; | 2125 | rval = -EINVAL; |
2125 | break; | 2126 | break; |
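The one-line bnx2x_dcb.c fix above adds the break that was missing after the DCB_CAP_ATTR_DCBX case, which previously fell through into default and overwrote the result with -EINVAL. A small self-contained illustration of the failure mode (plain C, hypothetical values):

/* Illustrative only -- shows why the missing break mattered. */
#include <stdio.h>

static int get_cap(int capid, unsigned char *cap)
{
	int rval = 0;

	switch (capid) {
	case 1:				/* stands in for DCB_CAP_ATTR_DCBX */
		*cap = 0x5;		/* stands in for BNX2X_DCBX_CAPS */
		break;			/* without this, control falls into default */
	default:
		rval = -1;
		break;
	}
	return rval;
}

int main(void)
{
	unsigned char cap = 0;
	printf("rval=%d cap=0x%x\n", get_cap(1, &cap), cap);
	return 0;
}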
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index c027e9341a1a..15f800085bb2 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -4943,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4943 | int igu_seg_id; | 4943 | int igu_seg_id; |
4944 | int port = BP_PORT(bp); | 4944 | int port = BP_PORT(bp); |
4945 | int func = BP_FUNC(bp); | 4945 | int func = BP_FUNC(bp); |
4946 | int reg_offset; | 4946 | int reg_offset, reg_offset_en5; |
4947 | u64 section; | 4947 | u64 section; |
4948 | int index; | 4948 | int index; |
4949 | struct hc_sp_status_block_data sp_sb_data; | 4949 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4966,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4966 | 4966 | ||
4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4969 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4970 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4969 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4971 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4970 | int sindex; | 4972 | int sindex; |
4971 | /* take care of sig[0]..sig[4] */ | 4973 | /* take care of sig[0]..sig[4] */ |
@@ -4980,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4980 | * and not 16 between the different groups | 4982 | * and not 16 between the different groups |
4981 | */ | 4983 | */ |
4982 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4984 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4983 | reg_offset + 0x10 + 0x4*index); | 4985 | reg_offset_en5 + 0x4*index); |
4984 | else | 4986 | else |
4985 | bp->attn_group[index].sig[4] = 0; | 4987 | bp->attn_group[index].sig[4] = 0; |
4986 | } | 4988 | } |
@@ -7625,8 +7627,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7625 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7627 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7626 | u8 *mac_addr = bp->dev->dev_addr; | 7628 | u8 *mac_addr = bp->dev->dev_addr; |
7627 | u32 val; | 7629 | u32 val; |
7630 | u16 pmc; | ||
7631 | |||
7628 | /* The mac address is written to entries 1-4 to | 7632 | /* The mac address is written to entries 1-4 to |
7629 | preserve entry 0 which is used by the PMF */ | 7633 | * preserve entry 0 which is used by the PMF |
7634 | */ | ||
7630 | u8 entry = (BP_VN(bp) + 1)*8; | 7635 | u8 entry = (BP_VN(bp) + 1)*8; |
7631 | 7636 | ||
7632 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7637 | val = (mac_addr[0] << 8) | mac_addr[1]; |
@@ -7636,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7636 | (mac_addr[4] << 8) | mac_addr[5]; | 7641 | (mac_addr[4] << 8) | mac_addr[5]; |
7637 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7642 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7638 | 7643 | ||
7644 | /* Enable the PME and clear the status */ | ||
7645 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7646 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7647 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7648 | |||
7639 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7649 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7640 | 7650 | ||
7641 | } else | 7651 | } else |
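Besides routing sig[4] through the new ENABLE5 registers, the bnx2x_main.c hunk arms PME on the WoL unload path: it reads PCI_PM_CTRL, sets the PME-enable bit and writes the (write-one-to-clear) PME-status bit back to clear it. A sketch of that read-modify-write, assuming kernel context and a driver that has cached the PM capability offset in pm_cap:

/* Sketch of the PME enable pattern; "pm_cap" is the cached PCI PM capability offset. */
#include <linux/pci.h>

static void example_enable_pme(struct pci_dev *pdev, int pm_cap)
{
	u16 pmc;

	pci_read_config_word(pdev, pm_cap + PCI_PM_CTRL, &pmc);
	/* PME_STATUS is write-one-to-clear, so writing it back clears it. */
	pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
	pci_write_config_word(pdev, pm_cap + PCI_PM_CTRL, pmc);
}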
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 750e8445dac4..fc7bd0f23c0b 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | re_arm: | 2170 | re_arm: |
2171 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2171 | if (!bond->kill_timers) |
2172 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | ||
2172 | out: | 2173 | out: |
2173 | read_unlock(&bond->lock); | 2174 | read_unlock(&bond->lock); |
2174 | } | 2175 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | re_arm: | 1442 | re_arm: |
1443 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | 1443 | if (!bond->kill_timers) |
1444 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | ||
1444 | out: | 1445 | out: |
1445 | read_unlock(&bond->lock); | 1446 | read_unlock(&bond->lock); |
1446 | } | 1447 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 43f2ea541088..de3d351ccb6b 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
777 | 777 | ||
778 | read_lock(&bond->lock); | 778 | read_lock(&bond->lock); |
779 | 779 | ||
780 | if (bond->kill_timers) | ||
781 | goto out; | ||
782 | |||
780 | /* rejoin all groups on bond device */ | 783 | /* rejoin all groups on bond device */ |
781 | __bond_resend_igmp_join_requests(bond->dev); | 784 | __bond_resend_igmp_join_requests(bond->dev); |
782 | 785 | ||
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
790 | __bond_resend_igmp_join_requests(vlan_dev); | 793 | __bond_resend_igmp_join_requests(vlan_dev); |
791 | } | 794 | } |
792 | 795 | ||
793 | if (--bond->igmp_retrans > 0) | 796 | if ((--bond->igmp_retrans > 0) && !bond->kill_timers) |
794 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | 797 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); |
795 | 798 | out: | |
796 | read_unlock(&bond->lock); | 799 | read_unlock(&bond->lock); |
797 | } | 800 | } |
798 | 801 | ||
@@ -1432,6 +1435,8 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1432 | struct sk_buff *skb = *pskb; | 1435 | struct sk_buff *skb = *pskb; |
1433 | struct slave *slave; | 1436 | struct slave *slave; |
1434 | struct bonding *bond; | 1437 | struct bonding *bond; |
1438 | void (*recv_probe)(struct sk_buff *, struct bonding *, | ||
1439 | struct slave *); | ||
1435 | 1440 | ||
1436 | skb = skb_share_check(skb, GFP_ATOMIC); | 1441 | skb = skb_share_check(skb, GFP_ATOMIC); |
1437 | if (unlikely(!skb)) | 1442 | if (unlikely(!skb)) |
@@ -1445,11 +1450,12 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) | |||
1445 | if (bond->params.arp_interval) | 1450 | if (bond->params.arp_interval) |
1446 | slave->dev->last_rx = jiffies; | 1451 | slave->dev->last_rx = jiffies; |
1447 | 1452 | ||
1448 | if (bond->recv_probe) { | 1453 | recv_probe = ACCESS_ONCE(bond->recv_probe); |
1454 | if (recv_probe) { | ||
1449 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); | 1455 | struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC); |
1450 | 1456 | ||
1451 | if (likely(nskb)) { | 1457 | if (likely(nskb)) { |
1452 | bond->recv_probe(nskb, bond, slave); | 1458 | recv_probe(nskb, bond, slave); |
1453 | dev_kfree_skb(nskb); | 1459 | dev_kfree_skb(nskb); |
1454 | } | 1460 | } |
1455 | } | 1461 | } |
@@ -2538,7 +2544,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2538 | } | 2544 | } |
2539 | 2545 | ||
2540 | re_arm: | 2546 | re_arm: |
2541 | if (bond->params.miimon) | 2547 | if (bond->params.miimon && !bond->kill_timers) |
2542 | queue_delayed_work(bond->wq, &bond->mii_work, | 2548 | queue_delayed_work(bond->wq, &bond->mii_work, |
2543 | msecs_to_jiffies(bond->params.miimon)); | 2549 | msecs_to_jiffies(bond->params.miimon)); |
2544 | out: | 2550 | out: |
@@ -2886,7 +2892,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2886 | } | 2892 | } |
2887 | 2893 | ||
2888 | re_arm: | 2894 | re_arm: |
2889 | if (bond->params.arp_interval) | 2895 | if (bond->params.arp_interval && !bond->kill_timers) |
2890 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2896 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2891 | out: | 2897 | out: |
2892 | read_unlock(&bond->lock); | 2898 | read_unlock(&bond->lock); |
@@ -3154,7 +3160,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3154 | bond_ab_arp_probe(bond); | 3160 | bond_ab_arp_probe(bond); |
3155 | 3161 | ||
3156 | re_arm: | 3162 | re_arm: |
3157 | if (bond->params.arp_interval) | 3163 | if (bond->params.arp_interval && !bond->kill_timers) |
3158 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3164 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3159 | out: | 3165 | out: |
3160 | read_unlock(&bond->lock); | 3166 | read_unlock(&bond->lock); |
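Two patterns recur in the bonding hunks above: each monitor work item re-arms itself only while kill_timers is clear, so teardown can break the self-queueing chain, and bond_handle_frame() reads recv_probe once through ACCESS_ONCE() so the NULL test and the call use the same snapshot even if another CPU clears the pointer in between. A condensed kernel-context sketch of both (the struct and field names are simplified stand-ins, not the real struct bonding):

/* Condensed sketch of the re-arm guard and single-snapshot read used above. */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/compiler.h>
#include <linux/jiffies.h>

struct bond_like {
	struct workqueue_struct *wq;
	struct delayed_work	 work;
	int			 kill_timers;
	void (*recv_probe)(void *priv);
};

static void monitor_fn(struct work_struct *w)
{
	struct bond_like *b = container_of(to_delayed_work(w),
					   struct bond_like, work);
	void (*probe)(void *priv);

	/* Single snapshot: the NULL check and the call use the same value. */
	probe = ACCESS_ONCE(b->recv_probe);
	if (probe)
		probe(b);

	/* Only re-arm while teardown has not started. */
	if (!b->kill_timers)
		queue_delayed_work(b->wq, &b->work, HZ);
}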
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 92feac68b66e..4cc6f44c2ba2 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c | |||
@@ -261,11 +261,13 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
261 | void __iomem *data = &regs->tx.dsr1_0; | 261 | void __iomem *data = &regs->tx.dsr1_0;
262 | u16 *payload = (u16 *)frame->data; | 262 | u16 *payload = (u16 *)frame->data; |
263 | 263 | ||
264 | /* It is safe to write into dsr[dlc+1] */ | 264 | for (i = 0; i < frame->can_dlc / 2; i++) { |
265 | for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { | ||
266 | out_be16(data, *payload++); | 265 | out_be16(data, *payload++); |
267 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; | 266 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; |
268 | } | 267 | } |
268 | /* write remaining byte if necessary */ | ||
269 | if (frame->can_dlc & 1) | ||
270 | out_8(data, frame->data[frame->can_dlc - 1]); | ||
269 | } | 271 | } |
270 | 272 | ||
271 | out_8(&regs->tx.dlr, frame->can_dlc); | 273 | out_8(&regs->tx.dlr, frame->can_dlc);
@@ -330,10 +332,13 @@ static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) | |||
330 | void __iomem *data = &regs->rx.dsr1_0; | 332 | void __iomem *data = &regs->rx.dsr1_0;
331 | u16 *payload = (u16 *)frame->data; | 333 | u16 *payload = (u16 *)frame->data; |
332 | 334 | ||
333 | for (i = 0; i < (frame->can_dlc + 1) / 2; i++) { | 335 | for (i = 0; i < frame->can_dlc / 2; i++) { |
334 | *payload++ = in_be16(data); | 336 | *payload++ = in_be16(data); |
335 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; | 337 | data += 2 + _MSCAN_RESERVED_DSR_SIZE; |
336 | } | 338 | } |
339 | /* read remaining byte if necessary */ | ||
340 | if (frame->can_dlc & 1) | ||
341 | frame->data[frame->can_dlc - 1] = in_8(data); | ||
337 | } | 342 | } |
338 | 343 | ||
339 | out_8(&regs->canrflg, MSCAN_RXF); | 344 | out_8(&regs->canrflg, MSCAN_RXF);
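The mscan changes stop the 16-bit copy loops at can_dlc / 2 and move the odd trailing byte to a separate 8-bit access, since writing dsr[dlc + 1] is not actually safe for every DLC value. A plain-C illustration of splitting an odd-length copy into word and byte accesses (the real driver uses out_be16()/out_8() on device registers with reserved gaps, which this sketch omits):

/* Plain-C illustration of the odd-length split; buffer names are hypothetical. */
#include <stdint.h>

static void copy_dlc(volatile uint16_t *dst16, volatile uint8_t *dst8,
		     const uint8_t *data, unsigned int dlc)
{
	/* Word-wise view of the payload, mirroring the driver's 16-bit copies. */
	const uint16_t *payload = (const uint16_t *)data;
	unsigned int i;

	for (i = 0; i < dlc / 2; i++)		/* full 16-bit words only */
		dst16[i] = payload[i];
	if (dlc & 1)				/* lone trailing byte, if any */
		dst8[dlc - 1] = data[dlc - 1];
}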
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
1146 | if (te && te->ctx && te->client && te->client->redirect) { | 1146 | if (te && te->ctx && te->client && te->client->redirect) { |
1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
1148 | if (update_tcb) { | 1148 | if (update_tcb) { |
1149 | rcu_read_lock(); | ||
1149 | l2t_hold(L2DATA(tdev), e); | 1150 | l2t_hold(L2DATA(tdev), e); |
1151 | rcu_read_unlock(); | ||
1150 | set_l2t_ix(tdev, tid, e); | 1152 | set_l2t_ix(tdev, tid, e); |
1151 | } | 1153 | } |
1152 | } | 1154 | } |
1153 | } | 1155 | } |
1154 | l2t_release(L2DATA(tdev), e); | 1156 | l2t_release(tdev, e); |
1155 | } | 1157 | } |
1156 | 1158 | ||
1157 | /* | 1159 | /* |
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1264 | goto out_free; | 1266 | goto out_free; |
1265 | 1267 | ||
1266 | err = -ENOMEM; | 1268 | err = -ENOMEM; |
1267 | L2DATA(dev) = t3_init_l2t(l2t_capacity); | 1269 | RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); |
1268 | if (!L2DATA(dev)) | 1270 | if (!L2DATA(dev)) |
1269 | goto out_free; | 1271 | goto out_free; |
1270 | 1272 | ||
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1298 | 1300 | ||
1299 | out_free_l2t: | 1301 | out_free_l2t: |
1300 | t3_free_l2t(L2DATA(dev)); | 1302 | t3_free_l2t(L2DATA(dev)); |
1301 | L2DATA(dev) = NULL; | 1303 | rcu_assign_pointer(dev->l2opt, NULL); |
1302 | out_free: | 1304 | out_free: |
1303 | kfree(t); | 1305 | kfree(t); |
1304 | return err; | 1306 | return err; |
1305 | } | 1307 | } |
1306 | 1308 | ||
1309 | static void clean_l2_data(struct rcu_head *head) | ||
1310 | { | ||
1311 | struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); | ||
1312 | t3_free_l2t(d); | ||
1313 | } | ||
1314 | |||
1315 | |||
1307 | void cxgb3_offload_deactivate(struct adapter *adapter) | 1316 | void cxgb3_offload_deactivate(struct adapter *adapter) |
1308 | { | 1317 | { |
1309 | struct t3cdev *tdev = &adapter->tdev; | 1318 | struct t3cdev *tdev = &adapter->tdev; |
1310 | struct t3c_data *t = T3C_DATA(tdev); | 1319 | struct t3c_data *t = T3C_DATA(tdev); |
1320 | struct l2t_data *d; | ||
1311 | 1321 | ||
1312 | remove_adapter(adapter); | 1322 | remove_adapter(adapter); |
1313 | if (list_empty(&adapter_list)) | 1323 | if (list_empty(&adapter_list)) |
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) | |||
1315 | 1325 | ||
1316 | free_tid_maps(&t->tid_maps); | 1326 | free_tid_maps(&t->tid_maps); |
1317 | T3C_DATA(tdev) = NULL; | 1327 | T3C_DATA(tdev) = NULL; |
1318 | t3_free_l2t(L2DATA(tdev)); | 1328 | rcu_read_lock(); |
1319 | L2DATA(tdev) = NULL; | 1329 | d = L2DATA(tdev); |
1330 | rcu_read_unlock(); | ||
1331 | rcu_assign_pointer(tdev->l2opt, NULL); | ||
1332 | call_rcu(&d->rcu_head, clean_l2_data); | ||
1320 | if (t->nofail_skb) | 1333 | if (t->nofail_skb) |
1321 | kfree_skb(t->nofail_skb); | 1334 | kfree_skb(t->nofail_skb); |
1322 | kfree(t); | 1335 | kfree(t); |
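The deactivate path above no longer frees the L2 table directly: it unpublishes the RCU-protected pointer and defers the free with call_rcu(), so readers that found the table under rcu_read_lock() cannot have it destroyed underneath them. A generic sketch of that unpublish-then-retire sequence, assuming kernel context (struct table is a placeholder, not the cxgb3 l2t_data layout):

/* Generic sketch of the RCU retire sequence used above. */
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct table {
	struct rcu_head rcu_head;
	/* ... payload ... */
};

static struct table __rcu *active_table;

static void table_free_rcu(struct rcu_head *head)
{
	struct table *t = container_of(head, struct table, rcu_head);

	kfree(t);	/* runs only after all pre-existing RCU readers finish */
}

static void table_retire(void)
{
	/* Caller is assumed to hold whatever lock serializes updaters. */
	struct table *t = rcu_dereference_protected(active_table, 1);

	rcu_assign_pointer(active_table, NULL);		/* unpublish first */
	if (t)
		call_rcu(&t->rcu_head, table_free_rcu);	/* then defer the free */
}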
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c | |||
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | |||
300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | 300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, |
301 | struct net_device *dev) | 301 | struct net_device *dev) |
302 | { | 302 | { |
303 | struct l2t_entry *e; | 303 | struct l2t_entry *e = NULL; |
304 | struct l2t_data *d = L2DATA(cdev); | 304 | struct l2t_data *d; |
305 | int hash; | ||
305 | u32 addr = *(u32 *) neigh->primary_key; | 306 | u32 addr = *(u32 *) neigh->primary_key; |
306 | int ifidx = neigh->dev->ifindex; | 307 | int ifidx = neigh->dev->ifindex; |
307 | int hash = arp_hash(addr, ifidx, d); | ||
308 | struct port_info *p = netdev_priv(dev); | 308 | struct port_info *p = netdev_priv(dev); |
309 | int smt_idx = p->port_id; | 309 | int smt_idx = p->port_id; |
310 | 310 | ||
311 | rcu_read_lock(); | ||
312 | d = L2DATA(cdev); | ||
313 | if (!d) | ||
314 | goto done_rcu; | ||
315 | |||
316 | hash = arp_hash(addr, ifidx, d); | ||
317 | |||
311 | write_lock_bh(&d->lock); | 318 | write_lock_bh(&d->lock); |
312 | for (e = d->l2tab[hash].first; e; e = e->next) | 319 | for (e = d->l2tab[hash].first; e; e = e->next) |
313 | if (e->addr == addr && e->ifindex == ifidx && | 320 | if (e->addr == addr && e->ifindex == ifidx && |
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | |||
338 | } | 345 | } |
339 | done: | 346 | done: |
340 | write_unlock_bh(&d->lock); | 347 | write_unlock_bh(&d->lock); |
348 | done_rcu: | ||
349 | rcu_read_unlock(); | ||
341 | return e; | 350 | return e; |
342 | } | 351 | } |
343 | 352 | ||
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h | |||
@@ -76,6 +76,7 @@ struct l2t_data { | |||
76 | atomic_t nfree; /* number of free entries */ | 76 | atomic_t nfree; /* number of free entries */ |
77 | rwlock_t lock; | 77 | rwlock_t lock; |
78 | struct l2t_entry l2tab[0]; | 78 | struct l2t_entry l2tab[0]; |
79 | struct rcu_head rcu_head; /* to handle rcu cleanup */ | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, | 82 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, |
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, | |||
99 | /* | 100 | /* |
100 | * Getting to the L2 data from an offload device. | 101 | * Getting to the L2 data from an offload device. |
101 | */ | 102 | */ |
102 | #define L2DATA(dev) ((dev)->l2opt) | 103 | #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) |
103 | 104 | ||
104 | #define W_TCB_L2T_IX 0 | 105 | #define W_TCB_L2T_IX 0 |
105 | #define S_TCB_L2T_IX 7 | 106 | #define S_TCB_L2T_IX 7 |
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, | |||
126 | return t3_l2t_send_slow(dev, skb, e); | 127 | return t3_l2t_send_slow(dev, skb, e); |
127 | } | 128 | } |
128 | 129 | ||
129 | static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) | 130 | static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) |
130 | { | 131 | { |
131 | if (atomic_dec_and_test(&e->refcnt)) | 132 | struct l2t_data *d; |
133 | |||
134 | rcu_read_lock(); | ||
135 | d = L2DATA(t); | ||
136 | |||
137 | if (atomic_dec_and_test(&e->refcnt) && d) | ||
132 | t3_l2e_free(d, e); | 138 | t3_l2e_free(d, e); |
139 | |||
140 | rcu_read_unlock(); | ||
133 | } | 141 | } |
134 | 142 | ||
135 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | 143 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) |
136 | { | 144 | { |
137 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | 145 | if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ |
138 | atomic_dec(&d->nfree); | 146 | atomic_dec(&d->nfree); |
139 | } | 147 | } |
140 | 148 | ||
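With the table RCU-protected, L2DATA() becomes an rcu_dereference() and every caller is expected to hold rcu_read_lock() across the lookup and use, as the reworked l2t_release() and t3_l2t_get() do. A minimal read-side sketch under the same assumption (names are placeholders):

/* Minimal RCU read-side sketch matching the L2DATA() change. */
#include <linux/rcupdate.h>

struct table;					/* opaque payload */
static struct table __rcu *shared_table;

#define GET_TABLE()	(rcu_dereference(shared_table))

static void reader(void (*use)(struct table *t))
{
	struct table *t;

	rcu_read_lock();		/* pins the table until unlock */
	t = GET_TABLE();
	if (t)
		use(t);			/* safe: the free is deferred via call_rcu() */
	rcu_read_unlock();
}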
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b5..b4efa292fd6f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3712 | setup_debugfs(adapter); | 3712 | setup_debugfs(adapter); |
3713 | } | 3713 | } |
3714 | 3714 | ||
3715 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ | ||
3716 | pdev->needs_freset = 1; | ||
3717 | |||
3715 | if (is_offload(adapter)) | 3718 | if (is_offload(adapter)) |
3716 | attach_ulds(adapter); | 3719 | attach_ulds(adapter); |
3717 | 3720 | ||
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 8dd5fccef725..d393f1e764ed 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | 636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", |
637 | netdev->irq, rc); | 637 | netdev->irq, rc); |
638 | do { | 638 | do { |
639 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 639 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
640 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 640 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
641 | 641 | ||
642 | goto err_out; | 642 | goto err_out; |
643 | } | 643 | } |
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 05172c39a0ce..376e3e94bae0 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c | |||
@@ -239,7 +239,7 @@ static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) | |||
239 | dest = macvlan_hash_lookup(port, eth->h_dest); | 239 | dest = macvlan_hash_lookup(port, eth->h_dest); |
240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { | 240 | if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { |
241 | /* send to lowerdev first for its network taps */ | 241 | /* send to lowerdev first for its network taps */ |
242 | vlan->forward(vlan->lowerdev, skb); | 242 | dev_forward_skb(vlan->lowerdev, skb); |
243 | 243 | ||
244 | return NET_XMIT_SUCCESS; | 244 | return NET_XMIT_SUCCESS; |
245 | } | 245 | } |
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c index 6e03de034ac7..f76ab6bf3096 100644 --- a/drivers/net/mlx4/en_tx.c +++ b/drivers/net/mlx4/en_tx.c | |||
@@ -172,7 +172,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv, | |||
172 | memset(ring->buf, 0, ring->buf_size); | 172 | memset(ring->buf, 0, ring->buf_size); |
173 | 173 | ||
174 | ring->qp_state = MLX4_QP_STATE_RST; | 174 | ring->qp_state = MLX4_QP_STATE_RST; |
175 | ring->doorbell_qpn = swab32(ring->qp.qpn << 8); | 175 | ring->doorbell_qpn = ring->qp.qpn << 8; |
176 | 176 | ||
177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, | 177 | mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn, |
178 | ring->cqn, &ring->context); | 178 | ring->cqn, &ring->context); |
@@ -791,7 +791,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
791 | skb_orphan(skb); | 791 | skb_orphan(skb); |
792 | 792 | ||
793 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { | 793 | if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tag) { |
794 | *(u32 *) (&tx_desc->ctrl.vlan_tag) |= ring->doorbell_qpn; | 794 | *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn); |
795 | op_own |= htonl((bf_index & 0xffff) << 8); | 795 | op_own |= htonl((bf_index & 0xffff) << 8); |
796 | /* Ensure new descirptor hits memory | 796 | /* Ensure new descirptor hits memory |
797 | * before setting ownership of this descriptor to HW */ | 797 | * before setting ownership of this descriptor to HW */ |
@@ -812,7 +812,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) | |||
812 | wmb(); | 812 | wmb(); |
813 | tx_desc->ctrl.owner_opcode = op_own; | 813 | tx_desc->ctrl.owner_opcode = op_own; |
814 | wmb(); | 814 | wmb(); |
815 | writel(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); | 815 | iowrite32be(ring->doorbell_qpn, ring->bf.uar->map + MLX4_SEND_DOORBELL); |
816 | } | 816 | } |
817 | 817 | ||
818 | /* Poll CQ here */ | 818 | /* Poll CQ here */ |
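The mlx4 change keeps doorbell_qpn in host byte order and converts at the point of use: cpu_to_be32() for the in-memory descriptor word and iowrite32be() for the MMIO doorbell, instead of storing a pre-swapped value and using writel(). A kernel-context sketch of the two call sites (the register offset and descriptor layout are hypothetical; only the conversion calls matter):

/* Sketch of the byte-order handling above; DOORBELL_OFFSET and struct desc are hypothetical. */
#include <linux/io.h>
#include <asm/byteorder.h>

#define DOORBELL_OFFSET	0x14		/* hypothetical MMIO offset */

struct desc {
	__be32 vlan_tag;		/* device expects big-endian in memory */
};

static void ring_doorbell(void __iomem *bar, struct desc *d, u32 qpn)
{
	/* In-memory descriptor field: explicit CPU -> big-endian conversion. */
	d->vlan_tag |= cpu_to_be32(qpn << 8);

	/* MMIO doorbell: iowrite32be() swaps on little-endian hosts and is a
	 * plain store on big-endian ones, so no manual swab32() is needed. */
	iowrite32be(qpn << 8, bar + DOORBELL_OFFSET);
}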
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index ed2a3977c6e7..e8882023576b 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c | |||
@@ -307,6 +307,11 @@ static ssize_t store_enabled(struct netconsole_target *nt, | |||
307 | return err; | 307 | return err; |
308 | if (enabled < 0 || enabled > 1) | 308 | if (enabled < 0 || enabled > 1) |
309 | return -EINVAL; | 309 | return -EINVAL; |
310 | if (enabled == nt->enabled) { | ||
311 | printk(KERN_INFO "netconsole: network logging has already %s\n", | ||
312 | nt->enabled ? "started" : "stopped"); | ||
313 | return -EINVAL; | ||
314 | } | ||
310 | 315 | ||
311 | if (enabled) { /* 1 */ | 316 | if (enabled) { /* 1 */ |
312 | 317 | ||
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 567ff10889be..b8b4ba27b0e7 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | 1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), |
1200 | &hw->reg->INT_EN); | 1200 | &hw->reg->INT_EN); |
1201 | pch_gbe_stop_receive(adapter); | 1201 | pch_gbe_stop_receive(adapter); |
1202 | int_st |= ioread32(&hw->reg->INT_ST); | ||
1203 | int_st = int_st & ioread32(&hw->reg->INT_EN); | ||
1202 | } | 1204 | } |
1203 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1205 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1204 | adapter->stats.intr_rx_dma_err_count++; | 1206 | adapter->stats.intr_rx_dma_err_count++; |
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1218 | /* Set Pause packet */ | 1220 | /* Set Pause packet */ |
1219 | pch_gbe_mac_set_pause_packet(hw); | 1221 | pch_gbe_mac_set_pause_packet(hw); |
1220 | } | 1222 | } |
1221 | if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) | ||
1222 | == 0) { | ||
1223 | return IRQ_HANDLED; | ||
1224 | } | ||
1225 | } | 1223 | } |
1226 | 1224 | ||
1227 | /* When request status is Receive interruption */ | 1225 | /* When request status is Receive interruption */ |
1228 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { | 1226 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || |
1227 | (adapter->rx_stop_flag == true)) { | ||
1229 | if (likely(napi_schedule_prep(&adapter->napi))) { | 1228 | if (likely(napi_schedule_prep(&adapter->napi))) { |
1230 | /* Enable only Rx Descriptor empty */ | 1229 | /* Enable only Rx Descriptor empty */ |
1231 | atomic_inc(&adapter->irq_sem); | 1230 | atomic_inc(&adapter->irq_sem); |
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1385 | struct sk_buff *skb; | 1384 | struct sk_buff *skb; |
1386 | unsigned int i; | 1385 | unsigned int i; |
1387 | unsigned int cleaned_count = 0; | 1386 | unsigned int cleaned_count = 0; |
1388 | bool cleaned = false; | 1387 | bool cleaned = true; |
1389 | 1388 | ||
1390 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); | 1389 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); |
1391 | 1390 | ||
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1396 | 1395 | ||
1397 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { | 1396 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { |
1398 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); | 1397 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); |
1399 | cleaned = true; | ||
1400 | buffer_info = &tx_ring->buffer_info[i]; | 1398 | buffer_info = &tx_ring->buffer_info[i]; |
1401 | skb = buffer_info->skb; | 1399 | skb = buffer_info->skb; |
1402 | 1400 | ||
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1439 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); | 1437 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); |
1440 | 1438 | ||
1441 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 1439 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
1442 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) | 1440 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { |
1441 | cleaned = false; | ||
1443 | break; | 1442 | break; |
1443 | } | ||
1444 | } | 1444 | } |
1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", | 1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", |
1446 | cleaned_count); | 1446 | cleaned_count); |
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2168 | { | 2168 | { |
2169 | struct pch_gbe_adapter *adapter = | 2169 | struct pch_gbe_adapter *adapter = |
2170 | container_of(napi, struct pch_gbe_adapter, napi); | 2170 | container_of(napi, struct pch_gbe_adapter, napi); |
2171 | struct net_device *netdev = adapter->netdev; | ||
2172 | int work_done = 0; | 2171 | int work_done = 0; |
2173 | bool poll_end_flag = false; | 2172 | bool poll_end_flag = false; |
2174 | bool cleaned = false; | 2173 | bool cleaned = false; |
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2176 | 2175 | ||
2177 | pr_debug("budget : %d\n", budget); | 2176 | pr_debug("budget : %d\n", budget); |
2178 | 2177 | ||
2179 | /* Keep link state information with original netdev */ | 2178 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2180 | if (!netif_carrier_ok(netdev)) { | 2179 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); |
2180 | |||
2181 | if (!cleaned) | ||
2182 | work_done = budget; | ||
2183 | /* If no Tx and not enough Rx work done, | ||
2184 | * exit the polling mode | ||
2185 | */ | ||
2186 | if (work_done < budget) | ||
2181 | poll_end_flag = true; | 2187 | poll_end_flag = true; |
2182 | } else { | 2188 | |
2183 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | 2189 | if (poll_end_flag) { |
2190 | napi_complete(napi); | ||
2191 | if (adapter->rx_stop_flag) { | ||
2192 | adapter->rx_stop_flag = false; | ||
2193 | pch_gbe_start_receive(&adapter->hw); | ||
2194 | } | ||
2195 | pch_gbe_irq_enable(adapter); | ||
2196 | } else | ||
2184 | if (adapter->rx_stop_flag) { | 2197 | if (adapter->rx_stop_flag) { |
2185 | adapter->rx_stop_flag = false; | 2198 | adapter->rx_stop_flag = false; |
2186 | pch_gbe_start_receive(&adapter->hw); | 2199 | pch_gbe_start_receive(&adapter->hw); |
2187 | int_en = ioread32(&adapter->hw.reg->INT_EN); | 2200 | int_en = ioread32(&adapter->hw.reg->INT_EN); |
2188 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | 2201 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), |
2189 | &adapter->hw.reg->INT_EN); | 2202 | &adapter->hw.reg->INT_EN); |
2190 | } | 2203 | } |
2191 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2192 | |||
2193 | if (cleaned) | ||
2194 | work_done = budget; | ||
2195 | /* If no Tx and not enough Rx work done, | ||
2196 | * exit the polling mode | ||
2197 | */ | ||
2198 | if ((work_done < budget) || !netif_running(netdev)) | ||
2199 | poll_end_flag = true; | ||
2200 | } | ||
2201 | |||
2202 | if (poll_end_flag) { | ||
2203 | napi_complete(napi); | ||
2204 | pch_gbe_irq_enable(adapter); | ||
2205 | } | ||
2206 | 2204 | ||
2207 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", | 2205 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", |
2208 | poll_end_flag, work_done, budget); | 2206 | poll_end_flag, work_done, budget); |
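The rewritten pch_gbe poll routine now follows the usual NAPI contract: run Rx up to the budget, report the full budget when Tx cleanup had to stop early so the core polls again, and only call napi_complete() and re-enable interrupts once work_done is below the budget. A skeleton of that contract (clean_rx()/clean_tx()/irq_enable() stand in for driver helpers assumed to exist elsewhere):

/* Skeleton of the NAPI budget contract the rewrite above follows. */
#include <linux/netdevice.h>

/* Driver-specific helpers, assumed to be defined elsewhere. */
void clean_rx(struct napi_struct *napi, int *work_done, int budget);
bool clean_tx(struct napi_struct *napi);
void irq_enable(struct napi_struct *napi);

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	bool tx_fully_cleaned;

	clean_rx(napi, &work_done, budget);	/* never exceeds the budget */
	tx_fully_cleaned = clean_tx(napi);

	if (!tx_fully_cleaned)
		work_done = budget;		/* ask the core to poll again */

	if (work_done < budget) {
		napi_complete(napi);		/* leave polling mode ... */
		irq_enable(napi);		/* ... then re-arm interrupts */
	}
	return work_done;
}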
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1e..edd7304773eb 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
589 | prune_rx_ts(dp83640); | 589 | prune_rx_ts(dp83640); |
590 | 590 | ||
591 | if (list_empty(&dp83640->rxpool)) { | 591 | if (list_empty(&dp83640->rxpool)) { |
592 | pr_warning("dp83640: rx timestamp pool is empty\n"); | 592 | pr_debug("dp83640: rx timestamp pool is empty\n"); |
593 | goto out; | 593 | goto out; |
594 | } | 594 | } |
595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); | 595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); |
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
612 | skb = skb_dequeue(&dp83640->tx_queue); | 612 | skb = skb_dequeue(&dp83640->tx_queue); |
613 | 613 | ||
614 | if (!skb) { | 614 | if (!skb) { |
615 | pr_warning("dp83640: have timestamp but tx_queue empty\n"); | 615 | pr_debug("dp83640: have timestamp but tx_queue empty\n"); |
616 | return; | 616 | return; |
617 | } | 617 | } |
618 | ns = phy2txts(phy_txts); | 618 | ns = phy2txts(phy_txts); |
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index eae542a7e987..89f829f5f725 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c | |||
@@ -285,8 +285,10 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) | |||
285 | ip_send_check(iph); | 285 | ip_send_check(iph); |
286 | 286 | ||
287 | ip_local_out(skb); | 287 | ip_local_out(skb); |
288 | return 1; | ||
288 | 289 | ||
289 | tx_error: | 290 | tx_error: |
291 | kfree_skb(skb); | ||
290 | return 1; | 292 | return 1; |
291 | } | 293 | } |
292 | 294 | ||
@@ -305,11 +307,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
305 | } | 307 | } |
306 | 308 | ||
307 | header = (struct pptp_gre_header *)(skb->data); | 309 | header = (struct pptp_gre_header *)(skb->data); |
310 | headersize = sizeof(*header); | ||
308 | 311 | ||
309 | /* test if acknowledgement present */ | 312 | /* test if acknowledgement present */ |
310 | if (PPTP_GRE_IS_A(header->ver)) { | 313 | if (PPTP_GRE_IS_A(header->ver)) { |
311 | __u32 ack = (PPTP_GRE_IS_S(header->flags)) ? | 314 | __u32 ack; |
312 | header->ack : header->seq; /* ack in different place if S = 0 */ | 315 | |
316 | if (!pskb_may_pull(skb, headersize)) | ||
317 | goto drop; | ||
318 | header = (struct pptp_gre_header *)(skb->data); | ||
319 | |||
320 | /* ack in different place if S = 0 */ | ||
321 | ack = PPTP_GRE_IS_S(header->flags) ? header->ack : header->seq; | ||
313 | 322 | ||
314 | ack = ntohl(ack); | 323 | ack = ntohl(ack); |
315 | 324 | ||
@@ -318,21 +327,18 @@ static int pptp_rcv_core(struct sock *sk, struct sk_buff *skb) | |||
318 | /* also handle sequence number wrap-around */ | 327 | /* also handle sequence number wrap-around */ |
319 | if (WRAPPED(ack, opt->ack_recv)) | 328 | if (WRAPPED(ack, opt->ack_recv)) |
320 | opt->ack_recv = ack; | 329 | opt->ack_recv = ack; |
330 | } else { | ||
331 | headersize -= sizeof(header->ack); | ||
321 | } | 332 | } |
322 | |||
323 | /* test if payload present */ | 333 | /* test if payload present */ |
324 | if (!PPTP_GRE_IS_S(header->flags)) | 334 | if (!PPTP_GRE_IS_S(header->flags)) |
325 | goto drop; | 335 | goto drop; |
326 | 336 | ||
327 | headersize = sizeof(*header); | ||
328 | payload_len = ntohs(header->payload_len); | 337 | payload_len = ntohs(header->payload_len); |
329 | seq = ntohl(header->seq); | 338 | seq = ntohl(header->seq); |
330 | 339 | ||
331 | /* no ack present? */ | ||
332 | if (!PPTP_GRE_IS_A(header->ver)) | ||
333 | headersize -= sizeof(header->ack); | ||
334 | /* check for incomplete packet (length smaller than expected) */ | 340 | /* check for incomplete packet (length smaller than expected) */ |
335 | if (skb->len - headersize < payload_len) | 341 | if (!pskb_may_pull(skb, headersize + payload_len)) |
336 | goto drop; | 342 | goto drop; |
337 | 343 | ||
338 | payload = skb->data + headersize; | 344 | payload = skb->data + headersize; |
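The pptp receive fix validates the linear data with pskb_may_pull() before each header dereference and again before touching the payload, reloading the header pointer afterwards because the pull may reallocate the skb head. A kernel-context sketch of that pattern (the header struct below is simplified, not the real pptp_gre_header):

/* Sketch of the pskb_may_pull() pattern; toy_hdr is a simplified header. */
#include <linux/skbuff.h>
#include <asm/byteorder.h>

struct toy_hdr {
	__be16 payload_len;
	__be32 seq;
} __packed;

static int parse(struct sk_buff *skb)
{
	const struct toy_hdr *hdr;
	unsigned int payload_len;

	if (!pskb_may_pull(skb, sizeof(*hdr)))		/* header fully present? */
		return -1;
	hdr = (const struct toy_hdr *)skb->data;	/* read only after the pull */

	payload_len = be16_to_cpu(hdr->payload_len);
	if (!pskb_may_pull(skb, sizeof(*hdr) + payload_len))
		return -1;				/* truncated packet */
	hdr = (const struct toy_hdr *)skb->data;	/* head may have been reallocated */

	/* payload starts at skb->data + sizeof(*hdr) and is now fully linear */
	return 0;
}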
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index c23667017922..6d657cabb951 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -2859,7 +2859,7 @@ static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp) | |||
2859 | rtl_writephy(tp, 0x1f, 0x0004); | 2859 | rtl_writephy(tp, 0x1f, 0x0004); |
2860 | rtl_writephy(tp, 0x1f, 0x0007); | 2860 | rtl_writephy(tp, 0x1f, 0x0007); |
2861 | rtl_writephy(tp, 0x1e, 0x0020); | 2861 | rtl_writephy(tp, 0x1e, 0x0020); |
2862 | rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100); | 2862 | rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100); |
2863 | rtl_writephy(tp, 0x1f, 0x0002); | 2863 | rtl_writephy(tp, 0x1f, 0x0002); |
2864 | rtl_writephy(tp, 0x1f, 0x0000); | 2864 | rtl_writephy(tp, 0x1f, 0x0000); |
2865 | rtl_writephy(tp, 0x0d, 0x0007); | 2865 | rtl_writephy(tp, 0x0d, 0x0007); |
@@ -3316,6 +3316,37 @@ static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp) | |||
3316 | } | 3316 | } |
3317 | } | 3317 | } |
3318 | 3318 | ||
3319 | static void rtl_wol_suspend_quirk(struct rtl8169_private *tp) | ||
3320 | { | ||
3321 | void __iomem *ioaddr = tp->mmio_addr; | ||
3322 | |||
3323 | switch (tp->mac_version) { | ||
3324 | case RTL_GIGA_MAC_VER_29: | ||
3325 | case RTL_GIGA_MAC_VER_30: | ||
3326 | case RTL_GIGA_MAC_VER_32: | ||
3327 | case RTL_GIGA_MAC_VER_33: | ||
3328 | case RTL_GIGA_MAC_VER_34: | ||
3329 | RTL_W32(RxConfig, RTL_R32(RxConfig) | | ||
3330 | AcceptBroadcast | AcceptMulticast | AcceptMyPhys); | ||
3331 | break; | ||
3332 | default: | ||
3333 | break; | ||
3334 | } | ||
3335 | } | ||
3336 | |||
3337 | static bool rtl_wol_pll_power_down(struct rtl8169_private *tp) | ||
3338 | { | ||
3339 | if (!(__rtl8169_get_wol(tp) & WAKE_ANY)) | ||
3340 | return false; | ||
3341 | |||
3342 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3343 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3344 | |||
3345 | rtl_wol_suspend_quirk(tp); | ||
3346 | |||
3347 | return true; | ||
3348 | } | ||
3349 | |||
3319 | static void r810x_phy_power_down(struct rtl8169_private *tp) | 3350 | static void r810x_phy_power_down(struct rtl8169_private *tp) |
3320 | { | 3351 | { |
3321 | rtl_writephy(tp, 0x1f, 0x0000); | 3352 | rtl_writephy(tp, 0x1f, 0x0000); |
@@ -3330,18 +3361,8 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) | |||
3330 | 3361 | ||
3331 | static void r810x_pll_power_down(struct rtl8169_private *tp) | 3362 | static void r810x_pll_power_down(struct rtl8169_private *tp) |
3332 | { | 3363 | { |
3333 | void __iomem *ioaddr = tp->mmio_addr; | 3364 | if (rtl_wol_pll_power_down(tp)) |
3334 | |||
3335 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | ||
3336 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3337 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3338 | |||
3339 | if (tp->mac_version == RTL_GIGA_MAC_VER_29 || | ||
3340 | tp->mac_version == RTL_GIGA_MAC_VER_30) | ||
3341 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3342 | AcceptMulticast | AcceptMyPhys); | ||
3343 | return; | 3365 | return; |
3344 | } | ||
3345 | 3366 | ||
3346 | r810x_phy_power_down(tp); | 3367 | r810x_phy_power_down(tp); |
3347 | } | 3368 | } |
@@ -3430,17 +3451,8 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) | |||
3430 | tp->mac_version == RTL_GIGA_MAC_VER_33) | 3451 | tp->mac_version == RTL_GIGA_MAC_VER_33) |
3431 | rtl_ephy_write(ioaddr, 0x19, 0xff64); | 3452 | rtl_ephy_write(ioaddr, 0x19, 0xff64); |
3432 | 3453 | ||
3433 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { | 3454 | if (rtl_wol_pll_power_down(tp)) |
3434 | rtl_writephy(tp, 0x1f, 0x0000); | ||
3435 | rtl_writephy(tp, MII_BMCR, 0x0000); | ||
3436 | |||
3437 | if (tp->mac_version == RTL_GIGA_MAC_VER_32 || | ||
3438 | tp->mac_version == RTL_GIGA_MAC_VER_33 || | ||
3439 | tp->mac_version == RTL_GIGA_MAC_VER_34) | ||
3440 | RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast | | ||
3441 | AcceptMulticast | AcceptMyPhys); | ||
3442 | return; | 3455 | return; |
3443 | } | ||
3444 | 3456 | ||
3445 | r8168_phy_power_down(tp); | 3457 | r8168_phy_power_down(tp); |
3446 | 3458 | ||
@@ -5788,11 +5800,30 @@ static const struct dev_pm_ops rtl8169_pm_ops = { | |||
5788 | 5800 | ||
5789 | #endif /* !CONFIG_PM */ | 5801 | #endif /* !CONFIG_PM */ |
5790 | 5802 | ||
5803 | static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp) | ||
5804 | { | ||
5805 | void __iomem *ioaddr = tp->mmio_addr; | ||
5806 | |||
5807 | /* WoL fails with 8168b when the receiver is disabled. */ | ||
5808 | switch (tp->mac_version) { | ||
5809 | case RTL_GIGA_MAC_VER_11: | ||
5810 | case RTL_GIGA_MAC_VER_12: | ||
5811 | case RTL_GIGA_MAC_VER_17: | ||
5812 | pci_clear_master(tp->pci_dev); | ||
5813 | |||
5814 | RTL_W8(ChipCmd, CmdRxEnb); | ||
5815 | /* PCI commit */ | ||
5816 | RTL_R8(ChipCmd); | ||
5817 | break; | ||
5818 | default: | ||
5819 | break; | ||
5820 | } | ||
5821 | } | ||
5822 | |||
5791 | static void rtl_shutdown(struct pci_dev *pdev) | 5823 | static void rtl_shutdown(struct pci_dev *pdev) |
5792 | { | 5824 | { |
5793 | struct net_device *dev = pci_get_drvdata(pdev); | 5825 | struct net_device *dev = pci_get_drvdata(pdev); |
5794 | struct rtl8169_private *tp = netdev_priv(dev); | 5826 | struct rtl8169_private *tp = netdev_priv(dev); |
5795 | void __iomem *ioaddr = tp->mmio_addr; | ||
5796 | 5827 | ||
5797 | rtl8169_net_suspend(dev); | 5828 | rtl8169_net_suspend(dev); |
5798 | 5829 | ||
@@ -5806,16 +5837,9 @@ static void rtl_shutdown(struct pci_dev *pdev) | |||
5806 | spin_unlock_irq(&tp->lock); | 5837 | spin_unlock_irq(&tp->lock); |
5807 | 5838 | ||
5808 | if (system_state == SYSTEM_POWER_OFF) { | 5839 | if (system_state == SYSTEM_POWER_OFF) { |
5809 | /* WoL fails with 8168b when the receiver is disabled. */ | 5840 | if (__rtl8169_get_wol(tp) & WAKE_ANY) { |
5810 | if ((tp->mac_version == RTL_GIGA_MAC_VER_11 || | 5841 | rtl_wol_suspend_quirk(tp); |
5811 | tp->mac_version == RTL_GIGA_MAC_VER_12 || | 5842 | rtl_wol_shutdown_quirk(tp); |
5812 | tp->mac_version == RTL_GIGA_MAC_VER_17) && | ||
5813 | (tp->features & RTL_FEATURE_WOL)) { | ||
5814 | pci_clear_master(pdev); | ||
5815 | |||
5816 | RTL_W8(ChipCmd, CmdRxEnb); | ||
5817 | /* PCI commit */ | ||
5818 | RTL_R8(ChipCmd); | ||
5819 | } | 5843 | } |
5820 | 5844 | ||
5821 | pci_wake_from_d3(pdev, true); | 5845 | pci_wake_from_d3(pdev, true); |
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c index b9016a30cdc5..c90ddb61cc56 100644 --- a/drivers/net/smsc911x.c +++ b/drivers/net/smsc911x.c | |||
@@ -26,6 +26,7 @@ | |||
26 | * LAN9215, LAN9216, LAN9217, LAN9218 | 26 | * LAN9215, LAN9216, LAN9217, LAN9218 |
27 | * LAN9210, LAN9211 | 27 | * LAN9210, LAN9211 |
28 | * LAN9220, LAN9221 | 28 | * LAN9220, LAN9221 |
29 | * LAN89218 | ||
29 | * | 30 | * |
30 | */ | 31 | */ |
31 | 32 | ||
@@ -1983,6 +1984,7 @@ static int __devinit smsc911x_init(struct net_device *dev) | |||
1983 | case 0x01170000: | 1984 | case 0x01170000: |
1984 | case 0x01160000: | 1985 | case 0x01160000: |
1985 | case 0x01150000: | 1986 | case 0x01150000: |
1987 | case 0x218A0000: | ||
1986 | /* LAN911[5678] family */ | 1988 | /* LAN911[5678] family */ |
1987 | pdata->generation = pdata->idrev & 0x0000FFFF; | 1989 | pdata->generation = pdata->idrev & 0x0000FFFF; |
1988 | break; | 1990 | break; |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 4a1374df6084..c11a2b8327f3 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -15577,7 +15577,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev) | |||
15577 | 15577 | ||
15578 | cancel_work_sync(&tp->reset_task); | 15578 | cancel_work_sync(&tp->reset_task); |
15579 | 15579 | ||
15580 | if (!tg3_flag(tp, USE_PHYLIB)) { | 15580 | if (tg3_flag(tp, USE_PHYLIB)) { |
15581 | tg3_phy_fini(tp); | 15581 | tg3_phy_fini(tp); |
15582 | tg3_mdio_fini(tp); | 15582 | tg3_mdio_fini(tp); |
15583 | } | 15583 | } |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a7306..3e69c631ebb4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | |||
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = { | |||
1514 | {0x00008258, 0x00000000}, | 1514 | {0x00008258, 0x00000000}, |
1515 | {0x0000825c, 0x40000000}, | 1515 | {0x0000825c, 0x40000000}, |
1516 | {0x00008260, 0x00080922}, | 1516 | {0x00008260, 0x00080922}, |
1517 | {0x00008264, 0x9bc00010}, | 1517 | {0x00008264, 0x9d400010}, |
1518 | {0x00008268, 0xffffffff}, | 1518 | {0x00008268, 0xffffffff}, |
1519 | {0x0000826c, 0x0000ffff}, | 1519 | {0x0000826c, 0x0000ffff}, |
1520 | {0x00008270, 0x00000000}, | 1520 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb2..4c21f8cbdeb5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc, | |||
205 | 205 | ||
206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | 206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) |
207 | { | 207 | { |
208 | struct ath_hw *ah = sc->sc_ah; | ||
209 | struct ath_common *common = ath9k_hw_common(ah); | ||
208 | struct ath_buf *bf; | 210 | struct ath_buf *bf; |
209 | 211 | ||
210 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | 212 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); |
211 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | 213 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
212 | 214 | ||
213 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | 215 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
214 | if (bf->bf_mpdu) | 216 | if (bf->bf_mpdu) { |
217 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | ||
218 | common->rx_bufsize, | ||
219 | DMA_BIDIRECTIONAL); | ||
215 | dev_kfree_skb_any(bf->bf_mpdu); | 220 | dev_kfree_skb_any(bf->bf_mpdu); |
221 | bf->bf_buf_addr = 0; | ||
222 | bf->bf_mpdu = NULL; | ||
223 | } | ||
216 | } | 224 | } |
217 | 225 | ||
218 | INIT_LIST_HEAD(&sc->rx.rxbuf); | 226 | INIT_LIST_HEAD(&sc->rx.rxbuf); |
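The ath9k cleanup now unmaps each receive buffer with dma_unmap_single() before freeing the skb and clears the stale bf_buf_addr/bf_mpdu fields, so the mapping is not leaked and later code cannot reuse a dangling DMA address. A kernel-context sketch of the unmap-before-free step (struct rxbuf is a simplified stand-in for ath_buf):

/* Sketch of the unmap-before-free cleanup; rxbuf is a simplified stand-in. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

struct rxbuf {
	struct sk_buff *skb;
	dma_addr_t	dma;
};

static void rxbuf_cleanup(struct device *dev, struct rxbuf *bf, size_t bufsize)
{
	if (!bf->skb)
		return;

	dma_unmap_single(dev, bf->dma, bufsize, DMA_BIDIRECTIONAL);
	dev_kfree_skb_any(bf->skb);	/* free only after the unmap */
	bf->skb = NULL;			/* clear stale references */
	bf->dma = 0;
}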
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7fd..e5971fe9d169 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv) | |||
937 | &priv->contexts[IWL_RXON_CTX_BSS]); | 937 | &priv->contexts[IWL_RXON_CTX_BSS]); |
938 | #endif | 938 | #endif |
939 | 939 | ||
940 | wake_up_interruptible(&priv->wait_command_queue); | 940 | wake_up(&priv->wait_command_queue); |
941 | 941 | ||
942 | /* Keep the restart process from trying to send host | 942 | /* Keep the restart process from trying to send host |
943 | * commands by clearing the INIT status bit */ | 943 | * commands by clearing the INIT status bit */ |
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) | |||
1746 | 1746 | ||
1747 | /* Set the FW error flag -- cleared on iwl_down */ | 1747 | /* Set the FW error flag -- cleared on iwl_down */ |
1748 | set_bit(STATUS_FW_ERROR, &priv->status); | 1748 | set_bit(STATUS_FW_ERROR, &priv->status); |
1749 | wake_up_interruptible(&priv->wait_command_queue); | 1749 | wake_up(&priv->wait_command_queue); |
1750 | /* | 1750 | /* |
1751 | * Keep the restart process from trying to send host | 1751 | * Keep the restart process from trying to send host |
1752 | * commands by clearing the INIT status bit | 1752 | * commands by clearing the INIT status bit |
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122cb..ce1fc9feb61f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c | |||
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
167 | goto out; | 167 | goto out; |
168 | } | 168 | } |
169 | 169 | ||
170 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 170 | ret = wait_event_timeout(priv->wait_command_queue, |
171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | 171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), |
172 | HOST_COMPLETE_TIMEOUT); | 172 | HOST_COMPLETE_TIMEOUT); |
173 | if (!ret) { | 173 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3e..ef9e268bf8a0 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c | |||
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
625 | cmd = txq->cmd[cmd_index]; | 625 | cmd = txq->cmd[cmd_index]; |
626 | meta = &txq->meta[cmd_index]; | 626 | meta = &txq->meta[cmd_index]; |
627 | 627 | ||
628 | txq->time_stamp = jiffies; | ||
629 | |||
628 | pci_unmap_single(priv->pci_dev, | 630 | pci_unmap_single(priv->pci_dev, |
629 | dma_unmap_addr(meta, mapping), | 631 | dma_unmap_addr(meta, mapping), |
630 | dma_unmap_len(meta, len), | 632 | dma_unmap_len(meta, len), |
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
645 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 647 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
646 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", | 648 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
647 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); | 649 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); |
648 | wake_up_interruptible(&priv->wait_command_queue); | 650 | wake_up(&priv->wait_command_queue); |
649 | } | 651 | } |
650 | 652 | ||
651 | /* Mark as unmapped */ | 653 | /* Mark as unmapped */ |
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014ed..66ee15629a76 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c | |||
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | |||
841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
843 | else | 843 | else |
844 | wake_up_interruptible(&priv->wait_command_queue); | 844 | wake_up(&priv->wait_command_queue); |
845 | } | 845 | } |
846 | 846 | ||
847 | /** | 847 | /** |
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) | |||
2269 | iwl3945_reg_txpower_periodic(priv); | 2269 | iwl3945_reg_txpower_periodic(priv); |
2270 | 2270 | ||
2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
2272 | wake_up_interruptible(&priv->wait_command_queue); | 2272 | wake_up(&priv->wait_command_queue); |
2273 | 2273 | ||
2274 | return; | 2274 | return; |
2275 | 2275 | ||
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv) | |||
2300 | iwl_legacy_clear_driver_stations(priv); | 2300 | iwl_legacy_clear_driver_stations(priv); |
2301 | 2301 | ||
2302 | /* Unblock any waiting calls */ | 2302 | /* Unblock any waiting calls */ |
2303 | wake_up_interruptible_all(&priv->wait_command_queue); | 2303 | wake_up_all(&priv->wait_command_queue); |
2304 | 2304 | ||
2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
2306 | * exiting the module */ | 2306 | * exiting the module */ |
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) | |||
2853 | 2853 | ||
2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | 2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from |
2855 | * mac80211 will not be run successfully. */ | 2855 | * mac80211 will not be run successfully. */ |
2856 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2856 | ret = wait_event_timeout(priv->wait_command_queue, |
2857 | test_bit(STATUS_READY, &priv->status), | 2857 | test_bit(STATUS_READY, &priv->status), |
2858 | UCODE_READY_TIMEOUT); | 2858 | UCODE_READY_TIMEOUT); |
2859 | if (!ret) { | 2859 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034e..aa0c2539761e 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c | |||
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, | |||
576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
578 | else | 578 | else |
579 | wake_up_interruptible(&priv->wait_command_queue); | 579 | wake_up(&priv->wait_command_queue); |
580 | } | 580 | } |
581 | 581 | ||
582 | /** | 582 | /** |
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
926 | handled |= CSR_INT_BIT_FH_TX; | 926 | handled |= CSR_INT_BIT_FH_TX; |
927 | /* Wake up uCode load routine, now that load is complete */ | 927 | /* Wake up uCode load routine, now that load is complete */ |
928 | priv->ucode_write_complete = 1; | 928 | priv->ucode_write_complete = 1; |
929 | wake_up_interruptible(&priv->wait_command_queue); | 929 | wake_up(&priv->wait_command_queue); |
930 | } | 930 | } |
931 | 931 | ||
932 | if (inta & ~handled) { | 932 | if (inta & ~handled) { |
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv) | |||
1795 | iwl4965_rf_kill_ct_config(priv); | 1795 | iwl4965_rf_kill_ct_config(priv); |
1796 | 1796 | ||
1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
1798 | wake_up_interruptible(&priv->wait_command_queue); | 1798 | wake_up(&priv->wait_command_queue); |
1799 | 1799 | ||
1800 | iwl_legacy_power_update_mode(priv, true); | 1800 | iwl_legacy_power_update_mode(priv, true); |
1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); | 1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); |
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv) | |||
1828 | iwl_legacy_clear_driver_stations(priv); | 1828 | iwl_legacy_clear_driver_stations(priv); |
1829 | 1829 | ||
1830 | /* Unblock any waiting calls */ | 1830 | /* Unblock any waiting calls */ |
1831 | wake_up_interruptible_all(&priv->wait_command_queue); | 1831 | wake_up_all(&priv->wait_command_queue); |
1832 | 1832 | ||
1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
1834 | * exiting the module */ | 1834 | * exiting the module */ |
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw) | |||
2266 | 2266 | ||
2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from | 2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from |
2268 | * mac80211 will not be run successfully. */ | 2268 | * mac80211 will not be run successfully. */ |
2269 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2269 | ret = wait_event_timeout(priv->wait_command_queue, |
2270 | test_bit(STATUS_READY, &priv->status), | 2270 | test_bit(STATUS_READY, &priv->status), |
2271 | UCODE_READY_TIMEOUT); | 2271 | UCODE_READY_TIMEOUT); |
2272 | if (!ret) { | 2272 | if (!ret) { |
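The iwlegacy hunks convert every wait on wait_command_queue from the interruptible variants to wait_event_timeout()/wake_up(), so a pending signal can no longer abort a firmware-command wait and leave the STATUS_HCMD_ACTIVE bookkeeping half-finished. A kernel-context sketch of the non-interruptible waiter/waker pairing (the flag name and timeout are illustrative):

/* Sketch of the waiter/waker pairing adopted above; names are illustrative. */
#include <linux/wait.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_WAIT_QUEUE_HEAD(cmd_waitq);
static bool cmd_done;

static int wait_for_cmd(void)
{
	long ret;

	/* Not interruptible: a signal cannot abort the wait mid-command. */
	ret = wait_event_timeout(cmd_waitq, cmd_done, 2 * HZ);
	return ret ? 0 : -ETIMEDOUT;
}

static void complete_cmd(void)
{
	cmd_done = true;
	wake_up(&cmd_waitq);		/* pairs with wait_event_timeout() */
}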
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e97055..77e528f5db88 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, | |||
405 | 405 | ||
406 | mutex_lock(&priv->mutex); | 406 | mutex_lock(&priv->mutex); |
407 | 407 | ||
408 | if (test_bit(STATUS_SCANNING, &priv->status) && | ||
409 | priv->scan_type != IWL_SCAN_NORMAL) { | ||
410 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
411 | ret = -EAGAIN; | ||
412 | goto out_unlock; | ||
413 | } | ||
414 | |||
415 | /* mac80211 will only ask for one band at a time */ | ||
416 | priv->scan_request = req; | ||
417 | priv->scan_vif = vif; | ||
418 | |||
419 | /* | 408 | /* |
420 | * If an internal scan is in progress, just set | 409 | * If an internal scan is in progress, just set |
421 | * up the scan_request as per above. | 410 | * up the scan_request as per above. |
422 | */ | 411 | */ |
423 | if (priv->scan_type != IWL_SCAN_NORMAL) { | 412 | if (priv->scan_type != IWL_SCAN_NORMAL) { |
424 | IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); | 413 | IWL_DEBUG_SCAN(priv, |
414 | "SCAN request during internal scan - defer\n"); | ||
415 | priv->scan_request = req; | ||
416 | priv->scan_vif = vif; | ||
425 | ret = 0; | 417 | ret = 0; |
426 | } else | 418 | } else { |
419 | priv->scan_request = req; | ||
420 | priv->scan_vif = vif; | ||
421 | /* | ||
422 | * mac80211 will only ask for one band at a time | ||
423 | * so using channels[0] here is ok | ||
424 | */ | ||
427 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, | 425 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, |
428 | req->channels[0]->band); | 426 | req->channels[0]->band); |
427 | if (ret) { | ||
428 | priv->scan_request = NULL; | ||
429 | priv->scan_vif = NULL; | ||
430 | } | ||
431 | } | ||
429 | 432 | ||
430 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 433 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
431 | 434 | ||
432 | out_unlock: | ||
433 | mutex_unlock(&priv->mutex); | 435 | mutex_unlock(&priv->mutex); |
434 | 436 | ||
435 | return ret; | 437 | return ret; |
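
The iwl-scan.c rework defers a mac80211 scan request that arrives while an internal scan is running, and clears scan_request/scan_vif again if starting a normal scan fails, so a failed attempt does not leave stale state behind the mutex. A compressed sketch of that record-then-roll-back shape (scan_priv, scan_req and start_hw_scan() are illustrative names, not driver identifiers):

    #include <linux/mutex.h>

    struct scan_req;                     /* opaque request from the stack */

    struct scan_priv {
        struct mutex lock;
        bool internal_scan_running;
        struct scan_req *scan_request;
    };

    static int start_hw_scan(struct scan_priv *p, struct scan_req *req); /* hypothetical */

    static int queue_or_start_scan(struct scan_priv *p, struct scan_req *req)
    {
        int ret = 0;

        mutex_lock(&p->lock);
        p->scan_request = req;           /* record the request up front */

        if (p->internal_scan_running) {
            /* Defer: the internal scan's completion handler picks up
             * p->scan_request and starts the real scan later. */
            goto out;
        }

        ret = start_hw_scan(p, req);
        if (ret)
            p->scan_request = NULL;      /* roll back on failure */
    out:
        mutex_unlock(&p->lock);
        return ret;
    }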
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde6..4bf3cf457ef0 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
863 | u8 tid = 0; | 863 | u8 tid = 0; |
864 | u16 seq_number = 0; | 864 | u16 seq_number = 0; |
865 | 865 | ||
866 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | ||
866 | if (ieee80211_is_auth(fc)) { | 867 | if (ieee80211_is_auth(fc)) { |
867 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); | 868 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); |
868 | rtl_ips_nic_on(hw); | 869 | rtl_ips_nic_on(hw); |
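
The one-line rtlwifi fix zeroes the on-stack rtl_tcb_desc before the TX path fills in only a subset of its fields; without the memset the untouched fields carry stack garbage into the descriptor. An empty initializer gives the same guarantee, for example (tx_desc is an illustrative stand-in, not the driver's struct):

    #include <linux/types.h>

    struct tx_desc {                     /* stand-in for rtl_tcb_desc */
        u8 hw_rate;
        u8 use_shortgi;
        u8 queue_index;
    };

    static void prepare_tx(struct tx_desc *out)
    {
        struct tx_desc d = {};           /* same effect as memset(&d, 0, sizeof(d)) */

        d.hw_rate = 0x0c;                /* only some fields set explicitly */
        *out = d;                        /* the rest are guaranteed to be zero */
    }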
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
327 | xenvif_get(vif); | 327 | xenvif_get(vif); |
328 | 328 | ||
329 | rtnl_lock(); | 329 | rtnl_lock(); |
330 | if (netif_running(vif->dev)) | ||
331 | xenvif_up(vif); | ||
332 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 330 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
333 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 331 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
334 | netdev_update_features(vif->dev); | 332 | netdev_update_features(vif->dev); |
335 | netif_carrier_on(vif->dev); | 333 | netif_carrier_on(vif->dev); |
334 | if (netif_running(vif->dev)) | ||
335 | xenvif_up(vif); | ||
336 | rtnl_unlock(); | 336 | rtnl_unlock(); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a4312..e9651f0a8817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; | 80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) | |||
3568 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3570 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { | ||
3572 | pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | 3573 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { |
3572 | pcie_bus_config = PCIE_BUS_SAFE; | 3574 | pcie_bus_config = PCIE_BUS_SAFE; |
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | 3575 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { |
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | 3576 | pcie_bus_config = PCIE_BUS_PERFORMANCE; |
3577 | } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { | ||
3578 | pcie_bus_config = PCIE_BUS_PEER2PEER; | ||
3575 | } else { | 3579 | } else { |
3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3580 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3577 | str); | 3581 | str); |
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068f..6ab6bd3df4b2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | |||
1458 | */ | 1458 | */ |
1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | 1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) |
1460 | { | 1460 | { |
1461 | u8 smpss = mpss; | 1461 | u8 smpss; |
1462 | 1462 | ||
1463 | if (!pci_is_pcie(bus->self)) | 1463 | if (!pci_is_pcie(bus->self)) |
1464 | return; | 1464 | return; |
1465 | 1465 | ||
1466 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
1467 | return; | ||
1468 | |||
1469 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | ||
1470 | * to be aware to the MPS of the destination. To work around this, | ||
1471 | * simply force the MPS of the entire system to the smallest possible. | ||
1472 | */ | ||
1473 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | ||
1474 | smpss = 0; | ||
1475 | |||
1466 | if (pcie_bus_config == PCIE_BUS_SAFE) { | 1476 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
1477 | smpss = mpss; | ||
1478 | |||
1467 | pcie_find_smpss(bus->self, &smpss); | 1479 | pcie_find_smpss(bus->self, &smpss); |
1468 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | 1480 | pci_walk_bus(bus, pcie_find_smpss, &smpss); |
1469 | } | 1481 | } |
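
Together, the pci.c and probe.c hunks add two more PCIe Max Payload Size policies and flip the default to PCIE_BUS_TUNE_OFF, so the kernel now leaves firmware MPS settings alone unless a policy is requested on the command line (pci=pcie_bus_tune_off, pci=pcie_bus_safe, pci=pcie_bus_perf or pci=pcie_bus_peer2peer, matching the strings parsed in pci_setup() above). A condensed sketch of how the policy gates the bus walk, covering only the cases visible in these hunks (find_smallest_mpss() is a hypothetical callback standing in for pcie_find_smpss()):

    #include <linux/pci.h>

    static int find_smallest_mpss(struct pci_dev *dev, void *data); /* hypothetical */

    static void configure_bus_mps(struct pci_bus *bus, u8 mpss)
    {
        u8 smpss;

        if (!pci_is_pcie(bus->self))
            return;
        if (pcie_bus_config == PCIE_BUS_TUNE_OFF)
            return;                      /* keep whatever firmware programmed */

        if (pcie_bus_config == PCIE_BUS_PEER2PEER) {
            smpss = 0;                   /* force the minimum MPS system-wide */
        } else if (pcie_bus_config == PCIE_BUS_SAFE) {
            smpss = mpss;
            pci_walk_bus(bus, find_smallest_mpss, &smpss);  /* hypothetical */
        } else {
            return;                      /* PCIE_BUS_PERFORMANCE: handled elsewhere */
        }
        /* ... apply smpss to every device below this bus ... */
    }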
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f9947..eb3140ee821e 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; | |||
654 | static int console_subchannel_in_use; | 654 | static int console_subchannel_in_use; |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Use tpi to get a pending interrupt, call the interrupt handler and | 657 | * Use cio_tpi to get a pending interrupt and call the interrupt handler. |
658 | * return a pointer to the subchannel structure. | 658 | * Return non-zero if an interrupt was processed, zero otherwise. |
659 | */ | 659 | */ |
660 | static int cio_tpi(void) | 660 | static int cio_tpi(void) |
661 | { | 661 | { |
@@ -667,6 +667,10 @@ static int cio_tpi(void) | |||
667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; | 667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
668 | if (tpi(NULL) != 1) | 668 | if (tpi(NULL) != 1) |
669 | return 0; | 669 | return 0; |
670 | if (tpi_info->adapter_IO) { | ||
671 | do_adapter_IO(tpi_info->isc); | ||
672 | return 1; | ||
673 | } | ||
670 | irb = (struct irb *)&S390_lowcore.irb; | 674 | irb = (struct irb *)&S390_lowcore.irb; |
671 | /* Store interrupt response block to lowcore. */ | 675 | /* Store interrupt response block to lowcore. */ |
672 | if (tsch(tpi_info->schid, irb) != 0) | 676 | if (tsch(tpi_info->schid, irb) != 0) |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7aa..3868ab2397c6 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1800 | switch (retval) { | 1800 | switch (retval) { |
1801 | case SCSI_MLQUEUE_HOST_BUSY: | 1801 | case SCSI_MLQUEUE_HOST_BUSY: |
1802 | twa_free_request_id(tw_dev, request_id); | 1802 | twa_free_request_id(tw_dev, request_id); |
1803 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1803 | break; | 1804 | break; |
1804 | case 1: | 1805 | case 1: |
1805 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1806 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1806 | twa_free_request_id(tw_dev, request_id); | 1807 | twa_free_request_id(tw_dev, request_id); |
1808 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1807 | SCpnt->result = (DID_ERROR << 16); | 1809 | SCpnt->result = (DID_ERROR << 16); |
1808 | done(SCpnt); | 1810 | done(SCpnt); |
1809 | retval = 0; | 1811 | retval = 0; |
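
The 3w-9xxx change adds the missing twa_unmap_scsi_data() calls so that both early-exit paths undo the DMA mapping that was set up when the command was queued. The pairing it restores looks like this in general (adapter, command, map_cmd(), fw_submit() and unmap_cmd() are illustrative names only):

    #include <linux/errno.h>

    struct adapter;
    struct command;

    static int  map_cmd(struct adapter *a, struct command *c);    /* hypothetical */
    static int  fw_submit(struct adapter *a, struct command *c);  /* hypothetical */
    static void unmap_cmd(struct adapter *a, struct command *c);  /* hypothetical */

    static int queue_command(struct adapter *a, struct command *c)
    {
        int ret;

        if (map_cmd(a, c))               /* sets up the scatter/gather mapping */
            return -ENOMEM;

        ret = fw_submit(a, c);
        if (ret) {
            unmap_cmd(a, c);             /* every failure path undoes the map */
            return ret;
        }
        return 0;                        /* the completion path unmaps later */
    }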
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2d..6153a66a8a31 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o | |||
88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o | 88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o |
89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o | 89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o |
90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ | 90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ |
91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ | 91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ |
92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ | 92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ |
93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ | 93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ |
94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o | 94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b9185..e5f2d7d9002e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1283 | kfree(aac->queues); | 1283 | kfree(aac->queues); |
1284 | aac->queues = NULL; | 1284 | aac->queues = NULL; |
1285 | free_irq(aac->pdev->irq, aac); | 1285 | free_irq(aac->pdev->irq, aac); |
1286 | if (aac->msi) | ||
1287 | pci_disable_msi(aac->pdev); | ||
1286 | kfree(aac->fsa_dev); | 1288 | kfree(aac->fsa_dev); |
1287 | aac->fsa_dev = NULL; | 1289 | aac->fsa_dev = NULL; |
1288 | quirks = aac_get_driver_ident(index)->quirks; | 1290 | quirks = aac_get_driver_ident(index)->quirks; |
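
The aacraid hunk pairs the free_irq() on the adapter-reset path with pci_disable_msi(), so a reset does not leave MSI enabled on an interrupt line that is about to be re-requested. The general setup/teardown pairing looks like this (drv_isr and the flag are illustrative, error handling trimmed):

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    static irqreturn_t drv_isr(int irq, void *dev_id)
    {
        return IRQ_HANDLED;              /* real handler work elided */
    }

    static int drv_setup_irq(struct pci_dev *pdev, void *ctx, bool *using_msi)
    {
        *using_msi = !pci_enable_msi(pdev);   /* fall back to legacy INTx on failure */
        return request_irq(pdev->irq, drv_isr,
                           *using_msi ? 0 : IRQF_SHARED, "mydrv", ctx);
    }

    static void drv_teardown_irq(struct pci_dev *pdev, void *ctx, bool using_msi)
    {
        free_irq(pdev->irq, ctx);
        if (using_msi)
            pci_disable_msi(pdev);       /* mirror pci_enable_msi() */
    }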
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e2789..f58644850333 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) | |||
913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | 913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; |
914 | 914 | ||
915 | if (csk->l2t) { | 915 | if (csk->l2t) { |
916 | l2t_release(L2DATA(t3dev), csk->l2t); | 916 | l2t_release(t3dev, csk->l2t); |
917 | csk->l2t = NULL; | 917 | csk->l2t = NULL; |
918 | cxgbi_sock_put(csk); | 918 | cxgbi_sock_put(csk); |
919 | } | 919 | } |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f0..16ad97df5ba6 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, | |||
1721 | list_for_each_entry(ch, &ex->children, siblings) { | 1721 | list_for_each_entry(ch, &ex->children, siblings) { |
1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { | 1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { |
1723 | res = sas_find_bcast_dev(ch, src_dev); | 1723 | res = sas_find_bcast_dev(ch, src_dev); |
1724 | if (src_dev) | 1724 | if (*src_dev) |
1725 | return res; | 1725 | return res; |
1726 | } | 1726 | } |
1727 | } | 1727 | } |
@@ -1769,10 +1769,12 @@ static void sas_unregister_devs_sas_addr(struct domain_device *parent, | |||
1769 | sas_disable_routing(parent, phy->attached_sas_addr); | 1769 | sas_disable_routing(parent, phy->attached_sas_addr); |
1770 | } | 1770 | } |
1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); | 1771 | memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE); |
1772 | sas_port_delete_phy(phy->port, phy->phy); | 1772 | if (phy->port) { |
1773 | if (phy->port->num_phys == 0) | 1773 | sas_port_delete_phy(phy->port, phy->phy); |
1774 | sas_port_delete(phy->port); | 1774 | if (phy->port->num_phys == 0) |
1775 | phy->port = NULL; | 1775 | sas_port_delete(phy->port); |
1776 | phy->port = NULL; | ||
1777 | } | ||
1776 | } | 1778 | } |
1777 | 1779 | ||
1778 | static int sas_discover_bfs_by_root_level(struct domain_device *root, | 1780 | static int sas_discover_bfs_by_root_level(struct domain_device *root, |
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 4cace3f20c04..1e69527f1e4e 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -1328,10 +1328,9 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1328 | qla2x00_sp_compl(ha, sp); | 1328 | qla2x00_sp_compl(ha, sp); |
1329 | } else { | 1329 | } else { |
1330 | ctx = sp->ctx; | 1330 | ctx = sp->ctx; |
1331 | if (ctx->type == SRB_LOGIN_CMD || | 1331 | if (ctx->type == SRB_ELS_CMD_RPT || |
1332 | ctx->type == SRB_LOGOUT_CMD) { | 1332 | ctx->type == SRB_ELS_CMD_HST || |
1333 | ctx->u.iocb_cmd->free(sp); | 1333 | ctx->type == SRB_CT_CMD) { |
1334 | } else { | ||
1335 | struct fc_bsg_job *bsg_job = | 1334 | struct fc_bsg_job *bsg_job = |
1336 | ctx->u.bsg_job; | 1335 | ctx->u.bsg_job; |
1337 | if (bsg_job->request->msgcode | 1336 | if (bsg_job->request->msgcode |
@@ -1343,6 +1342,8 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res) | |||
1343 | kfree(sp->ctx); | 1342 | kfree(sp->ctx); |
1344 | mempool_free(sp, | 1343 | mempool_free(sp, |
1345 | ha->srb_mempool); | 1344 | ha->srb_mempool); |
1345 | } else { | ||
1346 | ctx->u.iocb_cmd->free(sp); | ||
1346 | } | 1347 | } |
1347 | } | 1348 | } |
1348 | } | 1349 | } |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f3831866..6a80749391db 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -50,6 +50,8 @@ | |||
50 | #define PCH_RX_THOLD 7 | 50 | #define PCH_RX_THOLD 7 |
51 | #define PCH_RX_THOLD_MAX 15 | 51 | #define PCH_RX_THOLD_MAX 15 |
52 | 52 | ||
53 | #define PCH_TX_THOLD 2 | ||
54 | |||
53 | #define PCH_MAX_BAUDRATE 5000000 | 55 | #define PCH_MAX_BAUDRATE 5000000 |
54 | #define PCH_MAX_FIFO_DEPTH 16 | 56 | #define PCH_MAX_FIFO_DEPTH 16 |
55 | 57 | ||
@@ -58,6 +60,7 @@ | |||
58 | #define PCH_SLEEP_TIME 10 | 60 | #define PCH_SLEEP_TIME 10 |
59 | 61 | ||
60 | #define SSN_LOW 0x02U | 62 | #define SSN_LOW 0x02U |
63 | #define SSN_HIGH 0x03U | ||
61 | #define SSN_NO_CONTROL 0x00U | 64 | #define SSN_NO_CONTROL 0x00U |
62 | #define PCH_MAX_CS 0xFF | 65 | #define PCH_MAX_CS 0xFF |
63 | #define PCI_DEVICE_ID_GE_SPI 0x8816 | 66 | #define PCI_DEVICE_ID_GE_SPI 0x8816 |
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, | |||
316 | 319 | ||
317 | /* if transfer complete interrupt */ | 320 | /* if transfer complete interrupt */ |
318 | if (reg_spsr_val & SPSR_FI_BIT) { | 321 | if (reg_spsr_val & SPSR_FI_BIT) { |
319 | if (tx_index < bpw_len) | 322 | if ((tx_index == bpw_len) && (rx_index == tx_index)) { |
323 | /* disable interrupts */ | ||
324 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
325 | |||
326 | /* transfer is completed; | ||
327 | inform pch_spi_process_messages */ | ||
328 | data->transfer_complete = true; | ||
329 | data->transfer_active = false; | ||
330 | wake_up(&data->wait); | ||
331 | } else { | ||
320 | dev_err(&data->master->dev, | 332 | dev_err(&data->master->dev, |
321 | "%s : Transfer is not completed", __func__); | 333 | "%s : Transfer is not completed", __func__); |
322 | /* disable interrupts */ | 334 | } |
323 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
324 | |||
325 | /* transfer is completed;inform pch_spi_process_messages */ | ||
326 | data->transfer_complete = true; | ||
327 | data->transfer_active = false; | ||
328 | wake_up(&data->wait); | ||
329 | } | 335 | } |
330 | } | 336 | } |
331 | 337 | ||
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) | |||
348 | "%s returning due to suspend\n", __func__); | 354 | "%s returning due to suspend\n", __func__); |
349 | return IRQ_NONE; | 355 | return IRQ_NONE; |
350 | } | 356 | } |
351 | if (data->use_dma) | ||
352 | return IRQ_NONE; | ||
353 | 357 | ||
354 | io_remap_addr = data->io_remap_addr; | 358 | io_remap_addr = data->io_remap_addr; |
355 | spsr = io_remap_addr + PCH_SPSR; | 359 | spsr = io_remap_addr + PCH_SPSR; |
356 | 360 | ||
357 | reg_spsr_val = ioread32(spsr); | 361 | reg_spsr_val = ioread32(spsr); |
358 | 362 | ||
359 | if (reg_spsr_val & SPSR_ORF_BIT) | 363 | if (reg_spsr_val & SPSR_ORF_BIT) { |
360 | dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); | 364 | dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); |
365 | if (data->current_msg->complete != 0) { | ||
366 | data->transfer_complete = true; | ||
367 | data->current_msg->status = -EIO; | ||
368 | data->current_msg->complete(data->current_msg->context); | ||
369 | data->bcurrent_msg_processing = false; | ||
370 | data->current_msg = NULL; | ||
371 | data->cur_trans = NULL; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | if (data->use_dma) | ||
376 | return IRQ_NONE; | ||
361 | 377 | ||
362 | /* Check if the interrupt is for SPI device */ | 378 | /* Check if the interrupt is for SPI device */ |
363 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { | 379 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { |
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) | |||
756 | 772 | ||
757 | wait_event_interruptible(data->wait, data->transfer_complete); | 773 | wait_event_interruptible(data->wait, data->transfer_complete); |
758 | 774 | ||
759 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
760 | dev_dbg(&data->master->dev, | ||
761 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
762 | |||
763 | /* clear all interrupts */ | 775 | /* clear all interrupts */ |
764 | pch_spi_writereg(data->master, PCH_SPSR, | 776 | pch_spi_writereg(data->master, PCH_SPSR, |
765 | pch_spi_readreg(data->master, PCH_SPSR)); | 777 | pch_spi_readreg(data->master, PCH_SPSR)); |
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) | |||
815 | } | 827 | } |
816 | } | 828 | } |
817 | 829 | ||
818 | static void pch_spi_start_transfer(struct pch_spi_data *data) | 830 | static int pch_spi_start_transfer(struct pch_spi_data *data) |
819 | { | 831 | { |
820 | struct pch_spi_dma_ctrl *dma; | 832 | struct pch_spi_dma_ctrl *dma; |
821 | unsigned long flags; | 833 | unsigned long flags; |
834 | int rtn; | ||
822 | 835 | ||
823 | dma = &data->dma; | 836 | dma = &data->dma; |
824 | 837 | ||
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
833 | initiating the transfer. */ | 846 | initiating the transfer. */ |
834 | dev_dbg(&data->master->dev, | 847 | dev_dbg(&data->master->dev, |
835 | "%s:waiting for transfer to get over\n", __func__); | 848 | "%s:waiting for transfer to get over\n", __func__); |
836 | wait_event_interruptible(data->wait, data->transfer_complete); | 849 | rtn = wait_event_interruptible_timeout(data->wait, |
850 | data->transfer_complete, | ||
851 | msecs_to_jiffies(2 * HZ)); | ||
837 | 852 | ||
838 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, | 853 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, |
839 | DMA_FROM_DEVICE); | 854 | DMA_FROM_DEVICE); |
855 | |||
856 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, | ||
857 | DMA_FROM_DEVICE); | ||
858 | memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); | ||
859 | |||
840 | async_tx_ack(dma->desc_rx); | 860 | async_tx_ack(dma->desc_rx); |
841 | async_tx_ack(dma->desc_tx); | 861 | async_tx_ack(dma->desc_tx); |
842 | kfree(dma->sg_tx_p); | 862 | kfree(dma->sg_tx_p); |
843 | kfree(dma->sg_rx_p); | 863 | kfree(dma->sg_rx_p); |
844 | 864 | ||
845 | spin_lock_irqsave(&data->lock, flags); | 865 | spin_lock_irqsave(&data->lock, flags); |
846 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
847 | dev_dbg(&data->master->dev, | ||
848 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
849 | 866 | ||
850 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ | 867 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ |
851 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, | 868 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, |
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
858 | pch_spi_clear_fifo(data->master); | 875 | pch_spi_clear_fifo(data->master); |
859 | 876 | ||
860 | spin_unlock_irqrestore(&data->lock, flags); | 877 | spin_unlock_irqrestore(&data->lock, flags); |
878 | |||
879 | return rtn; | ||
861 | } | 880 | } |
862 | 881 | ||
863 | static void pch_dma_rx_complete(void *arg) | 882 | static void pch_dma_rx_complete(void *arg) |
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1023 | /* set receive fifo threshold and transmit fifo threshold */ | 1042 | /* set receive fifo threshold and transmit fifo threshold */ |
1024 | pch_spi_setclr_reg(data->master, PCH_SPCR, | 1043 | pch_spi_setclr_reg(data->master, PCH_SPCR, |
1025 | ((size - 1) << SPCR_RFIC_FIELD) | | 1044 | ((size - 1) << SPCR_RFIC_FIELD) | |
1026 | ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << | 1045 | (PCH_TX_THOLD << SPCR_TFIC_FIELD), |
1027 | SPCR_TFIC_FIELD), | ||
1028 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); | 1046 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); |
1029 | 1047 | ||
1030 | spin_unlock_irqrestore(&data->lock, flags); | 1048 | spin_unlock_irqrestore(&data->lock, flags); |
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1035 | /* offset, length setting */ | 1053 | /* offset, length setting */ |
1036 | sg = dma->sg_rx_p; | 1054 | sg = dma->sg_rx_p; |
1037 | for (i = 0; i < num; i++, sg++) { | 1055 | for (i = 0; i < num; i++, sg++) { |
1038 | if (i == 0) { | 1056 | if (i == (num - 2)) { |
1039 | sg->offset = 0; | 1057 | sg->offset = size * i; |
1058 | sg->offset = sg->offset * (*bpw / 8); | ||
1040 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, | 1059 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, |
1041 | sg->offset); | 1060 | sg->offset); |
1042 | sg_dma_len(sg) = rem; | 1061 | sg_dma_len(sg) = rem; |
1062 | } else if (i == (num - 1)) { | ||
1063 | sg->offset = size * (i - 1) + rem; | ||
1064 | sg->offset = sg->offset * (*bpw / 8); | ||
1065 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | ||
1066 | sg->offset); | ||
1067 | sg_dma_len(sg) = size; | ||
1043 | } else { | 1068 | } else { |
1044 | sg->offset = rem + size * (i - 1); | 1069 | sg->offset = size * i; |
1045 | sg->offset = sg->offset * (*bpw / 8); | 1070 | sg->offset = sg->offset * (*bpw / 8); |
1046 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | 1071 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, |
1047 | sg->offset); | 1072 | sg->offset); |
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1065 | dma->desc_rx = desc_rx; | 1090 | dma->desc_rx = desc_rx; |
1066 | 1091 | ||
1067 | /* TX */ | 1092 | /* TX */ |
1093 | if (data->bpw_len > PCH_DMA_TRANS_SIZE) { | ||
1094 | num = data->bpw_len / PCH_DMA_TRANS_SIZE; | ||
1095 | size = PCH_DMA_TRANS_SIZE; | ||
1096 | rem = 16; | ||
1097 | } else { | ||
1098 | num = 1; | ||
1099 | size = data->bpw_len; | ||
1100 | rem = data->bpw_len; | ||
1101 | } | ||
1102 | |||
1068 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); | 1103 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); |
1069 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ | 1104 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ |
1070 | /* offset, length setting */ | 1105 | /* offset, length setting */ |
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1162 | if (data->use_dma) | 1197 | if (data->use_dma) |
1163 | pch_spi_request_dma(data, | 1198 | pch_spi_request_dma(data, |
1164 | data->current_msg->spi->bits_per_word); | 1199 | data->current_msg->spi->bits_per_word); |
1200 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
1165 | do { | 1201 | do { |
1166 | /* If we are already processing a message get the next | 1202 | /* If we are already processing a message get the next |
1167 | transfer structure from the message otherwise retrieve | 1203 | transfer structure from the message otherwise retrieve |
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1184 | 1220 | ||
1185 | if (data->use_dma) { | 1221 | if (data->use_dma) { |
1186 | pch_spi_handle_dma(data, &bpw); | 1222 | pch_spi_handle_dma(data, &bpw); |
1187 | pch_spi_start_transfer(data); | 1223 | if (!pch_spi_start_transfer(data)) |
1224 | goto out; | ||
1188 | pch_spi_copy_rx_data_for_dma(data, bpw); | 1225 | pch_spi_copy_rx_data_for_dma(data, bpw); |
1189 | } else { | 1226 | } else { |
1190 | pch_spi_set_tx(data, &bpw); | 1227 | pch_spi_set_tx(data, &bpw); |
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1222 | 1259 | ||
1223 | } while (data->cur_trans != NULL); | 1260 | } while (data->cur_trans != NULL); |
1224 | 1261 | ||
1262 | out: | ||
1263 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); | ||
1225 | if (data->use_dma) | 1264 | if (data->use_dma) |
1226 | pch_spi_release_dma(data); | 1265 | pch_spi_release_dma(data); |
1227 | } | 1266 | } |
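
Several of the spi-topcliff-pch changes hang off pch_spi_start_transfer() now returning the result of wait_event_interruptible_timeout(), which the caller treats as "did the DMA complete". For reference, that macro returns a positive value when the condition became true, 0 on timeout, and -ERESTARTSYS if a signal arrived first. A small self-contained sketch (xfer_ctx is an illustrative stand-in for the driver's pch_spi_data):

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    struct xfer_ctx {
        wait_queue_head_t wait;          /* init_waitqueue_head() elsewhere */
        bool transfer_complete;
    };

    /* Returns 0 on success, -ETIMEDOUT or -ERESTARTSYS otherwise. */
    static int wait_for_dma_done(struct xfer_ctx *c, unsigned int ms)
    {
        long ret = wait_event_interruptible_timeout(c->wait,
                                                    c->transfer_complete,
                                                    msecs_to_jiffies(ms));
        if (ret > 0)
            return 0;                    /* condition met before the timeout */
        if (ret == 0)
            return -ETIMEDOUT;           /* timed out, DMA never completed */
        return ret;                      /* -ERESTARTSYS: interrupted by a signal */
    }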
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c index 1a7c19ae766f..8b307b428791 100644 --- a/drivers/staging/octeon/ethernet-rx.c +++ b/drivers/staging/octeon/ethernet-rx.c | |||
@@ -411,7 +411,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget) | |||
411 | skb->protocol = eth_type_trans(skb, dev); | 411 | skb->protocol = eth_type_trans(skb, dev); |
412 | skb->dev = dev; | 412 | skb->dev = dev; |
413 | 413 | ||
414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || work->word2.s.L4_error)) | 414 | if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc || |
415 | work->word2.s.L4_error || !work->word2.s.tcp_or_udp)) | ||
415 | skb->ip_summed = CHECKSUM_NONE; | 416 | skb->ip_summed = CHECKSUM_NONE; |
416 | else | 417 | else |
417 | skb->ip_summed = CHECKSUM_UNNECESSARY; | 418 | skb->ip_summed = CHECKSUM_UNNECESSARY; |
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c index 58cf279ed879..bc95f52cad8b 100644 --- a/drivers/tty/serial/lantiq.c +++ b/drivers/tty/serial/lantiq.c | |||
@@ -478,8 +478,10 @@ lqasc_set_termios(struct uart_port *port, | |||
478 | spin_unlock_irqrestore(<q_asc_lock, flags); | 478 | spin_unlock_irqrestore(<q_asc_lock, flags); |
479 | 479 | ||
480 | /* Don't rewrite B0 */ | 480 | /* Don't rewrite B0 */ |
481 | if (tty_termios_baud_rate(new)) | 481 | if (tty_termios_baud_rate(new)) |
482 | tty_termios_encode_baud_rate(new, baud, baud); | 482 | tty_termios_encode_baud_rate(new, baud, baud); |
483 | |||
484 | uart_update_timeout(port, cflag, baud); | ||
483 | } | 485 | } |
484 | 486 | ||
485 | static const char* | 487 | static const char* |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a381cd22f518..e4e57d59edb7 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1036,11 +1036,13 @@ out: | |||
1036 | * on error we return an unlocked page and the error value | 1036 | * on error we return an unlocked page and the error value |
1037 | * on success we return a locked page and 0 | 1037 | * on success we return a locked page and 0 |
1038 | */ | 1038 | */ |
1039 | static int prepare_uptodate_page(struct page *page, u64 pos) | 1039 | static int prepare_uptodate_page(struct page *page, u64 pos, |
1040 | bool force_uptodate) | ||
1040 | { | 1041 | { |
1041 | int ret = 0; | 1042 | int ret = 0; |
1042 | 1043 | ||
1043 | if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { | 1044 | if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && |
1045 | !PageUptodate(page)) { | ||
1044 | ret = btrfs_readpage(NULL, page); | 1046 | ret = btrfs_readpage(NULL, page); |
1045 | if (ret) | 1047 | if (ret) |
1046 | return ret; | 1048 | return ret; |
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos) | |||
1061 | static noinline int prepare_pages(struct btrfs_root *root, struct file *file, | 1063 | static noinline int prepare_pages(struct btrfs_root *root, struct file *file, |
1062 | struct page **pages, size_t num_pages, | 1064 | struct page **pages, size_t num_pages, |
1063 | loff_t pos, unsigned long first_index, | 1065 | loff_t pos, unsigned long first_index, |
1064 | size_t write_bytes) | 1066 | size_t write_bytes, bool force_uptodate) |
1065 | { | 1067 | { |
1066 | struct extent_state *cached_state = NULL; | 1068 | struct extent_state *cached_state = NULL; |
1067 | int i; | 1069 | int i; |
@@ -1086,10 +1088,11 @@ again: | |||
1086 | } | 1088 | } |
1087 | 1089 | ||
1088 | if (i == 0) | 1090 | if (i == 0) |
1089 | err = prepare_uptodate_page(pages[i], pos); | 1091 | err = prepare_uptodate_page(pages[i], pos, |
1092 | force_uptodate); | ||
1090 | if (i == num_pages - 1) | 1093 | if (i == num_pages - 1) |
1091 | err = prepare_uptodate_page(pages[i], | 1094 | err = prepare_uptodate_page(pages[i], |
1092 | pos + write_bytes); | 1095 | pos + write_bytes, false); |
1093 | if (err) { | 1096 | if (err) { |
1094 | page_cache_release(pages[i]); | 1097 | page_cache_release(pages[i]); |
1095 | faili = i - 1; | 1098 | faili = i - 1; |
@@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1158 | size_t num_written = 0; | 1161 | size_t num_written = 0; |
1159 | int nrptrs; | 1162 | int nrptrs; |
1160 | int ret = 0; | 1163 | int ret = 0; |
1164 | bool force_page_uptodate = false; | ||
1161 | 1165 | ||
1162 | nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / | 1166 | nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / |
1163 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / | 1167 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / |
@@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1200 | * contents of pages from loop to loop | 1204 | * contents of pages from loop to loop |
1201 | */ | 1205 | */ |
1202 | ret = prepare_pages(root, file, pages, num_pages, | 1206 | ret = prepare_pages(root, file, pages, num_pages, |
1203 | pos, first_index, write_bytes); | 1207 | pos, first_index, write_bytes, |
1208 | force_page_uptodate); | ||
1204 | if (ret) { | 1209 | if (ret) { |
1205 | btrfs_delalloc_release_space(inode, | 1210 | btrfs_delalloc_release_space(inode, |
1206 | num_pages << PAGE_CACHE_SHIFT); | 1211 | num_pages << PAGE_CACHE_SHIFT); |
@@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1217 | if (copied < write_bytes) | 1222 | if (copied < write_bytes) |
1218 | nrptrs = 1; | 1223 | nrptrs = 1; |
1219 | 1224 | ||
1220 | if (copied == 0) | 1225 | if (copied == 0) { |
1226 | force_page_uptodate = true; | ||
1221 | dirty_pages = 0; | 1227 | dirty_pages = 0; |
1222 | else | 1228 | } else { |
1229 | force_page_uptodate = false; | ||
1223 | dirty_pages = (copied + offset + | 1230 | dirty_pages = (copied + offset + |
1224 | PAGE_CACHE_SIZE - 1) >> | 1231 | PAGE_CACHE_SIZE - 1) >> |
1225 | PAGE_CACHE_SHIFT; | 1232 | PAGE_CACHE_SHIFT; |
1233 | } | ||
1226 | 1234 | ||
1227 | /* | 1235 | /* |
1228 | * If we had a short copy we need to release the excess delaloc | 1236 | * If we had a short copy we need to release the excess delaloc |
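
In the btrfs buffered-write path, a copy that returns 0 bytes now forces the first page fully up to date on the retry (force_page_uptodate), so a later partial copy into that page cannot expose stale, unread contents. The decision prepare_uptodate_page() makes reduces to roughly this (read_whole_page() is a hypothetical helper):

    #include <linux/pagemap.h>
    #include <linux/page-flags.h>

    static int read_whole_page(struct page *page);  /* hypothetical */

    static int maybe_read_page(struct page *page, loff_t pos, bool force_uptodate)
    {
        /* Read the existing contents when the write does not start on a
         * page boundary, or when a previous copy attempt came up empty. */
        if ((force_uptodate || (pos & (PAGE_CACHE_SIZE - 1))) &&
            !PageUptodate(page))
            return read_whole_page(page);
        return 0;
    }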
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 538f65a79ec5..dae5dfe41ba5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -1047,7 +1047,16 @@ int btrfs_defrag_file(struct inode *inode, struct file *file, | |||
1047 | if (!max_to_defrag) | 1047 | if (!max_to_defrag) |
1048 | max_to_defrag = last_index - 1; | 1048 | max_to_defrag = last_index - 1; |
1049 | 1049 | ||
1050 | while (i <= last_index && defrag_count < max_to_defrag) { | 1050 | /* |
1051 | * make writeback starts from i, so the defrag range can be | ||
1052 | * written sequentially. | ||
1053 | */ | ||
1054 | if (i < inode->i_mapping->writeback_index) | ||
1055 | inode->i_mapping->writeback_index = i; | ||
1056 | |||
1057 | while (i <= last_index && defrag_count < max_to_defrag && | ||
1058 | (i < (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> | ||
1059 | PAGE_CACHE_SHIFT)) { | ||
1051 | /* | 1060 | /* |
1052 | * make sure we stop running if someone unmounts | 1061 | * make sure we stop running if someone unmounts |
1053 | * the FS | 1062 | * the FS |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index f4af4cc37500..71beb0201970 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -2018,7 +2018,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | |||
2018 | warned_on_ntlm = true; | 2018 | warned_on_ntlm = true; |
2019 | cERROR(1, "default security mechanism requested. The default " | 2019 | cERROR(1, "default security mechanism requested. The default " |
2020 | "security mechanism will be upgraded from ntlm to " | 2020 | "security mechanism will be upgraded from ntlm to " |
2021 | "ntlmv2 in kernel release 3.1"); | 2021 | "ntlmv2 in kernel release 3.2"); |
2022 | } | 2022 | } |
2023 | ses->overrideSecFlg = volume_info->secFlg; | 2023 | ses->overrideSecFlg = volume_info->secFlg; |
2024 | 2024 | ||
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c index cac2ecfa6746..ef43fce519a1 100644 --- a/fs/xfs/xfs_buf_item.c +++ b/fs/xfs/xfs_buf_item.c | |||
@@ -629,7 +629,7 @@ xfs_buf_item_push( | |||
629 | * the xfsbufd to get this buffer written. We have to unlock the buffer | 629 | * the xfsbufd to get this buffer written. We have to unlock the buffer |
630 | * to allow the xfsbufd to write it, too. | 630 | * to allow the xfsbufd to write it, too. |
631 | */ | 631 | */ |
632 | STATIC void | 632 | STATIC bool |
633 | xfs_buf_item_pushbuf( | 633 | xfs_buf_item_pushbuf( |
634 | struct xfs_log_item *lip) | 634 | struct xfs_log_item *lip) |
635 | { | 635 | { |
@@ -643,6 +643,7 @@ xfs_buf_item_pushbuf( | |||
643 | 643 | ||
644 | xfs_buf_delwri_promote(bp); | 644 | xfs_buf_delwri_promote(bp); |
645 | xfs_buf_relse(bp); | 645 | xfs_buf_relse(bp); |
646 | return true; | ||
646 | } | 647 | } |
647 | 648 | ||
648 | STATIC void | 649 | STATIC void |
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c index 9e0e2fa3f2c8..bb3f71d236d2 100644 --- a/fs/xfs/xfs_dquot_item.c +++ b/fs/xfs/xfs_dquot_item.c | |||
@@ -183,13 +183,14 @@ xfs_qm_dqunpin_wait( | |||
183 | * search the buffer cache can be a time consuming thing, and AIL lock is a | 183 | * search the buffer cache can be a time consuming thing, and AIL lock is a |
184 | * spinlock. | 184 | * spinlock. |
185 | */ | 185 | */ |
186 | STATIC void | 186 | STATIC bool |
187 | xfs_qm_dquot_logitem_pushbuf( | 187 | xfs_qm_dquot_logitem_pushbuf( |
188 | struct xfs_log_item *lip) | 188 | struct xfs_log_item *lip) |
189 | { | 189 | { |
190 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); | 190 | struct xfs_dq_logitem *qlip = DQUOT_ITEM(lip); |
191 | struct xfs_dquot *dqp = qlip->qli_dquot; | 191 | struct xfs_dquot *dqp = qlip->qli_dquot; |
192 | struct xfs_buf *bp; | 192 | struct xfs_buf *bp; |
193 | bool ret = true; | ||
193 | 194 | ||
194 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); | 195 | ASSERT(XFS_DQ_IS_LOCKED(dqp)); |
195 | 196 | ||
@@ -201,17 +202,20 @@ xfs_qm_dquot_logitem_pushbuf( | |||
201 | if (completion_done(&dqp->q_flush) || | 202 | if (completion_done(&dqp->q_flush) || |
202 | !(lip->li_flags & XFS_LI_IN_AIL)) { | 203 | !(lip->li_flags & XFS_LI_IN_AIL)) { |
203 | xfs_dqunlock(dqp); | 204 | xfs_dqunlock(dqp); |
204 | return; | 205 | return true; |
205 | } | 206 | } |
206 | 207 | ||
207 | bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, | 208 | bp = xfs_incore(dqp->q_mount->m_ddev_targp, qlip->qli_format.qlf_blkno, |
208 | dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); | 209 | dqp->q_mount->m_quotainfo->qi_dqchunklen, XBF_TRYLOCK); |
209 | xfs_dqunlock(dqp); | 210 | xfs_dqunlock(dqp); |
210 | if (!bp) | 211 | if (!bp) |
211 | return; | 212 | return true; |
212 | if (XFS_BUF_ISDELAYWRITE(bp)) | 213 | if (XFS_BUF_ISDELAYWRITE(bp)) |
213 | xfs_buf_delwri_promote(bp); | 214 | xfs_buf_delwri_promote(bp); |
215 | if (xfs_buf_ispinned(bp)) | ||
216 | ret = false; | ||
214 | xfs_buf_relse(bp); | 217 | xfs_buf_relse(bp); |
218 | return ret; | ||
215 | } | 219 | } |
216 | 220 | ||
217 | /* | 221 | /* |
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c index 588406dc6a35..836ad80d4f2b 100644 --- a/fs/xfs/xfs_inode_item.c +++ b/fs/xfs/xfs_inode_item.c | |||
@@ -708,13 +708,14 @@ xfs_inode_item_committed( | |||
708 | * marked delayed write. If that's the case, we'll promote it and that will | 708 | * marked delayed write. If that's the case, we'll promote it and that will |
709 | * allow the caller to write the buffer by triggering the xfsbufd to run. | 709 | * allow the caller to write the buffer by triggering the xfsbufd to run. |
710 | */ | 710 | */ |
711 | STATIC void | 711 | STATIC bool |
712 | xfs_inode_item_pushbuf( | 712 | xfs_inode_item_pushbuf( |
713 | struct xfs_log_item *lip) | 713 | struct xfs_log_item *lip) |
714 | { | 714 | { |
715 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); | 715 | struct xfs_inode_log_item *iip = INODE_ITEM(lip); |
716 | struct xfs_inode *ip = iip->ili_inode; | 716 | struct xfs_inode *ip = iip->ili_inode; |
717 | struct xfs_buf *bp; | 717 | struct xfs_buf *bp; |
718 | bool ret = true; | ||
718 | 719 | ||
719 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); | 720 | ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED)); |
720 | 721 | ||
@@ -725,7 +726,7 @@ xfs_inode_item_pushbuf( | |||
725 | if (completion_done(&ip->i_flush) || | 726 | if (completion_done(&ip->i_flush) || |
726 | !(lip->li_flags & XFS_LI_IN_AIL)) { | 727 | !(lip->li_flags & XFS_LI_IN_AIL)) { |
727 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 728 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
728 | return; | 729 | return true; |
729 | } | 730 | } |
730 | 731 | ||
731 | bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, | 732 | bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno, |
@@ -733,10 +734,13 @@ xfs_inode_item_pushbuf( | |||
733 | 734 | ||
734 | xfs_iunlock(ip, XFS_ILOCK_SHARED); | 735 | xfs_iunlock(ip, XFS_ILOCK_SHARED); |
735 | if (!bp) | 736 | if (!bp) |
736 | return; | 737 | return true; |
737 | if (XFS_BUF_ISDELAYWRITE(bp)) | 738 | if (XFS_BUF_ISDELAYWRITE(bp)) |
738 | xfs_buf_delwri_promote(bp); | 739 | xfs_buf_delwri_promote(bp); |
740 | if (xfs_buf_ispinned(bp)) | ||
741 | ret = false; | ||
739 | xfs_buf_relse(bp); | 742 | xfs_buf_relse(bp); |
743 | return ret; | ||
740 | } | 744 | } |
741 | 745 | ||
742 | /* | 746 | /* |
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h index 1e8a45e74c3e..828662f70d64 100644 --- a/fs/xfs/xfs_linux.h +++ b/fs/xfs/xfs_linux.h | |||
@@ -68,6 +68,8 @@ | |||
68 | #include <linux/ctype.h> | 68 | #include <linux/ctype.h> |
69 | #include <linux/writeback.h> | 69 | #include <linux/writeback.h> |
70 | #include <linux/capability.h> | 70 | #include <linux/capability.h> |
71 | #include <linux/kthread.h> | ||
72 | #include <linux/freezer.h> | ||
71 | #include <linux/list_sort.h> | 73 | #include <linux/list_sort.h> |
72 | 74 | ||
73 | #include <asm/page.h> | 75 | #include <asm/page.h> |
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index 2366c54cc4fa..5cf06b85fd9d 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c | |||
@@ -1652,24 +1652,13 @@ xfs_init_workqueues(void) | |||
1652 | */ | 1652 | */ |
1653 | xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); | 1653 | xfs_syncd_wq = alloc_workqueue("xfssyncd", WQ_CPU_INTENSIVE, 8); |
1654 | if (!xfs_syncd_wq) | 1654 | if (!xfs_syncd_wq) |
1655 | goto out; | 1655 | return -ENOMEM; |
1656 | |||
1657 | xfs_ail_wq = alloc_workqueue("xfsail", WQ_CPU_INTENSIVE, 8); | ||
1658 | if (!xfs_ail_wq) | ||
1659 | goto out_destroy_syncd; | ||
1660 | |||
1661 | return 0; | 1656 | return 0; |
1662 | |||
1663 | out_destroy_syncd: | ||
1664 | destroy_workqueue(xfs_syncd_wq); | ||
1665 | out: | ||
1666 | return -ENOMEM; | ||
1667 | } | 1657 | } |
1668 | 1658 | ||
1669 | STATIC void | 1659 | STATIC void |
1670 | xfs_destroy_workqueues(void) | 1660 | xfs_destroy_workqueues(void) |
1671 | { | 1661 | { |
1672 | destroy_workqueue(xfs_ail_wq); | ||
1673 | destroy_workqueue(xfs_syncd_wq); | 1662 | destroy_workqueue(xfs_syncd_wq); |
1674 | } | 1663 | } |
1675 | 1664 | ||
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h index 06a9759b6352..53597f4db9b5 100644 --- a/fs/xfs/xfs_trans.h +++ b/fs/xfs/xfs_trans.h | |||
@@ -350,7 +350,7 @@ typedef struct xfs_item_ops { | |||
350 | void (*iop_unlock)(xfs_log_item_t *); | 350 | void (*iop_unlock)(xfs_log_item_t *); |
351 | xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); | 351 | xfs_lsn_t (*iop_committed)(xfs_log_item_t *, xfs_lsn_t); |
352 | void (*iop_push)(xfs_log_item_t *); | 352 | void (*iop_push)(xfs_log_item_t *); |
353 | void (*iop_pushbuf)(xfs_log_item_t *); | 353 | bool (*iop_pushbuf)(xfs_log_item_t *); |
354 | void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); | 354 | void (*iop_committing)(xfs_log_item_t *, xfs_lsn_t); |
355 | } xfs_item_ops_t; | 355 | } xfs_item_ops_t; |
356 | 356 | ||
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c index c15aa29fa169..3a1e7ca54c2d 100644 --- a/fs/xfs/xfs_trans_ail.c +++ b/fs/xfs/xfs_trans_ail.c | |||
@@ -28,8 +28,6 @@ | |||
28 | #include "xfs_trans_priv.h" | 28 | #include "xfs_trans_priv.h" |
29 | #include "xfs_error.h" | 29 | #include "xfs_error.h" |
30 | 30 | ||
31 | struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ | ||
32 | |||
33 | #ifdef DEBUG | 31 | #ifdef DEBUG |
34 | /* | 32 | /* |
35 | * Check that the list is sorted as it should be. | 33 | * Check that the list is sorted as it should be. |
@@ -356,16 +354,10 @@ xfs_ail_delete( | |||
356 | xfs_trans_ail_cursor_clear(ailp, lip); | 354 | xfs_trans_ail_cursor_clear(ailp, lip); |
357 | } | 355 | } |
358 | 356 | ||
359 | /* | 357 | static long |
360 | * xfs_ail_worker does the work of pushing on the AIL. It will requeue itself | 358 | xfsaild_push( |
361 | * to run at a later time if there is more work to do to complete the push. | 359 | struct xfs_ail *ailp) |
362 | */ | ||
363 | STATIC void | ||
364 | xfs_ail_worker( | ||
365 | struct work_struct *work) | ||
366 | { | 360 | { |
367 | struct xfs_ail *ailp = container_of(to_delayed_work(work), | ||
368 | struct xfs_ail, xa_work); | ||
369 | xfs_mount_t *mp = ailp->xa_mount; | 361 | xfs_mount_t *mp = ailp->xa_mount; |
370 | struct xfs_ail_cursor cur; | 362 | struct xfs_ail_cursor cur; |
371 | xfs_log_item_t *lip; | 363 | xfs_log_item_t *lip; |
@@ -427,8 +419,13 @@ xfs_ail_worker( | |||
427 | 419 | ||
428 | case XFS_ITEM_PUSHBUF: | 420 | case XFS_ITEM_PUSHBUF: |
429 | XFS_STATS_INC(xs_push_ail_pushbuf); | 421 | XFS_STATS_INC(xs_push_ail_pushbuf); |
430 | IOP_PUSHBUF(lip); | 422 | |
431 | ailp->xa_last_pushed_lsn = lsn; | 423 | if (!IOP_PUSHBUF(lip)) { |
424 | stuck++; | ||
425 | flush_log = 1; | ||
426 | } else { | ||
427 | ailp->xa_last_pushed_lsn = lsn; | ||
428 | } | ||
432 | push_xfsbufd = 1; | 429 | push_xfsbufd = 1; |
433 | break; | 430 | break; |
434 | 431 | ||
@@ -440,7 +437,6 @@ xfs_ail_worker( | |||
440 | 437 | ||
441 | case XFS_ITEM_LOCKED: | 438 | case XFS_ITEM_LOCKED: |
442 | XFS_STATS_INC(xs_push_ail_locked); | 439 | XFS_STATS_INC(xs_push_ail_locked); |
443 | ailp->xa_last_pushed_lsn = lsn; | ||
444 | stuck++; | 440 | stuck++; |
445 | break; | 441 | break; |
446 | 442 | ||
@@ -501,20 +497,6 @@ out_done: | |||
501 | /* We're past our target or empty, so idle */ | 497 | /* We're past our target or empty, so idle */ |
502 | ailp->xa_last_pushed_lsn = 0; | 498 | ailp->xa_last_pushed_lsn = 0; |
503 | 499 | ||
504 | /* | ||
505 | * We clear the XFS_AIL_PUSHING_BIT first before checking | ||
506 | * whether the target has changed. If the target has changed, | ||
507 | * this pushes the requeue race directly onto the result of the | ||
508 | * atomic test/set bit, so we are guaranteed that either the | ||
509 | * the pusher that changed the target or ourselves will requeue | ||
510 | * the work (but not both). | ||
511 | */ | ||
512 | clear_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags); | ||
513 | smp_rmb(); | ||
514 | if (XFS_LSN_CMP(ailp->xa_target, target) == 0 || | ||
515 | test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) | ||
516 | return; | ||
517 | |||
518 | tout = 50; | 500 | tout = 50; |
519 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { | 501 | } else if (XFS_LSN_CMP(lsn, target) >= 0) { |
520 | /* | 502 | /* |
@@ -537,9 +519,30 @@ out_done: | |||
537 | tout = 20; | 519 | tout = 20; |
538 | } | 520 | } |
539 | 521 | ||
540 | /* There is more to do, requeue us. */ | 522 | return tout; |
541 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, | 523 | } |
542 | msecs_to_jiffies(tout)); | 524 | |
525 | static int | ||
526 | xfsaild( | ||
527 | void *data) | ||
528 | { | ||
529 | struct xfs_ail *ailp = data; | ||
530 | long tout = 0; /* milliseconds */ | ||
531 | |||
532 | while (!kthread_should_stop()) { | ||
533 | if (tout && tout <= 20) | ||
534 | __set_current_state(TASK_KILLABLE); | ||
535 | else | ||
536 | __set_current_state(TASK_INTERRUPTIBLE); | ||
537 | schedule_timeout(tout ? | ||
538 | msecs_to_jiffies(tout) : MAX_SCHEDULE_TIMEOUT); | ||
539 | |||
540 | try_to_freeze(); | ||
541 | |||
542 | tout = xfsaild_push(ailp); | ||
543 | } | ||
544 | |||
545 | return 0; | ||
543 | } | 546 | } |
544 | 547 | ||
545 | /* | 548 | /* |
@@ -574,8 +577,9 @@ xfs_ail_push( | |||
574 | */ | 577 | */ |
575 | smp_wmb(); | 578 | smp_wmb(); |
576 | xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); | 579 | xfs_trans_ail_copy_lsn(ailp, &ailp->xa_target, &threshold_lsn); |
577 | if (!test_and_set_bit(XFS_AIL_PUSHING_BIT, &ailp->xa_flags)) | 580 | smp_wmb(); |
578 | queue_delayed_work(xfs_syncd_wq, &ailp->xa_work, 0); | 581 | |
582 | wake_up_process(ailp->xa_task); | ||
579 | } | 583 | } |
580 | 584 | ||
581 | /* | 585 | /* |
@@ -813,9 +817,18 @@ xfs_trans_ail_init( | |||
813 | INIT_LIST_HEAD(&ailp->xa_ail); | 817 | INIT_LIST_HEAD(&ailp->xa_ail); |
814 | INIT_LIST_HEAD(&ailp->xa_cursors); | 818 | INIT_LIST_HEAD(&ailp->xa_cursors); |
815 | spin_lock_init(&ailp->xa_lock); | 819 | spin_lock_init(&ailp->xa_lock); |
816 | INIT_DELAYED_WORK(&ailp->xa_work, xfs_ail_worker); | 820 | |
821 | ailp->xa_task = kthread_run(xfsaild, ailp, "xfsaild/%s", | ||
822 | ailp->xa_mount->m_fsname); | ||
823 | if (IS_ERR(ailp->xa_task)) | ||
824 | goto out_free_ailp; | ||
825 | |||
817 | mp->m_ail = ailp; | 826 | mp->m_ail = ailp; |
818 | return 0; | 827 | return 0; |
828 | |||
829 | out_free_ailp: | ||
830 | kmem_free(ailp); | ||
831 | return ENOMEM; | ||
819 | } | 832 | } |
820 | 833 | ||
821 | void | 834 | void |
@@ -824,6 +837,6 @@ xfs_trans_ail_destroy( | |||
824 | { | 837 | { |
825 | struct xfs_ail *ailp = mp->m_ail; | 838 | struct xfs_ail *ailp = mp->m_ail; |
826 | 839 | ||
827 | cancel_delayed_work_sync(&ailp->xa_work); | 840 | kthread_stop(ailp->xa_task); |
828 | kmem_free(ailp); | 841 | kmem_free(ailp); |
829 | } | 842 | } |
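
The largest change in this merge converts the XFS AIL push from a self-requeueing delayed work item into a dedicated xfsaild kthread per mount: xfsaild_push() reports how long to sleep, and the thread loops until kthread_should_stop(). The kthread skeleton used here is the standard one; a sketch with illustrative names and trimmed error handling:

    #include <linux/kthread.h>
    #include <linux/freezer.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>
    #include <linux/err.h>

    struct my_ail {
        struct task_struct *task;
        /* ... push state ... */
    };

    static long my_push(struct my_ail *ail)
    {
        /* One pass of pushing work; return how many ms to sleep before the
         * next pass, or 0 to idle until explicitly woken. */
        return 0;
    }

    static int my_aild(void *data)
    {
        struct my_ail *ail = data;
        long tout = 0;

        while (!kthread_should_stop()) {
            __set_current_state(TASK_INTERRUPTIBLE);
            schedule_timeout(tout ? msecs_to_jiffies(tout)
                                  : MAX_SCHEDULE_TIMEOUT);
            try_to_freeze();
            tout = my_push(ail);
        }
        return 0;
    }

    static int my_ail_start(struct my_ail *ail, const char *name)
    {
        ail->task = kthread_run(my_aild, ail, "my_aild/%s", name);
        return IS_ERR(ail->task) ? PTR_ERR(ail->task) : 0;
    }

    static void my_ail_stop(struct my_ail *ail)
    {
        kthread_stop(ail->task);         /* wakes the thread and waits for it to exit */
    }

With this shape, anything that wants a push only needs wake_up_process() on the stored task, which is exactly what the reworked xfs_ail_push() does above.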
diff --git a/fs/xfs/xfs_trans_priv.h b/fs/xfs/xfs_trans_priv.h index 212946b97239..22750b5e4a8f 100644 --- a/fs/xfs/xfs_trans_priv.h +++ b/fs/xfs/xfs_trans_priv.h | |||
@@ -64,23 +64,17 @@ struct xfs_ail_cursor { | |||
64 | */ | 64 | */ |
65 | struct xfs_ail { | 65 | struct xfs_ail { |
66 | struct xfs_mount *xa_mount; | 66 | struct xfs_mount *xa_mount; |
67 | struct task_struct *xa_task; | ||
67 | struct list_head xa_ail; | 68 | struct list_head xa_ail; |
68 | xfs_lsn_t xa_target; | 69 | xfs_lsn_t xa_target; |
69 | struct list_head xa_cursors; | 70 | struct list_head xa_cursors; |
70 | spinlock_t xa_lock; | 71 | spinlock_t xa_lock; |
71 | struct delayed_work xa_work; | ||
72 | xfs_lsn_t xa_last_pushed_lsn; | 72 | xfs_lsn_t xa_last_pushed_lsn; |
73 | unsigned long xa_flags; | ||
74 | }; | 73 | }; |
75 | 74 | ||
76 | #define XFS_AIL_PUSHING_BIT 0 | ||
77 | |||
78 | /* | 75 | /* |
79 | * From xfs_trans_ail.c | 76 | * From xfs_trans_ail.c |
80 | */ | 77 | */ |
81 | |||
82 | extern struct workqueue_struct *xfs_ail_wq; /* AIL workqueue */ | ||
83 | |||
84 | void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, | 78 | void xfs_trans_ail_update_bulk(struct xfs_ail *ailp, |
85 | struct xfs_ail_cursor *cur, | 79 | struct xfs_ail_cursor *cur, |
86 | struct xfs_log_item **log_items, int nr_items, | 80 | struct xfs_log_item **log_items, int nr_items, |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index 3fa1f3d90ce0..99e3e50b5c57 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -197,6 +197,11 @@ struct dm_target { | |||
197 | * whether or not its underlying devices have support. | 197 | * whether or not its underlying devices have support. |
198 | */ | 198 | */ |
199 | unsigned discards_supported:1; | 199 | unsigned discards_supported:1; |
200 | |||
201 | /* | ||
202 | * Set if this target does not return zeroes on discarded blocks. | ||
203 | */ | ||
204 | unsigned discard_zeroes_data_unsupported:1; | ||
200 | }; | 205 | }; |
201 | 206 | ||
202 | /* Each target can link one of these into the table */ | 207 | /* Each target can link one of these into the table */ |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index e807ad687a07..3ad553e8eae2 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain); | |||
80 | #endif /* CONFIG_IRQ_DOMAIN */ | 80 | #endif /* CONFIG_IRQ_DOMAIN */ |
81 | 81 | ||
82 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) | 82 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) |
83 | extern struct irq_domain_ops irq_domain_simple_ops; | ||
83 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); | 84 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); |
84 | extern void irq_domain_generate_simple(const struct of_device_id *match, | 85 | extern void irq_domain_generate_simple(const struct of_device_id *match, |
85 | u64 phys_base, unsigned int irq_start); | 86 | u64 phys_base, unsigned int irq_start); |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 8c230cbcbb48..9fc01226055b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -621,8 +621,9 @@ struct pci_driver { | |||
621 | extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); | 621 | extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); |
622 | 622 | ||
623 | enum pcie_bus_config_types { | 623 | enum pcie_bus_config_types { |
624 | PCIE_BUS_PERFORMANCE, | 624 | PCIE_BUS_TUNE_OFF, |
625 | PCIE_BUS_SAFE, | 625 | PCIE_BUS_SAFE, |
626 | PCIE_BUS_PERFORMANCE, | ||
626 | PCIE_BUS_PEER2PEER, | 627 | PCIE_BUS_PEER2PEER, |
627 | }; | 628 | }; |
628 | 629 | ||
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index e07e2742a865..1dc420ba213a 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h | |||
@@ -51,6 +51,7 @@ | |||
51 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) | 51 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) |
52 | 52 | ||
53 | #define PTP_EV_PORT 319 | 53 | #define PTP_EV_PORT 319 |
54 | #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ | ||
54 | 55 | ||
55 | #define OFF_ETYPE 12 | 56 | #define OFF_ETYPE 12 |
56 | #define OFF_IHL 14 | 57 | #define OFF_IHL 14 |
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len) | |||
116 | {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ | 117 | {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ |
117 | {OP_RETA, 0, 0, 0 }, /* */ \ | 118 | {OP_RETA, 0, 0, 0 }, /* */ \ |
118 | /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ | 119 | /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ |
119 | /*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ | 120 | /*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \ |
120 | {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ | 121 | {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ |
121 | {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ | 122 | {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \ |
123 | {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ | ||
124 | {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ | ||
125 | {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \ | ||
122 | {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ | 126 | {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ |
123 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ | 127 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ |
124 | {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ | 128 | {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ |
125 | {OP_RETA, 0, 0, 0 }, /* */ \ | 129 | {OP_RETA, 0, 0, 0 }, /* */ \ |
126 | /*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ | 130 | /*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \ |
131 | {OP_LDB, 0, 0, ETH_HLEN }, /* */ \ | ||
132 | {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ | ||
133 | {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \ | ||
127 | {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ | 134 | {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ |
128 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ | 135 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ |
129 | {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ | 136 | {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ac2c0578e0f..41d0237fd449 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {} | |||
1956 | 1956 | ||
1957 | extern unsigned long long | 1957 | extern unsigned long long |
1958 | task_sched_runtime(struct task_struct *task); | 1958 | task_sched_runtime(struct task_struct *task); |
1959 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | ||
1960 | 1959 | ||
1961 | /* sched_exec is called by processes performing an exec */ | 1960 | /* sched_exec is called by processes performing an exec */ |
1962 | #ifdef CONFIG_SMP | 1961 | #ifdef CONFIG_SMP |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 1aaf915656f3..8fa4430f99c1 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -900,6 +900,7 @@ struct netns_ipvs { | |||
900 | volatile int sync_state; | 900 | volatile int sync_state; |
901 | volatile int master_syncid; | 901 | volatile int master_syncid; |
902 | volatile int backup_syncid; | 902 | volatile int backup_syncid; |
903 | struct mutex sync_mutex; | ||
903 | /* multicast interface name */ | 904 | /* multicast interface name */ |
904 | char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; | 905 | char master_mcast_ifn[IP_VS_IFNAME_MAXLEN]; |
905 | char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; | 906 | char backup_mcast_ifn[IP_VS_IFNAME_MAXLEN]; |
diff --git a/include/net/udplite.h b/include/net/udplite.h index 673a024c6b2a..5f097ca7d5c5 100644 --- a/include/net/udplite.h +++ b/include/net/udplite.h | |||
@@ -66,40 +66,34 @@ static inline int udplite_checksum_init(struct sk_buff *skb, struct udphdr *uh) | |||
66 | return 0; | 66 | return 0; |
67 | } | 67 | } |
68 | 68 | ||
69 | static inline int udplite_sender_cscov(struct udp_sock *up, struct udphdr *uh) | 69 | /* Slow-path computation of checksum. Socket is locked. */ |
70 | static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) | ||
70 | { | 71 | { |
72 | const struct udp_sock *up = udp_sk(skb->sk); | ||
71 | int cscov = up->len; | 73 | int cscov = up->len; |
74 | __wsum csum = 0; | ||
72 | 75 | ||
73 | /* | 76 | if (up->pcflag & UDPLITE_SEND_CC) { |
74 | * Sender has set `partial coverage' option on UDP-Lite socket | 77 | /* |
75 | */ | 78 | * Sender has set `partial coverage' option on UDP-Lite socket. |
76 | if (up->pcflag & UDPLITE_SEND_CC) { | 79 | * The special case "up->pcslen == 0" signifies full coverage. |
80 | */ | ||
77 | if (up->pcslen < up->len) { | 81 | if (up->pcslen < up->len) { |
78 | /* up->pcslen == 0 means that full coverage is required, | 82 | if (0 < up->pcslen) |
79 | * partial coverage only if 0 < up->pcslen < up->len */ | 83 | cscov = up->pcslen; |
80 | if (0 < up->pcslen) { | 84 | udp_hdr(skb)->len = htons(up->pcslen); |
81 | cscov = up->pcslen; | ||
82 | } | ||
83 | uh->len = htons(up->pcslen); | ||
84 | } | 85 | } |
85 | /* | 86 | /* |
86 | * NOTE: Causes for the error case `up->pcslen > up->len': | 87 | * NOTE: Causes for the error case `up->pcslen > up->len': |
87 | * (i) Application error (will not be penalized). | 88 | * (i) Application error (will not be penalized). |
88 | * (ii) Payload too big for send buffer: data is split | 89 | * (ii) Payload too big for send buffer: data is split |
89 | * into several packets, each with its own header. | 90 | * into several packets, each with its own header. |
90 | * In this case (e.g. last segment), coverage may | 91 | * In this case (e.g. last segment), coverage may |
91 | * exceed packet length. | 92 | * exceed packet length. |
92 | * Since packets with coverage length > packet length are | 93 | * Since packets with coverage length > packet length are |
93 | * illegal, we fall back to the defaults here. | 94 | * illegal, we fall back to the defaults here. |
94 | */ | 95 | */ |
95 | } | 96 | } |
96 | return cscov; | ||
97 | } | ||
98 | |||
99 | static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) | ||
100 | { | ||
101 | int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); | ||
102 | __wsum csum = 0; | ||
103 | 97 | ||
104 | skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ | 98 | skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ |
105 | 99 | ||
@@ -115,16 +109,21 @@ static inline __wsum udplite_csum_outgoing(struct sock *sk, struct sk_buff *skb) | |||
115 | return csum; | 109 | return csum; |
116 | } | 110 | } |
117 | 111 | ||
112 | /* Fast-path computation of checksum. Socket may not be locked. */ | ||
118 | static inline __wsum udplite_csum(struct sk_buff *skb) | 113 | static inline __wsum udplite_csum(struct sk_buff *skb) |
119 | { | 114 | { |
120 | struct sock *sk = skb->sk; | 115 | const struct udp_sock *up = udp_sk(skb->sk); |
121 | int cscov = udplite_sender_cscov(udp_sk(sk), udp_hdr(skb)); | ||
122 | const int off = skb_transport_offset(skb); | 116 | const int off = skb_transport_offset(skb); |
123 | const int len = skb->len - off; | 117 | int len = skb->len - off; |
124 | 118 | ||
119 | if ((up->pcflag & UDPLITE_SEND_CC) && up->pcslen < len) { | ||
120 | if (0 < up->pcslen) | ||
121 | len = up->pcslen; | ||
122 | udp_hdr(skb)->len = htons(up->pcslen); | ||
123 | } | ||
125 | skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ | 124 | skb->ip_summed = CHECKSUM_NONE; /* no HW support for checksumming */ |
126 | 125 | ||
127 | return skb_checksum(skb, off, min(cscov, len), 0); | 126 | return skb_checksum(skb, off, len, 0); |
128 | } | 127 | } |
129 | 128 | ||
130 | extern void udplite4_register(void); | 129 | extern void udplite4_register(void); |
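The udplite.h hunks above fold the old udplite_sender_cscov() helper into both checksum paths: coverage defaults to the full datagram length and is clamped to pcslen only when the socket has partial coverage enabled, pcslen is non-zero, and pcslen is smaller than the payload (pcslen == 0 still means full coverage). A minimal userspace sketch of that clamping rule, using illustrative names (coverage_len, and UDPLITE_SEND_CC as a plain flag) rather than the kernel API:

    #include <stdio.h>

    #define UDPLITE_SEND_CC 0x1   /* illustrative flag: partial coverage requested */

    /*
     * Return how many bytes of the datagram the sender should checksum.
     * Mirrors the rule in the hunks above: coverage defaults to the full
     * length and is clamped to pcslen only when partial coverage is on,
     * pcslen is non-zero, and pcslen is smaller than the payload.
     */
    static int coverage_len(int pcflag, int pcslen, int len)
    {
        int cscov = len;

        if ((pcflag & UDPLITE_SEND_CC) && pcslen < len && pcslen > 0)
            cscov = pcslen;
        return cscov;
    }

    int main(void)
    {
        /* full coverage: pcslen == 0 */
        printf("%d\n", coverage_len(UDPLITE_SEND_CC, 0, 1000));    /* 1000 */
        /* partial coverage: checksum only the first 20 bytes */
        printf("%d\n", coverage_len(UDPLITE_SEND_CC, 20, 1000));   /* 20 */
        /* coverage larger than the packet: fall back to full length */
        printf("%d\n", coverage_len(UDPLITE_SEND_CC, 2000, 1000)); /* 1000 */
        return 0;
    }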
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 6bca4cc0063c..5f172703eb4f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, | |||
298 | __array(char, name, 32) | 298 | __array(char, name, 32) |
299 | __field(unsigned long, ino) | 299 | __field(unsigned long, ino) |
300 | __field(unsigned long, state) | 300 | __field(unsigned long, state) |
301 | __field(unsigned long, age) | 301 | __field(unsigned long, dirtied_when) |
302 | __field(unsigned long, writeback_index) | 302 | __field(unsigned long, writeback_index) |
303 | __field(long, nr_to_write) | 303 | __field(long, nr_to_write) |
304 | __field(unsigned long, wrote) | 304 | __field(unsigned long, wrote) |
@@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, | |||
309 | dev_name(inode->i_mapping->backing_dev_info->dev), 32); | 309 | dev_name(inode->i_mapping->backing_dev_info->dev), 32); |
310 | __entry->ino = inode->i_ino; | 310 | __entry->ino = inode->i_ino; |
311 | __entry->state = inode->i_state; | 311 | __entry->state = inode->i_state; |
312 | __entry->age = (jiffies - inode->dirtied_when) * | 312 | __entry->dirtied_when = inode->dirtied_when; |
313 | 1000 / HZ; | ||
314 | __entry->writeback_index = inode->i_mapping->writeback_index; | 313 | __entry->writeback_index = inode->i_mapping->writeback_index; |
315 | __entry->nr_to_write = nr_to_write; | 314 | __entry->nr_to_write = nr_to_write; |
316 | __entry->wrote = nr_to_write - wbc->nr_to_write; | 315 | __entry->wrote = nr_to_write - wbc->nr_to_write; |
317 | ), | 316 | ), |
318 | 317 | ||
319 | TP_printk("bdi %s: ino=%lu state=%s age=%lu " | 318 | TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " |
320 | "index=%lu to_write=%ld wrote=%lu", | 319 | "index=%lu to_write=%ld wrote=%lu", |
321 | __entry->name, | 320 | __entry->name, |
322 | __entry->ino, | 321 | __entry->ino, |
323 | show_inode_state(__entry->state), | 322 | show_inode_state(__entry->state), |
324 | __entry->age, | 323 | __entry->dirtied_when, |
324 | (jiffies - __entry->dirtied_when) / HZ, | ||
325 | __entry->writeback_index, | 325 | __entry->writeback_index, |
326 | __entry->nr_to_write, | 326 | __entry->nr_to_write, |
327 | __entry->wrote | 327 | __entry->wrote |
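The tracepoint change above records the raw inode->dirtied_when value in the trace entry and derives the age only when the event is printed, instead of freezing a precomputed age into the ring buffer. A small sketch of that record-raw/derive-at-print split; HZ, jiffies_now and trace_entry below are crude stand-ins, not the kernel tracing API:

    #include <stdio.h>
    #include <time.h>

    #define HZ 100                      /* illustrative tick rate */

    static unsigned long jiffies_now(void)
    {
        /* crude stand-in for the kernel's jiffies counter */
        return (unsigned long)(clock() / (CLOCKS_PER_SEC / HZ));
    }

    struct trace_entry {
        unsigned long dirtied_when;     /* raw timestamp, recorded once */
    };

    static void record(struct trace_entry *e)
    {
        e->dirtied_when = jiffies_now();
    }

    static void print(const struct trace_entry *e)
    {
        /* the age is derived at print time, so it stays meaningful no
         * matter how long the entry sat in the buffer before being read */
        printf("dirtied_when=%lu age=%lu\n",
               e->dirtied_when, (jiffies_now() - e->dirtied_when) / HZ);
    }

    int main(void)
    {
        struct trace_entry e;

        record(&e);
        print(&e);
        return 0;
    }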
diff --git a/init/main.c b/init/main.c index 2a9b88aa5e76..03b408dff825 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -381,9 +381,6 @@ static noinline void __init_refok rest_init(void) | |||
381 | preempt_enable_no_resched(); | 381 | preempt_enable_no_resched(); |
382 | schedule(); | 382 | schedule(); |
383 | 383 | ||
384 | /* At this point, we can enable user mode helper functionality */ | ||
385 | usermodehelper_enable(); | ||
386 | |||
387 | /* Call into cpu_idle with preempt disabled */ | 384 | /* Call into cpu_idle with preempt disabled */ |
388 | preempt_disable(); | 385 | preempt_disable(); |
389 | cpu_idle(); | 386 | cpu_idle(); |
@@ -733,6 +730,7 @@ static void __init do_basic_setup(void) | |||
733 | driver_init(); | 730 | driver_init(); |
734 | init_irq_proc(); | 731 | init_irq_proc(); |
735 | do_ctors(); | 732 | do_ctors(); |
733 | usermodehelper_enable(); | ||
736 | do_initcalls(); | 734 | do_initcalls(); |
737 | } | 735 | } |
738 | 736 | ||
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index d5828da3fd38..b57a3776de44 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain) | |||
29 | */ | 29 | */ |
30 | for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { | 30 | for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { |
31 | d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); | 31 | d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); |
32 | if (d || d->domain) { | 32 | if (!d) { |
33 | WARN(1, "error: assigning domain to non existant irq_desc"); | ||
34 | return; | ||
35 | } | ||
36 | if (d->domain) { | ||
33 | /* things are broken; just report, don't clean up */ | 37 | /* things are broken; just report, don't clean up */ |
34 | WARN(1, "error: irq_desc already assigned to a domain"); | 38 | WARN(1, "error: irq_desc already assigned to a domain"); |
35 | return; | 39 | return; |
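The irq_domain_add() hunk replaces the broken test "if (d || d->domain)" — which warned for every valid descriptor and dereferenced d exactly when it was NULL — with two separate checks in the safe order. A short sketch of that ordering under hypothetical names (irq_data_stub, claim_descriptor), not the kernel structures:

    #include <stdio.h>
    #include <stddef.h>

    struct irq_data_stub {             /* hypothetical stand-in for the descriptor */
        void *domain;
    };

    /* Reject a missing descriptor first, then an already-claimed one,
     * so the pointer is never dereferenced while it is NULL. */
    static int claim_descriptor(struct irq_data_stub *d)
    {
        if (!d) {
            fprintf(stderr, "error: no descriptor for this irq\n");
            return -1;
        }
        if (d->domain) {
            fprintf(stderr, "error: descriptor already assigned to a domain\n");
            return -1;
        }
        d->domain = (void *)0x1;       /* claim it */
        return 0;
    }

    int main(void)
    {
        struct irq_data_stub d = { .domain = NULL };

        claim_descriptor(NULL);        /* rejected without a NULL dereference */
        claim_descriptor(&d);          /* succeeds */
        claim_descriptor(&d);          /* rejected: already claimed */
        return 0;
    }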
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 58f405b581e7..640ded8f5c48 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | |||
250 | do { | 250 | do { |
251 | times->utime = cputime_add(times->utime, t->utime); | 251 | times->utime = cputime_add(times->utime, t->utime); |
252 | times->stime = cputime_add(times->stime, t->stime); | 252 | times->stime = cputime_add(times->stime, t->stime); |
253 | times->sum_exec_runtime += t->se.sum_exec_runtime; | 253 | times->sum_exec_runtime += task_sched_runtime(t); |
254 | } while_each_thread(tsk, t); | 254 | } while_each_thread(tsk, t); |
255 | out: | 255 | out: |
256 | rcu_read_unlock(); | 256 | rcu_read_unlock(); |
@@ -274,9 +274,7 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | |||
274 | struct task_cputime sum; | 274 | struct task_cputime sum; |
275 | unsigned long flags; | 275 | unsigned long flags; |
276 | 276 | ||
277 | spin_lock_irqsave(&cputimer->lock, flags); | ||
278 | if (!cputimer->running) { | 277 | if (!cputimer->running) { |
279 | cputimer->running = 1; | ||
280 | /* | 278 | /* |
281 | * The POSIX timer interface allows for absolute time expiry | 279 | * The POSIX timer interface allows for absolute time expiry |
282 | * values through the TIMER_ABSTIME flag, therefore we have | 280 | * values through the TIMER_ABSTIME flag, therefore we have |
@@ -284,8 +282,11 @@ void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times) | |||
284 | * it. | 282 | * it. |
285 | */ | 283 | */ |
286 | thread_group_cputime(tsk, &sum); | 284 | thread_group_cputime(tsk, &sum); |
285 | spin_lock_irqsave(&cputimer->lock, flags); | ||
286 | cputimer->running = 1; | ||
287 | update_gt_cputime(&cputimer->cputime, &sum); | 287 | update_gt_cputime(&cputimer->cputime, &sum); |
288 | } | 288 | } else |
289 | spin_lock_irqsave(&cputimer->lock, flags); | ||
289 | *times = cputimer->cputime; | 290 | *times = cputimer->cputime; |
290 | spin_unlock_irqrestore(&cputimer->lock, flags); | 291 | spin_unlock_irqrestore(&cputimer->lock, flags); |
291 | } | 292 | } |
@@ -312,7 +313,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock, | |||
312 | cpu->cpu = cputime.utime; | 313 | cpu->cpu = cputime.utime; |
313 | break; | 314 | break; |
314 | case CPUCLOCK_SCHED: | 315 | case CPUCLOCK_SCHED: |
315 | cpu->sched = thread_group_sched_runtime(p); | 316 | thread_group_cputime(p, &cputime); |
317 | cpu->sched = cputime.sum_exec_runtime; | ||
316 | break; | 318 | break; |
317 | } | 319 | } |
318 | return 0; | 320 | return 0; |
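The thread_group_cputimer() hunk moves the expensive thread_group_cputime() summation out from under cputimer->lock, taking the spinlock only to publish the result on the slow path or to copy the cached value on the fast path. A hedged pthread sketch of that shape — heavy work outside the lock, the lock held only for the shared-state update; every name here is illustrative:

    #include <pthread.h>
    #include <stdio.h>

    struct cputimer_stub {
        pthread_mutex_t lock;
        int running;
        long long cputime;          /* cached group total */
    };

    /* Expensive summation stands in for thread_group_cputime(). */
    static long long expensive_group_sum(void)
    {
        long long sum = 0;
        for (int i = 0; i < 1000; i++)
            sum += i;
        return sum;
    }

    static long long sample_group_time(struct cputimer_stub *ct)
    {
        long long val;

        if (!ct->running) {
            /* Slow path: do the heavy work before taking the lock,
             * then publish the result and mark the timer running. */
            long long sum = expensive_group_sum();

            pthread_mutex_lock(&ct->lock);
            ct->running = 1;
            if (sum > ct->cputime)
                ct->cputime = sum;
            val = ct->cputime;
            pthread_mutex_unlock(&ct->lock);
            return val;
        }

        /* Fast path: only copy the cached value under the lock. */
        pthread_mutex_lock(&ct->lock);
        val = ct->cputime;
        pthread_mutex_unlock(&ct->lock);
        return val;
    }

    int main(void)
    {
        struct cputimer_stub ct = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        printf("%lld\n", sample_group_time(&ct));
        printf("%lld\n", sample_group_time(&ct));
        return 0;
    }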
diff --git a/kernel/resource.c b/kernel/resource.c index 3b3cedc52592..c8dc249da5ce 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
419 | else | 419 | else |
420 | tmp.end = root->end; | 420 | tmp.end = root->end; |
421 | 421 | ||
422 | if (tmp.end < tmp.start) | ||
423 | goto next; | ||
424 | |||
422 | resource_clip(&tmp, constraint->min, constraint->max); | 425 | resource_clip(&tmp, constraint->min, constraint->max); |
423 | arch_remove_reservations(&tmp); | 426 | arch_remove_reservations(&tmp); |
424 | 427 | ||
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
436 | return 0; | 439 | return 0; |
437 | } | 440 | } |
438 | } | 441 | } |
439 | if (!this) | 442 | |
443 | next: if (!this || this->end == root->end) | ||
440 | break; | 444 | break; |
445 | |||
441 | if (this != old) | 446 | if (this != old) |
442 | tmp.start = this->end + 1; | 447 | tmp.start = this->end + 1; |
443 | this = this->sibling; | 448 | this = this->sibling; |
diff --git a/kernel/sched.c b/kernel/sched.c index ec5f472bc5b9..b50b0f0c9aa9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3725 | } | 3725 | } |
3726 | 3726 | ||
3727 | /* | 3727 | /* |
3728 | * Return sum_exec_runtime for the thread group. | ||
3729 | * In case the task is currently running, return the sum plus current's | ||
3730 | * pending runtime that have not been accounted yet. | ||
3731 | * | ||
3732 | * Note that the thread group might have other running tasks as well, | ||
3733 | * so the return value not includes other pending runtime that other | ||
3734 | * running tasks might have. | ||
3735 | */ | ||
3736 | unsigned long long thread_group_sched_runtime(struct task_struct *p) | ||
3737 | { | ||
3738 | struct task_cputime totals; | ||
3739 | unsigned long flags; | ||
3740 | struct rq *rq; | ||
3741 | u64 ns; | ||
3742 | |||
3743 | rq = task_rq_lock(p, &flags); | ||
3744 | thread_group_cputime(p, &totals); | ||
3745 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); | ||
3746 | task_rq_unlock(rq, p, &flags); | ||
3747 | |||
3748 | return ns; | ||
3749 | } | ||
3750 | |||
3751 | /* | ||
3752 | * Account user cpu time to a process. | 3728 | * Account user cpu time to a process. |
3753 | * @p: the process that the cpu time gets accounted to | 3729 | * @p: the process that the cpu time gets accounted to |
3754 | * @cputime: the cpu time spent in user space since the last update | 3730 | * @cputime: the cpu time spent in user space since the last update |
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk) | |||
4372 | blk_schedule_flush_plug(tsk); | 4348 | blk_schedule_flush_plug(tsk); |
4373 | } | 4349 | } |
4374 | 4350 | ||
4375 | asmlinkage void schedule(void) | 4351 | asmlinkage void __sched schedule(void) |
4376 | { | 4352 | { |
4377 | struct task_struct *tsk = current; | 4353 | struct task_struct *tsk = current; |
4378 | 4354 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 97540f0c9e47..af1177858be3 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
1050 | */ | 1050 | */ |
1051 | if (curr && unlikely(rt_task(curr)) && | 1051 | if (curr && unlikely(rt_task(curr)) && |
1052 | (curr->rt.nr_cpus_allowed < 2 || | 1052 | (curr->rt.nr_cpus_allowed < 2 || |
1053 | curr->prio < p->prio) && | 1053 | curr->prio <= p->prio) && |
1054 | (p->rt.nr_cpus_allowed > 1)) { | 1054 | (p->rt.nr_cpus_allowed > 1)) { |
1055 | int target = find_lowest_rq(p); | 1055 | int target = find_lowest_rq(p); |
1056 | 1056 | ||
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
1581 | p->rt.nr_cpus_allowed > 1 && | 1581 | p->rt.nr_cpus_allowed > 1 && |
1582 | rt_task(rq->curr) && | 1582 | rt_task(rq->curr) && |
1583 | (rq->curr->rt.nr_cpus_allowed < 2 || | 1583 | (rq->curr->rt.nr_cpus_allowed < 2 || |
1584 | rq->curr->prio < p->prio)) | 1584 | rq->curr->prio <= p->prio)) |
1585 | push_rt_tasks(rq); | 1585 | push_rt_tasks(rq); |
1586 | } | 1586 | } |
1587 | 1587 | ||
diff --git a/kernel/sys.c b/kernel/sys.c index 18ee1d2f6474..1dbbe695a5ef 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem); | |||
1172 | static int override_release(char __user *release, int len) | 1172 | static int override_release(char __user *release, int len) |
1173 | { | 1173 | { |
1174 | int ret = 0; | 1174 | int ret = 0; |
1175 | char buf[len]; | 1175 | char buf[65]; |
1176 | 1176 | ||
1177 | if (current->personality & UNAME26) { | 1177 | if (current->personality & UNAME26) { |
1178 | char *rest = UTS_RELEASE; | 1178 | char *rest = UTS_RELEASE; |
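The override_release() change swaps a variable-length array sized by the len argument for a fixed 65-byte buffer, so the stack footprint no longer depends on the caller's length. A userspace illustration of the same idea; release_copy() and its scratch buffer are hypothetical, with the 65-byte bound chosen to match the new kernel buffer:

    #include <stdio.h>
    #include <string.h>

    /* Copy at most len bytes of the release string into a caller buffer,
     * staging it in a fixed-size scratch buffer instead of a VLA whose
     * size the caller controls. */
    static int release_copy(char *out, size_t len, const char *release)
    {
        char buf[65];                       /* fixed bound, independent of len */

        if (len > sizeof(buf))
            len = sizeof(buf);
        strncpy(buf, release, sizeof(buf) - 1);
        buf[sizeof(buf) - 1] = '\0';
        memcpy(out, buf, len);
        return 0;
    }

    int main(void)
    {
        char out[128] = "";

        release_copy(out, sizeof(out), "3.0.4-custom");
        printf("%s\n", out);
        return 0;
    }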
diff --git a/mm/migrate.c b/mm/migrate.c index 666e4e677414..14d0a6a632f6 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -120,10 +120,10 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, | |||
120 | 120 | ||
121 | ptep = pte_offset_map(pmd, addr); | 121 | ptep = pte_offset_map(pmd, addr); |
122 | 122 | ||
123 | if (!is_swap_pte(*ptep)) { | 123 | /* |
124 | pte_unmap(ptep); | 124 | * Peek to check is_swap_pte() before taking ptlock? No, we |
125 | goto out; | 125 | * can race mremap's move_ptes(), which skips anon_vma lock. |
126 | } | 126 | */ |
127 | 127 | ||
128 | ptl = pte_lockptr(mm, pmd); | 128 | ptl = pte_lockptr(mm, pmd); |
129 | } | 129 | } |
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 3e2f91ffa4e2..05dd35114a27 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
565 | struct orig_node *orig_node = NULL; | 565 | struct orig_node *orig_node = NULL; |
566 | int data_len = skb->len, ret; | 566 | int data_len = skb->len, ret; |
567 | short vid = -1; | 567 | short vid = -1; |
568 | bool do_bcast = false; | 568 | bool do_bcast; |
569 | 569 | ||
570 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) | 570 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) |
571 | goto dropped; | 571 | goto dropped; |
@@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
598 | tt_local_add(soft_iface, ethhdr->h_source); | 598 | tt_local_add(soft_iface, ethhdr->h_source); |
599 | 599 | ||
600 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | 600 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
601 | if (is_multicast_ether_addr(ethhdr->h_dest) || | 601 | do_bcast = is_multicast_ether_addr(ethhdr->h_dest); |
602 | (orig_node && orig_node->gw_flags)) { | 602 | if (do_bcast || (orig_node && orig_node->gw_flags)) { |
603 | ret = gw_is_target(bat_priv, skb, orig_node); | 603 | ret = gw_is_target(bat_priv, skb, orig_node); |
604 | 604 | ||
605 | if (ret < 0) | 605 | if (ret < 0) |
606 | goto dropped; | 606 | goto dropped; |
607 | 607 | ||
608 | if (ret == 0) | 608 | if (ret) |
609 | do_bcast = true; | 609 | do_bcast = false; |
610 | } | 610 | } |
611 | 611 | ||
612 | /* ethernet packet should be broadcasted */ | 612 | /* ethernet packet should be broadcasted */ |
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index 61f1f623091d..e8292369cdcf 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c | |||
@@ -26,6 +26,8 @@ | |||
26 | 26 | ||
27 | /* Bluetooth L2CAP sockets. */ | 27 | /* Bluetooth L2CAP sockets. */ |
28 | 28 | ||
29 | #include <linux/security.h> | ||
30 | |||
29 | #include <net/bluetooth/bluetooth.h> | 31 | #include <net/bluetooth/bluetooth.h> |
30 | #include <net/bluetooth/hci_core.h> | 32 | #include <net/bluetooth/hci_core.h> |
31 | #include <net/bluetooth/l2cap.h> | 33 | #include <net/bluetooth/l2cap.h> |
@@ -933,6 +935,8 @@ static void l2cap_sock_init(struct sock *sk, struct sock *parent) | |||
933 | chan->force_reliable = pchan->force_reliable; | 935 | chan->force_reliable = pchan->force_reliable; |
934 | chan->flushable = pchan->flushable; | 936 | chan->flushable = pchan->flushable; |
935 | chan->force_active = pchan->force_active; | 937 | chan->force_active = pchan->force_active; |
938 | |||
939 | security_sk_clone(parent, sk); | ||
936 | } else { | 940 | } else { |
937 | 941 | ||
938 | switch (sk->sk_type) { | 942 | switch (sk->sk_type) { |
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 482722bbc7a0..5417f6127323 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include <linux/device.h> | 42 | #include <linux/device.h> |
43 | #include <linux/debugfs.h> | 43 | #include <linux/debugfs.h> |
44 | #include <linux/seq_file.h> | 44 | #include <linux/seq_file.h> |
45 | #include <linux/security.h> | ||
45 | #include <net/sock.h> | 46 | #include <net/sock.h> |
46 | 47 | ||
47 | #include <asm/system.h> | 48 | #include <asm/system.h> |
@@ -264,6 +265,8 @@ static void rfcomm_sock_init(struct sock *sk, struct sock *parent) | |||
264 | 265 | ||
265 | pi->sec_level = rfcomm_pi(parent)->sec_level; | 266 | pi->sec_level = rfcomm_pi(parent)->sec_level; |
266 | pi->role_switch = rfcomm_pi(parent)->role_switch; | 267 | pi->role_switch = rfcomm_pi(parent)->role_switch; |
268 | |||
269 | security_sk_clone(parent, sk); | ||
267 | } else { | 270 | } else { |
268 | pi->dlc->defer_setup = 0; | 271 | pi->dlc->defer_setup = 0; |
269 | 272 | ||
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 8270f05e3f1f..a324b009e34b 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/debugfs.h> | 41 | #include <linux/debugfs.h> |
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/list.h> | 43 | #include <linux/list.h> |
44 | #include <linux/security.h> | ||
44 | #include <net/sock.h> | 45 | #include <net/sock.h> |
45 | 46 | ||
46 | #include <asm/system.h> | 47 | #include <asm/system.h> |
@@ -403,8 +404,10 @@ static void sco_sock_init(struct sock *sk, struct sock *parent) | |||
403 | { | 404 | { |
404 | BT_DBG("sk %p", sk); | 405 | BT_DBG("sk %p", sk); |
405 | 406 | ||
406 | if (parent) | 407 | if (parent) { |
407 | sk->sk_type = parent->sk_type; | 408 | sk->sk_type = parent->sk_type; |
409 | security_sk_clone(parent, sk); | ||
410 | } | ||
408 | } | 411 | } |
409 | 412 | ||
410 | static struct proto sco_proto = { | 413 | static struct proto sco_proto = { |
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index 32b8f9f7f79e..ff3ed6086ce1 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c | |||
@@ -91,7 +91,6 @@ static int br_dev_open(struct net_device *dev) | |||
91 | { | 91 | { |
92 | struct net_bridge *br = netdev_priv(dev); | 92 | struct net_bridge *br = netdev_priv(dev); |
93 | 93 | ||
94 | netif_carrier_off(dev); | ||
95 | netdev_update_features(dev); | 94 | netdev_update_features(dev); |
96 | netif_start_queue(dev); | 95 | netif_start_queue(dev); |
97 | br_stp_enable_bridge(br); | 96 | br_stp_enable_bridge(br); |
@@ -108,8 +107,6 @@ static int br_dev_stop(struct net_device *dev) | |||
108 | { | 107 | { |
109 | struct net_bridge *br = netdev_priv(dev); | 108 | struct net_bridge *br = netdev_priv(dev); |
110 | 109 | ||
111 | netif_carrier_off(dev); | ||
112 | |||
113 | br_stp_disable_bridge(br); | 110 | br_stp_disable_bridge(br); |
114 | br_multicast_stop(br); | 111 | br_multicast_stop(br); |
115 | 112 | ||
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index e73815456adf..1d420f64ff27 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c | |||
@@ -161,9 +161,10 @@ static void del_nbp(struct net_bridge_port *p) | |||
161 | call_rcu(&p->rcu, destroy_nbp_rcu); | 161 | call_rcu(&p->rcu, destroy_nbp_rcu); |
162 | } | 162 | } |
163 | 163 | ||
164 | /* called with RTNL */ | 164 | /* Delete bridge device */ |
165 | static void del_br(struct net_bridge *br, struct list_head *head) | 165 | void br_dev_delete(struct net_device *dev, struct list_head *head) |
166 | { | 166 | { |
167 | struct net_bridge *br = netdev_priv(dev); | ||
167 | struct net_bridge_port *p, *n; | 168 | struct net_bridge_port *p, *n; |
168 | 169 | ||
169 | list_for_each_entry_safe(p, n, &br->port_list, list) { | 170 | list_for_each_entry_safe(p, n, &br->port_list, list) { |
@@ -268,7 +269,7 @@ int br_del_bridge(struct net *net, const char *name) | |||
268 | } | 269 | } |
269 | 270 | ||
270 | else | 271 | else |
271 | del_br(netdev_priv(dev), NULL); | 272 | br_dev_delete(dev, NULL); |
272 | 273 | ||
273 | rtnl_unlock(); | 274 | rtnl_unlock(); |
274 | return ret; | 275 | return ret; |
@@ -449,7 +450,7 @@ void __net_exit br_net_exit(struct net *net) | |||
449 | rtnl_lock(); | 450 | rtnl_lock(); |
450 | for_each_netdev(net, dev) | 451 | for_each_netdev(net, dev) |
451 | if (dev->priv_flags & IFF_EBRIDGE) | 452 | if (dev->priv_flags & IFF_EBRIDGE) |
452 | del_br(netdev_priv(dev), &list); | 453 | br_dev_delete(dev, &list); |
453 | 454 | ||
454 | unregister_netdevice_many(&list); | 455 | unregister_netdevice_many(&list); |
455 | rtnl_unlock(); | 456 | rtnl_unlock(); |
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 5b1ed1ba9aa7..e5f9ece3c9a0 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c | |||
@@ -210,6 +210,7 @@ static struct rtnl_link_ops br_link_ops __read_mostly = { | |||
210 | .priv_size = sizeof(struct net_bridge), | 210 | .priv_size = sizeof(struct net_bridge), |
211 | .setup = br_dev_setup, | 211 | .setup = br_dev_setup, |
212 | .validate = br_validate, | 212 | .validate = br_validate, |
213 | .dellink = br_dev_delete, | ||
213 | }; | 214 | }; |
214 | 215 | ||
215 | int __init br_netlink_init(void) | 216 | int __init br_netlink_init(void) |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 78cc364997d9..857a021deea9 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -294,6 +294,7 @@ static inline int br_is_root_bridge(const struct net_bridge *br) | |||
294 | 294 | ||
295 | /* br_device.c */ | 295 | /* br_device.c */ |
296 | extern void br_dev_setup(struct net_device *dev); | 296 | extern void br_dev_setup(struct net_device *dev); |
297 | extern void br_dev_delete(struct net_device *dev, struct list_head *list); | ||
297 | extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, | 298 | extern netdev_tx_t br_dev_xmit(struct sk_buff *skb, |
298 | struct net_device *dev); | 299 | struct net_device *dev); |
299 | #ifdef CONFIG_NET_POLL_CONTROLLER | 300 | #ifdef CONFIG_NET_POLL_CONTROLLER |
diff --git a/net/can/bcm.c b/net/can/bcm.c index d6c8ae5b2e6a..c84963d2dee6 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, | |||
344 | } | 344 | } |
345 | } | 345 | } |
346 | 346 | ||
347 | static void bcm_tx_start_timer(struct bcm_op *op) | ||
348 | { | ||
349 | if (op->kt_ival1.tv64 && op->count) | ||
350 | hrtimer_start(&op->timer, | ||
351 | ktime_add(ktime_get(), op->kt_ival1), | ||
352 | HRTIMER_MODE_ABS); | ||
353 | else if (op->kt_ival2.tv64) | ||
354 | hrtimer_start(&op->timer, | ||
355 | ktime_add(ktime_get(), op->kt_ival2), | ||
356 | HRTIMER_MODE_ABS); | ||
357 | } | ||
358 | |||
347 | static void bcm_tx_timeout_tsklet(unsigned long data) | 359 | static void bcm_tx_timeout_tsklet(unsigned long data) |
348 | { | 360 | { |
349 | struct bcm_op *op = (struct bcm_op *)data; | 361 | struct bcm_op *op = (struct bcm_op *)data; |
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data) | |||
365 | 377 | ||
366 | bcm_send_to_user(op, &msg_head, NULL, 0); | 378 | bcm_send_to_user(op, &msg_head, NULL, 0); |
367 | } | 379 | } |
368 | } | ||
369 | |||
370 | if (op->kt_ival1.tv64 && (op->count > 0)) { | ||
371 | |||
372 | /* send (next) frame */ | ||
373 | bcm_can_tx(op); | 380 | bcm_can_tx(op); |
374 | hrtimer_start(&op->timer, | ||
375 | ktime_add(ktime_get(), op->kt_ival1), | ||
376 | HRTIMER_MODE_ABS); | ||
377 | 381 | ||
378 | } else { | 382 | } else if (op->kt_ival2.tv64) |
379 | if (op->kt_ival2.tv64) { | 383 | bcm_can_tx(op); |
380 | 384 | ||
381 | /* send (next) frame */ | 385 | bcm_tx_start_timer(op); |
382 | bcm_can_tx(op); | ||
383 | hrtimer_start(&op->timer, | ||
384 | ktime_add(ktime_get(), op->kt_ival2), | ||
385 | HRTIMER_MODE_ABS); | ||
386 | } | ||
387 | } | ||
388 | } | 386 | } |
389 | 387 | ||
390 | /* | 388 | /* |
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
964 | hrtimer_cancel(&op->timer); | 962 | hrtimer_cancel(&op->timer); |
965 | } | 963 | } |
966 | 964 | ||
967 | if ((op->flags & STARTTIMER) && | 965 | if (op->flags & STARTTIMER) { |
968 | ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) { | 966 | hrtimer_cancel(&op->timer); |
969 | |||
970 | /* spec: send can_frame when starting timer */ | 967 | /* spec: send can_frame when starting timer */ |
971 | op->flags |= TX_ANNOUNCE; | 968 | op->flags |= TX_ANNOUNCE; |
972 | |||
973 | if (op->kt_ival1.tv64 && (op->count > 0)) { | ||
974 | /* op->count-- is done in bcm_tx_timeout_handler */ | ||
975 | hrtimer_start(&op->timer, op->kt_ival1, | ||
976 | HRTIMER_MODE_REL); | ||
977 | } else | ||
978 | hrtimer_start(&op->timer, op->kt_ival2, | ||
979 | HRTIMER_MODE_REL); | ||
980 | } | 969 | } |
981 | 970 | ||
982 | if (op->flags & TX_ANNOUNCE) | 971 | if (op->flags & TX_ANNOUNCE) { |
983 | bcm_can_tx(op); | 972 | bcm_can_tx(op); |
973 | if (op->count) | ||
974 | op->count--; | ||
975 | } | ||
976 | |||
977 | if (op->flags & STARTTIMER) | ||
978 | bcm_tx_start_timer(op); | ||
984 | 979 | ||
985 | return msg_head->nframes * CFSIZ + MHSIZ; | 980 | return msg_head->nframes * CFSIZ + MHSIZ; |
986 | } | 981 | } |
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 132963abc266..2883ea01e680 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt) | |||
232 | ceph_crypto_key_destroy(opt->key); | 232 | ceph_crypto_key_destroy(opt->key); |
233 | kfree(opt->key); | 233 | kfree(opt->key); |
234 | } | 234 | } |
235 | kfree(opt->mon_addr); | ||
235 | kfree(opt); | 236 | kfree(opt); |
236 | } | 237 | } |
237 | EXPORT_SYMBOL(ceph_destroy_options); | 238 | EXPORT_SYMBOL(ceph_destroy_options); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c340e2e0765b..9918e9eb276e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) | |||
2307 | m->front_max = front_len; | 2307 | m->front_max = front_len; |
2308 | m->front_is_vmalloc = false; | 2308 | m->front_is_vmalloc = false; |
2309 | m->more_to_follow = false; | 2309 | m->more_to_follow = false; |
2310 | m->ack_stamp = 0; | ||
2310 | m->pool = NULL; | 2311 | m->pool = NULL; |
2311 | 2312 | ||
2312 | /* middle */ | 2313 | /* middle */ |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 16836a7df7a6..88ad8a2501b5 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
217 | INIT_LIST_HEAD(&req->r_unsafe_item); | 217 | INIT_LIST_HEAD(&req->r_unsafe_item); |
218 | INIT_LIST_HEAD(&req->r_linger_item); | 218 | INIT_LIST_HEAD(&req->r_linger_item); |
219 | INIT_LIST_HEAD(&req->r_linger_osd); | 219 | INIT_LIST_HEAD(&req->r_linger_osd); |
220 | INIT_LIST_HEAD(&req->r_req_lru_item); | ||
220 | req->r_flags = flags; | 221 | req->r_flags = flags; |
221 | 222 | ||
222 | WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); | 223 | WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); |
@@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc, | |||
816 | { | 817 | { |
817 | req->r_tid = ++osdc->last_tid; | 818 | req->r_tid = ++osdc->last_tid; |
818 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); | 819 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); |
819 | INIT_LIST_HEAD(&req->r_req_lru_item); | ||
820 | |||
821 | dout("__register_request %p tid %lld\n", req, req->r_tid); | 820 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
822 | __insert_request(osdc, req); | 821 | __insert_request(osdc, req); |
823 | ceph_osdc_get_request(req); | 822 | ceph_osdc_get_request(req); |
824 | osdc->num_requests++; | 823 | osdc->num_requests++; |
825 | |||
826 | if (osdc->num_requests == 1) { | 824 | if (osdc->num_requests == 1) { |
827 | dout(" first request, scheduling timeout\n"); | 825 | dout(" first request, scheduling timeout\n"); |
828 | __schedule_osd_timeout(osdc); | 826 | __schedule_osd_timeout(osdc); |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index e97c3588c3ec..fd863fe76934 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new, | |||
339 | struct ceph_pg_mapping *pg = NULL; | 339 | struct ceph_pg_mapping *pg = NULL; |
340 | int c; | 340 | int c; |
341 | 341 | ||
342 | dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); | ||
342 | while (*p) { | 343 | while (*p) { |
343 | parent = *p; | 344 | parent = *p; |
344 | pg = rb_entry(parent, struct ceph_pg_mapping, node); | 345 | pg = rb_entry(parent, struct ceph_pg_mapping, node); |
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, | |||
366 | while (n) { | 367 | while (n) { |
367 | pg = rb_entry(n, struct ceph_pg_mapping, node); | 368 | pg = rb_entry(n, struct ceph_pg_mapping, node); |
368 | c = pgid_cmp(pgid, pg->pgid); | 369 | c = pgid_cmp(pgid, pg->pgid); |
369 | if (c < 0) | 370 | if (c < 0) { |
370 | n = n->rb_left; | 371 | n = n->rb_left; |
371 | else if (c > 0) | 372 | } else if (c > 0) { |
372 | n = n->rb_right; | 373 | n = n->rb_right; |
373 | else | 374 | } else { |
375 | dout("__lookup_pg_mapping %llx got %p\n", | ||
376 | *(u64 *)&pgid, pg); | ||
374 | return pg; | 377 | return pg; |
378 | } | ||
375 | } | 379 | } |
376 | return NULL; | 380 | return NULL; |
377 | } | 381 | } |
378 | 382 | ||
383 | static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) | ||
384 | { | ||
385 | struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); | ||
386 | |||
387 | if (pg) { | ||
388 | dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg); | ||
389 | rb_erase(&pg->node, root); | ||
390 | kfree(pg); | ||
391 | return 0; | ||
392 | } | ||
393 | dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid); | ||
394 | return -ENOENT; | ||
395 | } | ||
396 | |||
379 | /* | 397 | /* |
380 | * rbtree of pg pool info | 398 | * rbtree of pg pool info |
381 | */ | 399 | */ |
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
711 | void *start = *p; | 729 | void *start = *p; |
712 | int err = -EINVAL; | 730 | int err = -EINVAL; |
713 | u16 version; | 731 | u16 version; |
714 | struct rb_node *rbp; | ||
715 | 732 | ||
716 | ceph_decode_16_safe(p, end, version, bad); | 733 | ceph_decode_16_safe(p, end, version, bad); |
717 | if (version > CEPH_OSDMAP_INC_VERSION) { | 734 | if (version > CEPH_OSDMAP_INC_VERSION) { |
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
861 | } | 878 | } |
862 | 879 | ||
863 | /* new_pg_temp */ | 880 | /* new_pg_temp */ |
864 | rbp = rb_first(&map->pg_temp); | ||
865 | ceph_decode_32_safe(p, end, len, bad); | 881 | ceph_decode_32_safe(p, end, len, bad); |
866 | while (len--) { | 882 | while (len--) { |
867 | struct ceph_pg_mapping *pg; | 883 | struct ceph_pg_mapping *pg; |
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
872 | ceph_decode_copy(p, &pgid, sizeof(pgid)); | 888 | ceph_decode_copy(p, &pgid, sizeof(pgid)); |
873 | pglen = ceph_decode_32(p); | 889 | pglen = ceph_decode_32(p); |
874 | 890 | ||
875 | /* remove any? */ | ||
876 | while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, | ||
877 | node)->pgid, pgid) <= 0) { | ||
878 | struct ceph_pg_mapping *cur = | ||
879 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
880 | |||
881 | rbp = rb_next(rbp); | ||
882 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); | ||
883 | rb_erase(&cur->node, &map->pg_temp); | ||
884 | kfree(cur); | ||
885 | } | ||
886 | |||
887 | if (pglen) { | 891 | if (pglen) { |
888 | /* insert */ | 892 | /* insert */ |
889 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); | 893 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); |
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
903 | } | 907 | } |
904 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, | 908 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, |
905 | pglen); | 909 | pglen); |
910 | } else { | ||
911 | /* remove */ | ||
912 | __remove_pg_mapping(&map->pg_temp, pgid); | ||
906 | } | 913 | } |
907 | } | 914 | } |
908 | while (rbp) { | ||
909 | struct ceph_pg_mapping *cur = | ||
910 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
911 | |||
912 | rbp = rb_next(rbp); | ||
913 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); | ||
914 | rb_erase(&cur->node, &map->pg_temp); | ||
915 | kfree(cur); | ||
916 | } | ||
917 | 915 | ||
918 | /* ignore the rest */ | 916 | /* ignore the rest */ |
919 | *p = end; | 917 | *p = end; |
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1046 | struct ceph_pg_mapping *pg; | 1044 | struct ceph_pg_mapping *pg; |
1047 | struct ceph_pg_pool_info *pool; | 1045 | struct ceph_pg_pool_info *pool; |
1048 | int ruleno; | 1046 | int ruleno; |
1049 | unsigned poolid, ps, pps; | 1047 | unsigned poolid, ps, pps, t; |
1050 | int preferred; | 1048 | int preferred; |
1051 | 1049 | ||
1050 | poolid = le32_to_cpu(pgid.pool); | ||
1051 | ps = le16_to_cpu(pgid.ps); | ||
1052 | preferred = (s16)le16_to_cpu(pgid.preferred); | ||
1053 | |||
1054 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | ||
1055 | if (!pool) | ||
1056 | return NULL; | ||
1057 | |||
1052 | /* pg_temp? */ | 1058 | /* pg_temp? */ |
1059 | if (preferred >= 0) | ||
1060 | t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num), | ||
1061 | pool->lpgp_num_mask); | ||
1062 | else | ||
1063 | t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), | ||
1064 | pool->pgp_num_mask); | ||
1065 | pgid.ps = cpu_to_le16(t); | ||
1053 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); | 1066 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); |
1054 | if (pg) { | 1067 | if (pg) { |
1055 | *num = pg->len; | 1068 | *num = pg->len; |
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1057 | } | 1070 | } |
1058 | 1071 | ||
1059 | /* crush */ | 1072 | /* crush */ |
1060 | poolid = le32_to_cpu(pgid.pool); | ||
1061 | ps = le16_to_cpu(pgid.ps); | ||
1062 | preferred = (s16)le16_to_cpu(pgid.preferred); | ||
1063 | |||
1064 | /* don't forcefeed bad device ids to crush */ | ||
1065 | if (preferred >= osdmap->max_osd || | ||
1066 | preferred >= osdmap->crush->max_devices) | ||
1067 | preferred = -1; | ||
1068 | |||
1069 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | ||
1070 | if (!pool) | ||
1071 | return NULL; | ||
1072 | ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, | 1073 | ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, |
1073 | pool->v.type, pool->v.size); | 1074 | pool->v.type, pool->v.size); |
1074 | if (ruleno < 0) { | 1075 | if (ruleno < 0) { |
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1078 | return NULL; | 1079 | return NULL; |
1079 | } | 1080 | } |
1080 | 1081 | ||
1082 | /* don't forcefeed bad device ids to crush */ | ||
1083 | if (preferred >= osdmap->max_osd || | ||
1084 | preferred >= osdmap->crush->max_devices) | ||
1085 | preferred = -1; | ||
1086 | |||
1081 | if (preferred >= 0) | 1087 | if (preferred >= 0) |
1082 | pps = ceph_stable_mod(ps, | 1088 | pps = ceph_stable_mod(ps, |
1083 | le32_to_cpu(pool->v.lpgp_num), | 1089 | le32_to_cpu(pool->v.lpgp_num), |
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 3231b468bb72..27071ee2a4e1 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c | |||
@@ -475,8 +475,11 @@ static int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr* nlh, void *arg) | |||
475 | 475 | ||
476 | list_del_rcu(&rule->list); | 476 | list_del_rcu(&rule->list); |
477 | 477 | ||
478 | if (rule->action == FR_ACT_GOTO) | 478 | if (rule->action == FR_ACT_GOTO) { |
479 | ops->nr_goto_rules--; | 479 | ops->nr_goto_rules--; |
480 | if (rtnl_dereference(rule->ctarget) == NULL) | ||
481 | ops->unresolved_rules--; | ||
482 | } | ||
480 | 483 | ||
481 | /* | 484 | /* |
482 | * Check if this rule is a target to any of them. If so, | 485 | * Check if this rule is a target to any of them. If so, |
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 21fab3edb92c..d73aab3fbfc0 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1389,9 +1389,7 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, | |||
1389 | 1389 | ||
1390 | BUG_ON(!pcount); | 1390 | BUG_ON(!pcount); |
1391 | 1391 | ||
1392 | /* Tweak before seqno plays */ | 1392 | if (skb == tp->lost_skb_hint) |
1393 | if (!tcp_is_fack(tp) && tcp_is_sack(tp) && tp->lost_skb_hint && | ||
1394 | !before(TCP_SKB_CB(tp->lost_skb_hint)->seq, TCP_SKB_CB(skb)->seq)) | ||
1395 | tp->lost_cnt_hint += pcount; | 1393 | tp->lost_cnt_hint += pcount; |
1396 | 1394 | ||
1397 | TCP_SKB_CB(prev)->end_seq += shifted; | 1395 | TCP_SKB_CB(prev)->end_seq += shifted; |
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index c34f01513945..7963e03f1068 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c | |||
@@ -927,18 +927,21 @@ int tcp_v4_md5_do_add(struct sock *sk, __be32 addr, | |||
927 | } | 927 | } |
928 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); | 928 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
929 | } | 929 | } |
930 | if (tcp_alloc_md5sig_pool(sk) == NULL) { | 930 | |
931 | md5sig = tp->md5sig_info; | ||
932 | if (md5sig->entries4 == 0 && | ||
933 | tcp_alloc_md5sig_pool(sk) == NULL) { | ||
931 | kfree(newkey); | 934 | kfree(newkey); |
932 | return -ENOMEM; | 935 | return -ENOMEM; |
933 | } | 936 | } |
934 | md5sig = tp->md5sig_info; | ||
935 | 937 | ||
936 | if (md5sig->alloced4 == md5sig->entries4) { | 938 | if (md5sig->alloced4 == md5sig->entries4) { |
937 | keys = kmalloc((sizeof(*keys) * | 939 | keys = kmalloc((sizeof(*keys) * |
938 | (md5sig->entries4 + 1)), GFP_ATOMIC); | 940 | (md5sig->entries4 + 1)), GFP_ATOMIC); |
939 | if (!keys) { | 941 | if (!keys) { |
940 | kfree(newkey); | 942 | kfree(newkey); |
941 | tcp_free_md5sig_pool(); | 943 | if (md5sig->entries4 == 0) |
944 | tcp_free_md5sig_pool(); | ||
942 | return -ENOMEM; | 945 | return -ENOMEM; |
943 | } | 946 | } |
944 | 947 | ||
@@ -982,6 +985,7 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
982 | kfree(tp->md5sig_info->keys4); | 985 | kfree(tp->md5sig_info->keys4); |
983 | tp->md5sig_info->keys4 = NULL; | 986 | tp->md5sig_info->keys4 = NULL; |
984 | tp->md5sig_info->alloced4 = 0; | 987 | tp->md5sig_info->alloced4 = 0; |
988 | tcp_free_md5sig_pool(); | ||
985 | } else if (tp->md5sig_info->entries4 != i) { | 989 | } else if (tp->md5sig_info->entries4 != i) { |
986 | /* Need to do some manipulation */ | 990 | /* Need to do some manipulation */ |
987 | memmove(&tp->md5sig_info->keys4[i], | 991 | memmove(&tp->md5sig_info->keys4[i], |
@@ -989,7 +993,6 @@ int tcp_v4_md5_do_del(struct sock *sk, __be32 addr) | |||
989 | (tp->md5sig_info->entries4 - i) * | 993 | (tp->md5sig_info->entries4 - i) * |
990 | sizeof(struct tcp4_md5sig_key)); | 994 | sizeof(struct tcp4_md5sig_key)); |
991 | } | 995 | } |
992 | tcp_free_md5sig_pool(); | ||
993 | return 0; | 996 | return 0; |
994 | } | 997 | } |
995 | } | 998 | } |
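The tcp_v4_md5_do_add()/do_del() hunks above tie the MD5 signature pool's lifetime to the key count: the pool is allocated only when the first IPv4 key is installed and freed only when the last one is removed, instead of on every add and delete. A hedged sketch of that first-alloc/last-free discipline on a generic shared resource; shared_pool, key_add and key_del are made-up names:

    #include <stdio.h>
    #include <stdlib.h>

    static void *shared_pool;    /* stands in for the shared signature pool */
    static int   nr_keys;        /* number of configured keys */

    static int key_add(void)
    {
        /* Allocate the pool only when the first key is added. */
        if (nr_keys == 0) {
            shared_pool = malloc(64);
            if (!shared_pool)
                return -1;
            printf("pool allocated\n");
        }
        nr_keys++;
        return 0;
    }

    static void key_del(void)
    {
        if (nr_keys == 0)
            return;
        /* Free the pool only when the last key goes away. */
        if (--nr_keys == 0) {
            free(shared_pool);
            shared_pool = NULL;
            printf("pool freed\n");
        }
    }

    int main(void)
    {
        key_add();   /* pool allocated */
        key_add();   /* reuses the pool */
        key_del();   /* pool kept: one key left */
        key_del();   /* pool freed */
        return 0;
    }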
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index d2fe4e06b472..0ce3d06dce60 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c | |||
@@ -328,6 +328,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) | |||
328 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); | 328 | struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); |
329 | const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); | 329 | const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1); |
330 | 330 | ||
331 | tw->tw_transparent = inet_sk(sk)->transparent; | ||
331 | tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; | 332 | tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale; |
332 | tcptw->tw_rcv_nxt = tp->rcv_nxt; | 333 | tcptw->tw_rcv_nxt = tp->rcv_nxt; |
333 | tcptw->tw_snd_nxt = tp->snd_nxt; | 334 | tcptw->tw_snd_nxt = tp->snd_nxt; |
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 3b5669a2582d..d27c797f9f05 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c | |||
@@ -875,6 +875,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head, | |||
875 | skb_reset_transport_header(skb); | 875 | skb_reset_transport_header(skb); |
876 | __skb_push(skb, skb_gro_offset(skb)); | 876 | __skb_push(skb, skb_gro_offset(skb)); |
877 | 877 | ||
878 | ops = rcu_dereference(inet6_protos[proto]); | ||
878 | if (!ops || !ops->gro_receive) | 879 | if (!ops || !ops->gro_receive) |
879 | goto out_unlock; | 880 | goto out_unlock; |
880 | 881 | ||
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 705c82886281..def0538e2413 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, | |||
696 | int err; | 696 | int err; |
697 | 697 | ||
698 | err = ip6mr_fib_lookup(net, &fl6, &mrt); | 698 | err = ip6mr_fib_lookup(net, &fl6, &mrt); |
699 | if (err < 0) | 699 | if (err < 0) { |
700 | kfree_skb(skb); | ||
700 | return err; | 701 | return err; |
702 | } | ||
701 | 703 | ||
702 | read_lock(&mrt_lock); | 704 | read_lock(&mrt_lock); |
703 | dev->stats.tx_bytes += skb->len; | 705 | dev->stats.tx_bytes += skb->len; |
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb) | |||
2052 | int err; | 2054 | int err; |
2053 | 2055 | ||
2054 | err = ip6mr_fib_lookup(net, &fl6, &mrt); | 2056 | err = ip6mr_fib_lookup(net, &fl6, &mrt); |
2055 | if (err < 0) | 2057 | if (err < 0) { |
2058 | kfree_skb(skb); | ||
2056 | return err; | 2059 | return err; |
2060 | } | ||
2057 | 2061 | ||
2058 | read_lock(&mrt_lock); | 2062 | read_lock(&mrt_lock); |
2059 | cache = ip6mr_cache_find(mrt, | 2063 | cache = ip6mr_cache_find(mrt, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1250f9020670..fb545edef6ea 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -244,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, | |||
244 | { | 244 | { |
245 | struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); | 245 | struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); |
246 | 246 | ||
247 | memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); | 247 | if (rt != NULL) |
248 | memset(&rt->rt6i_table, 0, | ||
249 | sizeof(*rt) - sizeof(struct dst_entry)); | ||
248 | 250 | ||
249 | return rt; | 251 | return rt; |
250 | } | 252 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3c9fa618b69d..7b8fc5794352 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -591,7 +591,8 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, | |||
591 | } | 591 | } |
592 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); | 592 | sk_nocaps_add(sk, NETIF_F_GSO_MASK); |
593 | } | 593 | } |
594 | if (tcp_alloc_md5sig_pool(sk) == NULL) { | 594 | if (tp->md5sig_info->entries6 == 0 && |
595 | tcp_alloc_md5sig_pool(sk) == NULL) { | ||
595 | kfree(newkey); | 596 | kfree(newkey); |
596 | return -ENOMEM; | 597 | return -ENOMEM; |
597 | } | 598 | } |
@@ -600,8 +601,9 @@ static int tcp_v6_md5_do_add(struct sock *sk, const struct in6_addr *peer, | |||
600 | (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); | 601 | (tp->md5sig_info->entries6 + 1)), GFP_ATOMIC); |
601 | 602 | ||
602 | if (!keys) { | 603 | if (!keys) { |
603 | tcp_free_md5sig_pool(); | ||
604 | kfree(newkey); | 604 | kfree(newkey); |
605 | if (tp->md5sig_info->entries6 == 0) | ||
606 | tcp_free_md5sig_pool(); | ||
605 | return -ENOMEM; | 607 | return -ENOMEM; |
606 | } | 608 | } |
607 | 609 | ||
@@ -647,6 +649,7 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) | |||
647 | kfree(tp->md5sig_info->keys6); | 649 | kfree(tp->md5sig_info->keys6); |
648 | tp->md5sig_info->keys6 = NULL; | 650 | tp->md5sig_info->keys6 = NULL; |
649 | tp->md5sig_info->alloced6 = 0; | 651 | tp->md5sig_info->alloced6 = 0; |
652 | tcp_free_md5sig_pool(); | ||
650 | } else { | 653 | } else { |
651 | /* shrink the database */ | 654 | /* shrink the database */ |
652 | if (tp->md5sig_info->entries6 != i) | 655 | if (tp->md5sig_info->entries6 != i) |
@@ -655,7 +658,6 @@ static int tcp_v6_md5_do_del(struct sock *sk, const struct in6_addr *peer) | |||
655 | (tp->md5sig_info->entries6 - i) | 658 | (tp->md5sig_info->entries6 - i) |
656 | * sizeof (tp->md5sig_info->keys6[0])); | 659 | * sizeof (tp->md5sig_info->keys6[0])); |
657 | } | 660 | } |
658 | tcp_free_md5sig_pool(); | ||
659 | return 0; | 661 | return 0; |
660 | } | 662 | } |
661 | } | 663 | } |
@@ -1383,6 +1385,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1383 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; | 1385 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; |
1384 | #endif | 1386 | #endif |
1385 | 1387 | ||
1388 | newnp->ipv6_ac_list = NULL; | ||
1389 | newnp->ipv6_fl_list = NULL; | ||
1386 | newnp->pktoptions = NULL; | 1390 | newnp->pktoptions = NULL; |
1387 | newnp->opt = NULL; | 1391 | newnp->opt = NULL; |
1388 | newnp->mcast_oif = inet6_iif(skb); | 1392 | newnp->mcast_oif = inet6_iif(skb); |
@@ -1447,6 +1451,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1447 | First: no IPv4 options. | 1451 | First: no IPv4 options. |
1448 | */ | 1452 | */ |
1449 | newinet->inet_opt = NULL; | 1453 | newinet->inet_opt = NULL; |
1454 | newnp->ipv6_ac_list = NULL; | ||
1450 | newnp->ipv6_fl_list = NULL; | 1455 | newnp->ipv6_fl_list = NULL; |
1451 | 1456 | ||
1452 | /* Clone RX bits */ | 1457 | /* Clone RX bits */ |
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index ad4ac2601a56..34b2ddeacb67 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -1045,8 +1045,10 @@ int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len | |||
1045 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + | 1045 | headroom = NET_SKB_PAD + sizeof(struct iphdr) + |
1046 | uhlen + hdr_len; | 1046 | uhlen + hdr_len; |
1047 | old_headroom = skb_headroom(skb); | 1047 | old_headroom = skb_headroom(skb); |
1048 | if (skb_cow_head(skb, headroom)) | 1048 | if (skb_cow_head(skb, headroom)) { |
1049 | dev_kfree_skb(skb); | ||
1049 | goto abort; | 1050 | goto abort; |
1051 | } | ||
1050 | 1052 | ||
1051 | new_headroom = skb_headroom(skb); | 1053 | new_headroom = skb_headroom(skb); |
1052 | skb_orphan(skb); | 1054 | skb_orphan(skb); |
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 2b771dc708a3..e3be48bf4dcd 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c | |||
@@ -2283,6 +2283,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2283 | struct ip_vs_service *svc; | 2283 | struct ip_vs_service *svc; |
2284 | struct ip_vs_dest_user *udest_compat; | 2284 | struct ip_vs_dest_user *udest_compat; |
2285 | struct ip_vs_dest_user_kern udest; | 2285 | struct ip_vs_dest_user_kern udest; |
2286 | struct netns_ipvs *ipvs = net_ipvs(net); | ||
2286 | 2287 | ||
2287 | if (!capable(CAP_NET_ADMIN)) | 2288 | if (!capable(CAP_NET_ADMIN)) |
2288 | return -EPERM; | 2289 | return -EPERM; |
@@ -2303,6 +2304,24 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2303 | /* increase the module use count */ | 2304 | /* increase the module use count */ |
2304 | ip_vs_use_count_inc(); | 2305 | ip_vs_use_count_inc(); |
2305 | 2306 | ||
2307 | /* Handle daemons since they have another lock */ | ||
2308 | if (cmd == IP_VS_SO_SET_STARTDAEMON || | ||
2309 | cmd == IP_VS_SO_SET_STOPDAEMON) { | ||
2310 | struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; | ||
2311 | |||
2312 | if (mutex_lock_interruptible(&ipvs->sync_mutex)) { | ||
2313 | ret = -ERESTARTSYS; | ||
2314 | goto out_dec; | ||
2315 | } | ||
2316 | if (cmd == IP_VS_SO_SET_STARTDAEMON) | ||
2317 | ret = start_sync_thread(net, dm->state, dm->mcast_ifn, | ||
2318 | dm->syncid); | ||
2319 | else | ||
2320 | ret = stop_sync_thread(net, dm->state); | ||
2321 | mutex_unlock(&ipvs->sync_mutex); | ||
2322 | goto out_dec; | ||
2323 | } | ||
2324 | |||
2306 | if (mutex_lock_interruptible(&__ip_vs_mutex)) { | 2325 | if (mutex_lock_interruptible(&__ip_vs_mutex)) { |
2307 | ret = -ERESTARTSYS; | 2326 | ret = -ERESTARTSYS; |
2308 | goto out_dec; | 2327 | goto out_dec; |
@@ -2316,15 +2335,6 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) | |||
2316 | /* Set timeout values for (tcp tcpfin udp) */ | 2335 | /* Set timeout values for (tcp tcpfin udp) */ |
2317 | ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); | 2336 | ret = ip_vs_set_timeout(net, (struct ip_vs_timeout_user *)arg); |
2318 | goto out_unlock; | 2337 | goto out_unlock; |
2319 | } else if (cmd == IP_VS_SO_SET_STARTDAEMON) { | ||
2320 | struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; | ||
2321 | ret = start_sync_thread(net, dm->state, dm->mcast_ifn, | ||
2322 | dm->syncid); | ||
2323 | goto out_unlock; | ||
2324 | } else if (cmd == IP_VS_SO_SET_STOPDAEMON) { | ||
2325 | struct ip_vs_daemon_user *dm = (struct ip_vs_daemon_user *)arg; | ||
2326 | ret = stop_sync_thread(net, dm->state); | ||
2327 | goto out_unlock; | ||
2328 | } | 2338 | } |
2329 | 2339 | ||
2330 | usvc_compat = (struct ip_vs_service_user *)arg; | 2340 | usvc_compat = (struct ip_vs_service_user *)arg; |
@@ -2584,6 +2594,33 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2584 | 2594 | ||
2585 | if (copy_from_user(arg, user, copylen) != 0) | 2595 | if (copy_from_user(arg, user, copylen) != 0) |
2586 | return -EFAULT; | 2596 | return -EFAULT; |
2597 | /* | ||
2598 | * Handle daemons first since it has its own locking | ||
2599 | */ | ||
2600 | if (cmd == IP_VS_SO_GET_DAEMON) { | ||
2601 | struct ip_vs_daemon_user d[2]; | ||
2602 | |||
2603 | memset(&d, 0, sizeof(d)); | ||
2604 | if (mutex_lock_interruptible(&ipvs->sync_mutex)) | ||
2605 | return -ERESTARTSYS; | ||
2606 | |||
2607 | if (ipvs->sync_state & IP_VS_STATE_MASTER) { | ||
2608 | d[0].state = IP_VS_STATE_MASTER; | ||
2609 | strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, | ||
2610 | sizeof(d[0].mcast_ifn)); | ||
2611 | d[0].syncid = ipvs->master_syncid; | ||
2612 | } | ||
2613 | if (ipvs->sync_state & IP_VS_STATE_BACKUP) { | ||
2614 | d[1].state = IP_VS_STATE_BACKUP; | ||
2615 | strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, | ||
2616 | sizeof(d[1].mcast_ifn)); | ||
2617 | d[1].syncid = ipvs->backup_syncid; | ||
2618 | } | ||
2619 | if (copy_to_user(user, &d, sizeof(d)) != 0) | ||
2620 | ret = -EFAULT; | ||
2621 | mutex_unlock(&ipvs->sync_mutex); | ||
2622 | return ret; | ||
2623 | } | ||
2587 | 2624 | ||
2588 | if (mutex_lock_interruptible(&__ip_vs_mutex)) | 2625 | if (mutex_lock_interruptible(&__ip_vs_mutex)) |
2589 | return -ERESTARTSYS; | 2626 | return -ERESTARTSYS; |
@@ -2681,28 +2718,6 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len) | |||
2681 | } | 2718 | } |
2682 | break; | 2719 | break; |
2683 | 2720 | ||
2684 | case IP_VS_SO_GET_DAEMON: | ||
2685 | { | ||
2686 | struct ip_vs_daemon_user d[2]; | ||
2687 | |||
2688 | memset(&d, 0, sizeof(d)); | ||
2689 | if (ipvs->sync_state & IP_VS_STATE_MASTER) { | ||
2690 | d[0].state = IP_VS_STATE_MASTER; | ||
2691 | strlcpy(d[0].mcast_ifn, ipvs->master_mcast_ifn, | ||
2692 | sizeof(d[0].mcast_ifn)); | ||
2693 | d[0].syncid = ipvs->master_syncid; | ||
2694 | } | ||
2695 | if (ipvs->sync_state & IP_VS_STATE_BACKUP) { | ||
2696 | d[1].state = IP_VS_STATE_BACKUP; | ||
2697 | strlcpy(d[1].mcast_ifn, ipvs->backup_mcast_ifn, | ||
2698 | sizeof(d[1].mcast_ifn)); | ||
2699 | d[1].syncid = ipvs->backup_syncid; | ||
2700 | } | ||
2701 | if (copy_to_user(user, &d, sizeof(d)) != 0) | ||
2702 | ret = -EFAULT; | ||
2703 | } | ||
2704 | break; | ||
2705 | |||
2706 | default: | 2721 | default: |
2707 | ret = -EINVAL; | 2722 | ret = -EINVAL; |
2708 | } | 2723 | } |
@@ -3205,7 +3220,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | |||
3205 | struct net *net = skb_sknet(skb); | 3220 | struct net *net = skb_sknet(skb); |
3206 | struct netns_ipvs *ipvs = net_ipvs(net); | 3221 | struct netns_ipvs *ipvs = net_ipvs(net); |
3207 | 3222 | ||
3208 | mutex_lock(&__ip_vs_mutex); | 3223 | mutex_lock(&ipvs->sync_mutex); |
3209 | if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { | 3224 | if ((ipvs->sync_state & IP_VS_STATE_MASTER) && !cb->args[0]) { |
3210 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, | 3225 | if (ip_vs_genl_dump_daemon(skb, IP_VS_STATE_MASTER, |
3211 | ipvs->master_mcast_ifn, | 3226 | ipvs->master_mcast_ifn, |
@@ -3225,7 +3240,7 @@ static int ip_vs_genl_dump_daemons(struct sk_buff *skb, | |||
3225 | } | 3240 | } |
3226 | 3241 | ||
3227 | nla_put_failure: | 3242 | nla_put_failure: |
3228 | mutex_unlock(&__ip_vs_mutex); | 3243 | mutex_unlock(&ipvs->sync_mutex); |
3229 | 3244 | ||
3230 | return skb->len; | 3245 | return skb->len; |
3231 | } | 3246 | } |
@@ -3271,13 +3286,9 @@ static int ip_vs_genl_set_config(struct net *net, struct nlattr **attrs) | |||
3271 | return ip_vs_set_timeout(net, &t); | 3286 | return ip_vs_set_timeout(net, &t); |
3272 | } | 3287 | } |
3273 | 3288 | ||
3274 | static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | 3289 | static int ip_vs_genl_set_daemon(struct sk_buff *skb, struct genl_info *info) |
3275 | { | 3290 | { |
3276 | struct ip_vs_service *svc = NULL; | ||
3277 | struct ip_vs_service_user_kern usvc; | ||
3278 | struct ip_vs_dest_user_kern udest; | ||
3279 | int ret = 0, cmd; | 3291 | int ret = 0, cmd; |
3280 | int need_full_svc = 0, need_full_dest = 0; | ||
3281 | struct net *net; | 3292 | struct net *net; |
3282 | struct netns_ipvs *ipvs; | 3293 | struct netns_ipvs *ipvs; |
3283 | 3294 | ||
@@ -3285,19 +3296,10 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | |||
3285 | ipvs = net_ipvs(net); | 3296 | ipvs = net_ipvs(net); |
3286 | cmd = info->genlhdr->cmd; | 3297 | cmd = info->genlhdr->cmd; |
3287 | 3298 | ||
3288 | mutex_lock(&__ip_vs_mutex); | 3299 | if (cmd == IPVS_CMD_NEW_DAEMON || cmd == IPVS_CMD_DEL_DAEMON) { |
3289 | |||
3290 | if (cmd == IPVS_CMD_FLUSH) { | ||
3291 | ret = ip_vs_flush(net); | ||
3292 | goto out; | ||
3293 | } else if (cmd == IPVS_CMD_SET_CONFIG) { | ||
3294 | ret = ip_vs_genl_set_config(net, info->attrs); | ||
3295 | goto out; | ||
3296 | } else if (cmd == IPVS_CMD_NEW_DAEMON || | ||
3297 | cmd == IPVS_CMD_DEL_DAEMON) { | ||
3298 | |||
3299 | struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; | 3300 | struct nlattr *daemon_attrs[IPVS_DAEMON_ATTR_MAX + 1]; |
3300 | 3301 | ||
3302 | mutex_lock(&ipvs->sync_mutex); | ||
3301 | if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || | 3303 | if (!info->attrs[IPVS_CMD_ATTR_DAEMON] || |
3302 | nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, | 3304 | nla_parse_nested(daemon_attrs, IPVS_DAEMON_ATTR_MAX, |
3303 | info->attrs[IPVS_CMD_ATTR_DAEMON], | 3305 | info->attrs[IPVS_CMD_ATTR_DAEMON], |
@@ -3310,6 +3312,33 @@ static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | |||
3310 | ret = ip_vs_genl_new_daemon(net, daemon_attrs); | 3312 | ret = ip_vs_genl_new_daemon(net, daemon_attrs); |
3311 | else | 3313 | else |
3312 | ret = ip_vs_genl_del_daemon(net, daemon_attrs); | 3314 | ret = ip_vs_genl_del_daemon(net, daemon_attrs); |
3315 | out: | ||
3316 | mutex_unlock(&ipvs->sync_mutex); | ||
3317 | } | ||
3318 | return ret; | ||
3319 | } | ||
3320 | |||
3321 | static int ip_vs_genl_set_cmd(struct sk_buff *skb, struct genl_info *info) | ||
3322 | { | ||
3323 | struct ip_vs_service *svc = NULL; | ||
3324 | struct ip_vs_service_user_kern usvc; | ||
3325 | struct ip_vs_dest_user_kern udest; | ||
3326 | int ret = 0, cmd; | ||
3327 | int need_full_svc = 0, need_full_dest = 0; | ||
3328 | struct net *net; | ||
3329 | struct netns_ipvs *ipvs; | ||
3330 | |||
3331 | net = skb_sknet(skb); | ||
3332 | ipvs = net_ipvs(net); | ||
3333 | cmd = info->genlhdr->cmd; | ||
3334 | |||
3335 | mutex_lock(&__ip_vs_mutex); | ||
3336 | |||
3337 | if (cmd == IPVS_CMD_FLUSH) { | ||
3338 | ret = ip_vs_flush(net); | ||
3339 | goto out; | ||
3340 | } else if (cmd == IPVS_CMD_SET_CONFIG) { | ||
3341 | ret = ip_vs_genl_set_config(net, info->attrs); | ||
3313 | goto out; | 3342 | goto out; |
3314 | } else if (cmd == IPVS_CMD_ZERO && | 3343 | } else if (cmd == IPVS_CMD_ZERO && |
3315 | !info->attrs[IPVS_CMD_ATTR_SERVICE]) { | 3344 | !info->attrs[IPVS_CMD_ATTR_SERVICE]) { |
@@ -3536,13 +3565,13 @@ static struct genl_ops ip_vs_genl_ops[] __read_mostly = { | |||
3536 | .cmd = IPVS_CMD_NEW_DAEMON, | 3565 | .cmd = IPVS_CMD_NEW_DAEMON, |
3537 | .flags = GENL_ADMIN_PERM, | 3566 | .flags = GENL_ADMIN_PERM, |
3538 | .policy = ip_vs_cmd_policy, | 3567 | .policy = ip_vs_cmd_policy, |
3539 | .doit = ip_vs_genl_set_cmd, | 3568 | .doit = ip_vs_genl_set_daemon, |
3540 | }, | 3569 | }, |
3541 | { | 3570 | { |
3542 | .cmd = IPVS_CMD_DEL_DAEMON, | 3571 | .cmd = IPVS_CMD_DEL_DAEMON, |
3543 | .flags = GENL_ADMIN_PERM, | 3572 | .flags = GENL_ADMIN_PERM, |
3544 | .policy = ip_vs_cmd_policy, | 3573 | .policy = ip_vs_cmd_policy, |
3545 | .doit = ip_vs_genl_set_cmd, | 3574 | .doit = ip_vs_genl_set_daemon, |
3546 | }, | 3575 | }, |
3547 | { | 3576 | { |
3548 | .cmd = IPVS_CMD_GET_DAEMON, | 3577 | .cmd = IPVS_CMD_GET_DAEMON, |
@@ -3679,7 +3708,7 @@ int __net_init ip_vs_control_net_init(struct net *net) | |||
3679 | int idx; | 3708 | int idx; |
3680 | struct netns_ipvs *ipvs = net_ipvs(net); | 3709 | struct netns_ipvs *ipvs = net_ipvs(net); |
3681 | 3710 | ||
3682 | ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock); | 3711 | rwlock_init(&ipvs->rs_lock); |
3683 | 3712 | ||
3684 | /* Initialize rs_table */ | 3713 | /* Initialize rs_table */ |
3685 | for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) | 3714 | for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++) |
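The ip_vs_ctl.c hunks above move the sync-daemon paths (IP_VS_SO_GET_DAEMON, the new ip_vs_genl_set_daemon, ip_vs_genl_dump_daemons) off the global __ip_vs_mutex and onto the per-netns sync_mutex, so daemon start/stop no longer serializes against unrelated service configuration. A minimal userspace sketch of that split, with invented names (config_mutex, sync_mutex and the two state fields) standing in for the kernel objects:

/* Sketch only: two independent subsystems, each behind its own mutex,
 * so slow work on one (e.g. stopping a sync daemon) cannot block
 * unrelated configuration updates.  Not the kernel API. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t config_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sync_mutex   = PTHREAD_MUTEX_INITIALIZER;

static int service_count;	/* guarded by config_mutex */
static int sync_state;		/* guarded by sync_mutex   */

static void add_service(void)
{
	pthread_mutex_lock(&config_mutex);
	service_count++;
	pthread_mutex_unlock(&config_mutex);
}

static void start_sync_daemon(void)
{
	pthread_mutex_lock(&sync_mutex);
	sync_state = 1;		/* slow daemon setup would happen here */
	pthread_mutex_unlock(&sync_mutex);
}

int main(void)
{
	add_service();
	start_sync_daemon();
	printf("services=%d sync=%d\n", service_count, sync_state);
	return 0;
}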
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 7ee7215b8ba0..3cdd479f9b5d 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c | |||
@@ -61,6 +61,7 @@ | |||
61 | 61 | ||
62 | #define SYNC_PROTO_VER 1 /* Protocol version in header */ | 62 | #define SYNC_PROTO_VER 1 /* Protocol version in header */ |
63 | 63 | ||
64 | static struct lock_class_key __ipvs_sync_key; | ||
64 | /* | 65 | /* |
65 | * IPVS sync connection entry | 66 | * IPVS sync connection entry |
66 | * Version 0, i.e. original version. | 67 | * Version 0, i.e. original version. |
@@ -1545,6 +1546,7 @@ int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) | |||
1545 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", | 1546 | IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", |
1546 | sizeof(struct ip_vs_sync_conn_v0)); | 1547 | sizeof(struct ip_vs_sync_conn_v0)); |
1547 | 1548 | ||
1549 | |||
1548 | if (state == IP_VS_STATE_MASTER) { | 1550 | if (state == IP_VS_STATE_MASTER) { |
1549 | if (ipvs->master_thread) | 1551 | if (ipvs->master_thread) |
1550 | return -EEXIST; | 1552 | return -EEXIST; |
@@ -1667,6 +1669,7 @@ int __net_init ip_vs_sync_net_init(struct net *net) | |||
1667 | { | 1669 | { |
1668 | struct netns_ipvs *ipvs = net_ipvs(net); | 1670 | struct netns_ipvs *ipvs = net_ipvs(net); |
1669 | 1671 | ||
1672 | __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); | ||
1670 | INIT_LIST_HEAD(&ipvs->sync_queue); | 1673 | INIT_LIST_HEAD(&ipvs->sync_queue); |
1671 | spin_lock_init(&ipvs->sync_lock); | 1674 | spin_lock_init(&ipvs->sync_lock); |
1672 | spin_lock_init(&ipvs->sync_buff_lock); | 1675 | spin_lock_init(&ipvs->sync_buff_lock); |
@@ -1680,7 +1683,9 @@ int __net_init ip_vs_sync_net_init(struct net *net) | |||
1680 | void ip_vs_sync_net_cleanup(struct net *net) | 1683 | void ip_vs_sync_net_cleanup(struct net *net) |
1681 | { | 1684 | { |
1682 | int retc; | 1685 | int retc; |
1686 | struct netns_ipvs *ipvs = net_ipvs(net); | ||
1683 | 1687 | ||
1688 | mutex_lock(&ipvs->sync_mutex); | ||
1684 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); | 1689 | retc = stop_sync_thread(net, IP_VS_STATE_MASTER); |
1685 | if (retc && retc != -ESRCH) | 1690 | if (retc && retc != -ESRCH) |
1686 | pr_err("Failed to stop Master Daemon\n"); | 1691 | pr_err("Failed to stop Master Daemon\n"); |
@@ -1688,4 +1693,5 @@ void ip_vs_sync_net_cleanup(struct net *net) | |||
1688 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); | 1693 | retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); |
1689 | if (retc && retc != -ESRCH) | 1694 | if (retc && retc != -ESRCH) |
1690 | pr_err("Failed to stop Backup Daemon\n"); | 1695 | pr_err("Failed to stop Backup Daemon\n"); |
1696 | mutex_unlock(&ipvs->sync_mutex); | ||
1691 | } | 1697 | } |
diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index cf616e55ca41..d69facdd9a7a 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c | |||
@@ -241,8 +241,8 @@ static int gre_packet(struct nf_conn *ct, | |||
241 | nf_ct_refresh_acct(ct, ctinfo, skb, | 241 | nf_ct_refresh_acct(ct, ctinfo, skb, |
242 | ct->proto.gre.stream_timeout); | 242 | ct->proto.gre.stream_timeout); |
243 | /* Also, more likely to be important, and not a probe. */ | 243 | /* Also, more likely to be important, and not a probe. */ |
244 | set_bit(IPS_ASSURED_BIT, &ct->status); | 244 | if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) |
245 | nf_conntrack_event_cache(IPCT_ASSURED, ct); | 245 | nf_conntrack_event_cache(IPCT_ASSURED, ct); |
246 | } else | 246 | } else |
247 | nf_ct_refresh_acct(ct, ctinfo, skb, | 247 | nf_ct_refresh_acct(ct, ctinfo, skb, |
248 | ct->proto.gre.timeout); | 248 | ct->proto.gre.timeout); |
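The gre_packet() change only generates the IPCT_ASSURED event when the status bit actually transitions, by replacing set_bit() plus an unconditional event with test_and_set_bit(). A rough equivalent using C11 atomics; ASSURED_BIT and mark_assured() are made-up names for illustration:

/* Sketch only: fire a notification exactly once, on the first 0->1
 * transition of a status bit, by testing and setting atomically.
 * atomic_fetch_or() stands in for the kernel's test_and_set_bit(). */
#include <stdatomic.h>
#include <stdio.h>

#define ASSURED_BIT (1u << 0)

static atomic_uint status;

static void mark_assured(void)
{
	unsigned int old = atomic_fetch_or(&status, ASSURED_BIT);

	if (!(old & ASSURED_BIT))
		printf("ASSURED event generated\n");	/* first call only */
}

int main(void)
{
	mark_assured();		/* prints once */
	mark_assured();		/* bit already set: no duplicate event */
	return 0;
}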
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c698cec0a445..fabb4fafa281 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
961 | return 0; | 961 | return 0; |
962 | 962 | ||
963 | drop_n_acct: | 963 | drop_n_acct: |
964 | po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); | 964 | spin_lock(&sk->sk_receive_queue.lock); |
965 | po->stats.tp_drops++; | ||
966 | atomic_inc(&sk->sk_drops); | ||
967 | spin_unlock(&sk->sk_receive_queue.lock); | ||
965 | 968 | ||
966 | drop_n_restore: | 969 | drop_n_restore: |
967 | if (skb_head != skb->data && skb_shared(skb)) { | 970 | if (skb_head != skb->data && skb_shared(skb)) { |
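The packet_rcv() drop-accounting hunk stops deriving tp_drops from the atomic_inc_return() value and instead bumps the plain counter under the receive-queue lock, so the statistics field is only ever touched under the same lock as the code that reads it, while sk_drops stays a lock-free atomic. A small sketch of that pattern with pthreads and C11 atomics standing in for the kernel primitives (all names illustrative):

/* Sketch only: a plain statistics counter is updated only under the
 * queue lock; a separate atomic counter can be bumped from anywhere. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long tp_drops;		/* guarded by queue_lock */
static atomic_ulong sk_drops;		/* lock-free counter     */

static void account_drop(void)
{
	pthread_mutex_lock(&queue_lock);
	tp_drops++;			/* no lost updates under the lock */
	atomic_fetch_add(&sk_drops, 1);
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	account_drop();
	printf("tp_drops=%lu sk_drops=%lu\n", tp_drops,
	       (unsigned long)atomic_load(&sk_drops));
	return 0;
}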
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 8b77edbab272..4e1de171866c 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, | |||
84 | static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 84 | static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
85 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | 85 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, |
86 | struct list_head *unmap_list, | 86 | struct list_head *unmap_list, |
87 | struct list_head *kill_list); | 87 | struct list_head *kill_list, |
88 | int *unpinned); | ||
88 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
89 | 90 | ||
90 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) | 91 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) |
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
499 | LIST_HEAD(unmap_list); | 500 | LIST_HEAD(unmap_list); |
500 | LIST_HEAD(kill_list); | 501 | LIST_HEAD(kill_list); |
501 | unsigned long flags; | 502 | unsigned long flags; |
502 | unsigned int nfreed = 0, ncleaned = 0, free_goal; | 503 | unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal; |
503 | int ret = 0; | 504 | int ret = 0; |
504 | 505 | ||
505 | rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); | 506 | rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); |
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
524 | * will be destroyed by the unmap function. | 525 | * will be destroyed by the unmap function. |
525 | */ | 526 | */ |
526 | if (!list_empty(&unmap_list)) { | 527 | if (!list_empty(&unmap_list)) { |
527 | ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); | 528 | ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, |
529 | &kill_list, &unpinned); | ||
528 | /* If we've been asked to destroy all MRs, move those | 530 | /* If we've been asked to destroy all MRs, move those |
529 | * that were simply cleaned to the kill list */ | 531 | * that were simply cleaned to the kill list */ |
530 | if (free_all) | 532 | if (free_all) |
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
548 | spin_unlock_irqrestore(&pool->list_lock, flags); | 550 | spin_unlock_irqrestore(&pool->list_lock, flags); |
549 | } | 551 | } |
550 | 552 | ||
553 | atomic_sub(unpinned, &pool->free_pinned); | ||
551 | atomic_sub(ncleaned, &pool->dirty_count); | 554 | atomic_sub(ncleaned, &pool->dirty_count); |
552 | atomic_sub(nfreed, &pool->item_count); | 555 | atomic_sub(nfreed, &pool->item_count); |
553 | 556 | ||
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, | |||
828 | 831 | ||
829 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | 832 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, |
830 | struct list_head *unmap_list, | 833 | struct list_head *unmap_list, |
831 | struct list_head *kill_list) | 834 | struct list_head *kill_list, |
835 | int *unpinned) | ||
832 | { | 836 | { |
833 | struct rds_iw_mapping *mapping, *next; | 837 | struct rds_iw_mapping *mapping, *next; |
834 | unsigned int ncleaned = 0; | 838 | unsigned int ncleaned = 0; |
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | |||
855 | 859 | ||
856 | spin_lock_irqsave(&pool->list_lock, flags); | 860 | spin_lock_irqsave(&pool->list_lock, flags); |
857 | list_for_each_entry_safe(mapping, next, unmap_list, m_list) { | 861 | list_for_each_entry_safe(mapping, next, unmap_list, m_list) { |
862 | *unpinned += mapping->m_sg.len; | ||
858 | list_move(&mapping->m_list, &laundered); | 863 | list_move(&mapping->m_list, &laundered); |
859 | ncleaned++; | 864 | ncleaned++; |
860 | } | 865 | } |
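The rds/iw_rdma.c hunks thread an extra out-parameter through rds_iw_unmap_fastreg_list() so the unmap path can report how many pages it unpinned, and the caller subtracts that total from the pool's free_pinned counter in one place. A sketch of the out-parameter accumulation pattern (the list walk and field names below are invented):

/* Sketch only: a helper walks a list, returns its primary result, and
 * also accumulates a secondary total for the caller via a pointer. */
#include <stdio.h>

struct mapping {
	int sg_len;			/* pages pinned by this mapping */
	struct mapping *next;
};

static unsigned int unmap_list(struct mapping *head, int *unpinned)
{
	unsigned int ncleaned = 0;
	struct mapping *m;

	for (m = head; m; m = m->next) {
		*unpinned += m->sg_len;	/* secondary total for the caller */
		ncleaned++;		/* primary result */
	}
	return ncleaned;
}

int main(void)
{
	struct mapping b = { 4, NULL };
	struct mapping a = { 8, &b };
	int unpinned = 0;
	unsigned int cleaned = unmap_list(&a, &unpinned);

	printf("cleaned=%u unpinned=%d\n", cleaned, unpinned);
	return 0;
}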
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e83e7fee3bc0..ea40d540a990 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, | |||
4113 | if (len % sizeof(u32)) | 4113 | if (len % sizeof(u32)) |
4114 | return -EINVAL; | 4114 | return -EINVAL; |
4115 | 4115 | ||
4116 | if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES) | ||
4117 | return -EINVAL; | ||
4118 | |||
4116 | memcpy(settings->akm_suites, data, len); | 4119 | memcpy(settings->akm_suites, data, len); |
4117 | 4120 | ||
4118 | for (i = 0; i < settings->n_ciphers_pairwise; i++) | 4121 | for (i = 0; i < settings->n_akm_suites; i++) |
4119 | if (!nl80211_valid_akm_suite(settings->akm_suites[i])) | 4122 | if (!nl80211_valid_akm_suite(settings->akm_suites[i])) |
4120 | return -EINVAL; | 4123 | return -EINVAL; |
4121 | } | 4124 | } |
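The nl80211_crypto_settings() hunk adds the missing bounds check on n_akm_suites before the memcpy into the fixed-size akm_suites array, and then validates the AKM suites themselves instead of looping over n_ciphers_pairwise. A generic sketch of the check-count-then-copy pattern; the struct, constant and validity test below are invented:

/* Sketch only: refuse an externally supplied count that exceeds the
 * destination array before copying, then validate each element. */
#include <string.h>
#include <stdio.h>
#include <stdint.h>

#define MAX_SUITES 2

struct crypto_settings {
	int n_suites;
	uint32_t suites[MAX_SUITES];
};

static int parse_suites(struct crypto_settings *s,
			const uint32_t *data, size_t len)
{
	if (len % sizeof(uint32_t))
		return -1;

	s->n_suites = len / sizeof(uint32_t);
	if (s->n_suites > MAX_SUITES)
		return -1;		/* would overflow s->suites */

	memcpy(s->suites, data, len);

	for (int i = 0; i < s->n_suites; i++)
		if (s->suites[i] == 0)	/* stand-in validity check */
			return -1;
	return 0;
}

int main(void)
{
	uint32_t good[2] = { 1, 2 };
	struct crypto_settings s;

	printf("%d\n", parse_suites(&s, good, sizeof(good)));	/* 0 */
	return 0;
}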
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index d30615419b4d..5f03e4ea65bf 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c | |||
@@ -91,7 +91,7 @@ int x25_parse_address_block(struct sk_buff *skb, | |||
91 | int needed; | 91 | int needed; |
92 | int rc; | 92 | int rc; |
93 | 93 | ||
94 | if (skb->len < 1) { | 94 | if (!pskb_may_pull(skb, 1)) { |
95 | /* packet has no address block */ | 95 | /* packet has no address block */ |
96 | rc = 0; | 96 | rc = 0; |
97 | goto empty; | 97 | goto empty; |
@@ -100,7 +100,7 @@ int x25_parse_address_block(struct sk_buff *skb, | |||
100 | len = *skb->data; | 100 | len = *skb->data; |
101 | needed = 1 + (len >> 4) + (len & 0x0f); | 101 | needed = 1 + (len >> 4) + (len & 0x0f); |
102 | 102 | ||
103 | if (skb->len < needed) { | 103 | if (!pskb_may_pull(skb, needed)) { |
104 | /* packet is too short to hold the addresses it claims | 104 | /* packet is too short to hold the addresses it claims |
105 | to hold */ | 105 | to hold */ |
106 | rc = -1; | 106 | rc = -1; |
@@ -295,7 +295,8 @@ static struct sock *x25_find_listener(struct x25_address *addr, | |||
295 | * Found a listening socket, now check the incoming | 295 | * Found a listening socket, now check the incoming |
296 | * call user data vs this sockets call user data | 296 | * call user data vs this sockets call user data |
297 | */ | 297 | */ |
298 | if(skb->len > 0 && x25_sk(s)->cudmatchlength > 0) { | 298 | if (x25_sk(s)->cudmatchlength > 0 && |
299 | skb->len >= x25_sk(s)->cudmatchlength) { | ||
299 | if((memcmp(x25_sk(s)->calluserdata.cuddata, | 300 | if((memcmp(x25_sk(s)->calluserdata.cuddata, |
300 | skb->data, | 301 | skb->data, |
301 | x25_sk(s)->cudmatchlength)) == 0) { | 302 | x25_sk(s)->cudmatchlength)) == 0) { |
@@ -951,14 +952,27 @@ int x25_rx_call_request(struct sk_buff *skb, struct x25_neigh *nb, | |||
951 | * | 952 | * |
952 | * Facilities length is mandatory in call request packets | 953 | * Facilities length is mandatory in call request packets |
953 | */ | 954 | */ |
954 | if (skb->len < 1) | 955 | if (!pskb_may_pull(skb, 1)) |
955 | goto out_clear_request; | 956 | goto out_clear_request; |
956 | len = skb->data[0] + 1; | 957 | len = skb->data[0] + 1; |
957 | if (skb->len < len) | 958 | if (!pskb_may_pull(skb, len)) |
958 | goto out_clear_request; | 959 | goto out_clear_request; |
959 | skb_pull(skb,len); | 960 | skb_pull(skb,len); |
960 | 961 | ||
961 | /* | 962 | /* |
963 | * Ensure that the amount of call user data is valid. | ||
964 | */ | ||
965 | if (skb->len > X25_MAX_CUD_LEN) | ||
966 | goto out_clear_request; | ||
967 | |||
968 | /* | ||
969 | * Get all the call user data so it can be used in | ||
970 | * x25_find_listener and skb_copy_from_linear_data up ahead. | ||
971 | */ | ||
972 | if (!pskb_may_pull(skb, skb->len)) | ||
973 | goto out_clear_request; | ||
974 | |||
975 | /* | ||
962 | * Find a listener for the particular address/cud pair. | 976 | * Find a listener for the particular address/cud pair. |
963 | */ | 977 | */ |
964 | sk = x25_find_listener(&source_addr,skb); | 978 | sk = x25_find_listener(&source_addr,skb); |
@@ -1166,6 +1180,9 @@ static int x25_sendmsg(struct kiocb *iocb, struct socket *sock, | |||
1166 | * byte of the user data is the logical value of the Q Bit. | 1180 | * byte of the user data is the logical value of the Q Bit. |
1167 | */ | 1181 | */ |
1168 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { | 1182 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { |
1183 | if (!pskb_may_pull(skb, 1)) | ||
1184 | goto out_kfree_skb; | ||
1185 | |||
1169 | qbit = skb->data[0]; | 1186 | qbit = skb->data[0]; |
1170 | skb_pull(skb, 1); | 1187 | skb_pull(skb, 1); |
1171 | } | 1188 | } |
@@ -1244,7 +1261,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1244 | struct x25_sock *x25 = x25_sk(sk); | 1261 | struct x25_sock *x25 = x25_sk(sk); |
1245 | struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; | 1262 | struct sockaddr_x25 *sx25 = (struct sockaddr_x25 *)msg->msg_name; |
1246 | size_t copied; | 1263 | size_t copied; |
1247 | int qbit; | 1264 | int qbit, header_len = x25->neighbour->extended ? |
1265 | X25_EXT_MIN_LEN : X25_STD_MIN_LEN; | ||
1266 | |||
1248 | struct sk_buff *skb; | 1267 | struct sk_buff *skb; |
1249 | unsigned char *asmptr; | 1268 | unsigned char *asmptr; |
1250 | int rc = -ENOTCONN; | 1269 | int rc = -ENOTCONN; |
@@ -1265,6 +1284,9 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1265 | 1284 | ||
1266 | skb = skb_dequeue(&x25->interrupt_in_queue); | 1285 | skb = skb_dequeue(&x25->interrupt_in_queue); |
1267 | 1286 | ||
1287 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) | ||
1288 | goto out_free_dgram; | ||
1289 | |||
1268 | skb_pull(skb, X25_STD_MIN_LEN); | 1290 | skb_pull(skb, X25_STD_MIN_LEN); |
1269 | 1291 | ||
1270 | /* | 1292 | /* |
@@ -1285,10 +1307,12 @@ static int x25_recvmsg(struct kiocb *iocb, struct socket *sock, | |||
1285 | if (!skb) | 1307 | if (!skb) |
1286 | goto out; | 1308 | goto out; |
1287 | 1309 | ||
1310 | if (!pskb_may_pull(skb, header_len)) | ||
1311 | goto out_free_dgram; | ||
1312 | |||
1288 | qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; | 1313 | qbit = (skb->data[0] & X25_Q_BIT) == X25_Q_BIT; |
1289 | 1314 | ||
1290 | skb_pull(skb, x25->neighbour->extended ? | 1315 | skb_pull(skb, header_len); |
1291 | X25_EXT_MIN_LEN : X25_STD_MIN_LEN); | ||
1292 | 1316 | ||
1293 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { | 1317 | if (test_bit(X25_Q_BIT_FLAG, &x25->flags)) { |
1294 | asmptr = skb_push(skb, 1); | 1318 | asmptr = skb_push(skb, 1); |
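The af_x25.c hunks above, and the x25_dev.c, x25_facilities.c, x25_in.c, x25_link.c and x25_subr.c hunks that follow, all apply the same rule: call pskb_may_pull() for the exact number of bytes about to be read before touching skb->data, instead of trusting skb->len or reading blindly. A plain-C sketch of that header-length validation, using an ordinary byte buffer in place of an skb (names invented):

/* Sketch only: verify the buffer really contains the bytes a
 * variable-length header claims, before dereferencing them. */
#include <stdio.h>
#include <stddef.h>

/* Stand-in for pskb_may_pull(): do we have at least 'needed' bytes? */
static int may_pull(size_t buflen, size_t needed)
{
	return buflen >= needed;
}

/* The first byte encodes two 4-bit address lengths, as in the X.25
 * address block; returns the block size or -1 if truncated. */
static int parse_address_block(const unsigned char *buf, size_t buflen)
{
	size_t needed;

	if (!may_pull(buflen, 1))
		return -1;

	needed = 1 + (buf[0] >> 4) + (buf[0] & 0x0f);
	if (!may_pull(buflen, needed))
		return -1;		/* claims more than the buffer holds */

	return (int)needed;
}

int main(void)
{
	unsigned char pkt[] = { 0x23, 1, 2, 3, 4, 5 };

	printf("%d\n", parse_address_block(pkt, sizeof(pkt)));	/* 6  */
	printf("%d\n", parse_address_block(pkt, 3));		/* -1 */
	return 0;
}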
diff --git a/net/x25/x25_dev.c b/net/x25/x25_dev.c index e547ca1578c3..fa2b41888bd9 100644 --- a/net/x25/x25_dev.c +++ b/net/x25/x25_dev.c | |||
@@ -32,6 +32,9 @@ static int x25_receive_data(struct sk_buff *skb, struct x25_neigh *nb) | |||
32 | unsigned short frametype; | 32 | unsigned short frametype; |
33 | unsigned int lci; | 33 | unsigned int lci; |
34 | 34 | ||
35 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) | ||
36 | return 0; | ||
37 | |||
35 | frametype = skb->data[2]; | 38 | frametype = skb->data[2]; |
36 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); | 39 | lci = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); |
37 | 40 | ||
@@ -115,6 +118,9 @@ int x25_lapb_receive_frame(struct sk_buff *skb, struct net_device *dev, | |||
115 | goto drop; | 118 | goto drop; |
116 | } | 119 | } |
117 | 120 | ||
121 | if (!pskb_may_pull(skb, 1)) | ||
122 | return 0; | ||
123 | |||
118 | switch (skb->data[0]) { | 124 | switch (skb->data[0]) { |
119 | 125 | ||
120 | case X25_IFACE_DATA: | 126 | case X25_IFACE_DATA: |
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c index f77e4e75f914..36384a1fa9f2 100644 --- a/net/x25/x25_facilities.c +++ b/net/x25/x25_facilities.c | |||
@@ -44,7 +44,7 @@ | |||
44 | int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | 44 | int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, |
45 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) | 45 | struct x25_dte_facilities *dte_facs, unsigned long *vc_fac_mask) |
46 | { | 46 | { |
47 | unsigned char *p = skb->data; | 47 | unsigned char *p; |
48 | unsigned int len; | 48 | unsigned int len; |
49 | 49 | ||
50 | *vc_fac_mask = 0; | 50 | *vc_fac_mask = 0; |
@@ -60,14 +60,16 @@ int x25_parse_facilities(struct sk_buff *skb, struct x25_facilities *facilities, | |||
60 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); | 60 | memset(dte_facs->called_ae, '\0', sizeof(dte_facs->called_ae)); |
61 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); | 61 | memset(dte_facs->calling_ae, '\0', sizeof(dte_facs->calling_ae)); |
62 | 62 | ||
63 | if (skb->len < 1) | 63 | if (!pskb_may_pull(skb, 1)) |
64 | return 0; | 64 | return 0; |
65 | 65 | ||
66 | len = *p++; | 66 | len = skb->data[0]; |
67 | 67 | ||
68 | if (len >= skb->len) | 68 | if (!pskb_may_pull(skb, 1 + len)) |
69 | return -1; | 69 | return -1; |
70 | 70 | ||
71 | p = skb->data + 1; | ||
72 | |||
71 | while (len > 0) { | 73 | while (len > 0) { |
72 | switch (*p & X25_FAC_CLASS_MASK) { | 74 | switch (*p & X25_FAC_CLASS_MASK) { |
73 | case X25_FAC_CLASS_A: | 75 | case X25_FAC_CLASS_A: |
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c index 0b073b51b183..a49cd4ec551a 100644 --- a/net/x25/x25_in.c +++ b/net/x25/x25_in.c | |||
@@ -107,6 +107,8 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
107 | /* | 107 | /* |
108 | * Parse the data in the frame. | 108 | * Parse the data in the frame. |
109 | */ | 109 | */ |
110 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) | ||
111 | goto out_clear; | ||
110 | skb_pull(skb, X25_STD_MIN_LEN); | 112 | skb_pull(skb, X25_STD_MIN_LEN); |
111 | 113 | ||
112 | len = x25_parse_address_block(skb, &source_addr, | 114 | len = x25_parse_address_block(skb, &source_addr, |
@@ -127,9 +129,11 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
127 | * Copy any Call User Data. | 129 | * Copy any Call User Data. |
128 | */ | 130 | */ |
129 | if (skb->len > 0) { | 131 | if (skb->len > 0) { |
130 | skb_copy_from_linear_data(skb, | 132 | if (skb->len > X25_MAX_CUD_LEN) |
131 | x25->calluserdata.cuddata, | 133 | goto out_clear; |
132 | skb->len); | 134 | |
135 | skb_copy_bits(skb, 0, x25->calluserdata.cuddata, | ||
136 | skb->len); | ||
133 | x25->calluserdata.cudlength = skb->len; | 137 | x25->calluserdata.cudlength = skb->len; |
134 | } | 138 | } |
135 | if (!sock_flag(sk, SOCK_DEAD)) | 139 | if (!sock_flag(sk, SOCK_DEAD)) |
@@ -137,6 +141,9 @@ static int x25_state1_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
137 | break; | 141 | break; |
138 | } | 142 | } |
139 | case X25_CLEAR_REQUEST: | 143 | case X25_CLEAR_REQUEST: |
144 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | ||
145 | goto out_clear; | ||
146 | |||
140 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); | 147 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); |
141 | x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); | 148 | x25_disconnect(sk, ECONNREFUSED, skb->data[3], skb->data[4]); |
142 | break; | 149 | break; |
@@ -164,6 +171,9 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
164 | switch (frametype) { | 171 | switch (frametype) { |
165 | 172 | ||
166 | case X25_CLEAR_REQUEST: | 173 | case X25_CLEAR_REQUEST: |
174 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | ||
175 | goto out_clear; | ||
176 | |||
167 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); | 177 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); |
168 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); | 178 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); |
169 | break; | 179 | break; |
@@ -177,6 +187,11 @@ static int x25_state2_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
177 | } | 187 | } |
178 | 188 | ||
179 | return 0; | 189 | return 0; |
190 | |||
191 | out_clear: | ||
192 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
193 | x25_start_t23timer(sk); | ||
194 | return 0; | ||
180 | } | 195 | } |
181 | 196 | ||
182 | /* | 197 | /* |
@@ -206,6 +221,9 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
206 | break; | 221 | break; |
207 | 222 | ||
208 | case X25_CLEAR_REQUEST: | 223 | case X25_CLEAR_REQUEST: |
224 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | ||
225 | goto out_clear; | ||
226 | |||
209 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); | 227 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); |
210 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); | 228 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); |
211 | break; | 229 | break; |
@@ -304,6 +322,12 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
304 | } | 322 | } |
305 | 323 | ||
306 | return queued; | 324 | return queued; |
325 | |||
326 | out_clear: | ||
327 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
328 | x25->state = X25_STATE_2; | ||
329 | x25_start_t23timer(sk); | ||
330 | return 0; | ||
307 | } | 331 | } |
308 | 332 | ||
309 | /* | 333 | /* |
@@ -313,13 +337,13 @@ static int x25_state3_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
313 | */ | 337 | */ |
314 | static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) | 338 | static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametype) |
315 | { | 339 | { |
340 | struct x25_sock *x25 = x25_sk(sk); | ||
341 | |||
316 | switch (frametype) { | 342 | switch (frametype) { |
317 | 343 | ||
318 | case X25_RESET_REQUEST: | 344 | case X25_RESET_REQUEST: |
319 | x25_write_internal(sk, X25_RESET_CONFIRMATION); | 345 | x25_write_internal(sk, X25_RESET_CONFIRMATION); |
320 | case X25_RESET_CONFIRMATION: { | 346 | case X25_RESET_CONFIRMATION: { |
321 | struct x25_sock *x25 = x25_sk(sk); | ||
322 | |||
323 | x25_stop_timer(sk); | 347 | x25_stop_timer(sk); |
324 | x25->condition = 0x00; | 348 | x25->condition = 0x00; |
325 | x25->va = 0; | 349 | x25->va = 0; |
@@ -331,6 +355,9 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
331 | break; | 355 | break; |
332 | } | 356 | } |
333 | case X25_CLEAR_REQUEST: | 357 | case X25_CLEAR_REQUEST: |
358 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 2)) | ||
359 | goto out_clear; | ||
360 | |||
334 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); | 361 | x25_write_internal(sk, X25_CLEAR_CONFIRMATION); |
335 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); | 362 | x25_disconnect(sk, 0, skb->data[3], skb->data[4]); |
336 | break; | 363 | break; |
@@ -340,6 +367,12 @@ static int x25_state4_machine(struct sock *sk, struct sk_buff *skb, int frametyp | |||
340 | } | 367 | } |
341 | 368 | ||
342 | return 0; | 369 | return 0; |
370 | |||
371 | out_clear: | ||
372 | x25_write_internal(sk, X25_CLEAR_REQUEST); | ||
373 | x25->state = X25_STATE_2; | ||
374 | x25_start_t23timer(sk); | ||
375 | return 0; | ||
343 | } | 376 | } |
344 | 377 | ||
345 | /* Higher level upcall for a LAPB frame */ | 378 | /* Higher level upcall for a LAPB frame */ |
diff --git a/net/x25/x25_link.c b/net/x25/x25_link.c index 037958ff8eed..4acacf3c6617 100644 --- a/net/x25/x25_link.c +++ b/net/x25/x25_link.c | |||
@@ -90,6 +90,9 @@ void x25_link_control(struct sk_buff *skb, struct x25_neigh *nb, | |||
90 | break; | 90 | break; |
91 | 91 | ||
92 | case X25_DIAGNOSTIC: | 92 | case X25_DIAGNOSTIC: |
93 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN + 4)) | ||
94 | break; | ||
95 | |||
93 | printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", | 96 | printk(KERN_WARNING "x25: diagnostic #%d - %02X %02X %02X\n", |
94 | skb->data[3], skb->data[4], | 97 | skb->data[3], skb->data[4], |
95 | skb->data[5], skb->data[6]); | 98 | skb->data[5], skb->data[6]); |
diff --git a/net/x25/x25_subr.c b/net/x25/x25_subr.c index 24a342ebc7f5..5170d52bfd96 100644 --- a/net/x25/x25_subr.c +++ b/net/x25/x25_subr.c | |||
@@ -269,7 +269,11 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, | |||
269 | int *d, int *m) | 269 | int *d, int *m) |
270 | { | 270 | { |
271 | struct x25_sock *x25 = x25_sk(sk); | 271 | struct x25_sock *x25 = x25_sk(sk); |
272 | unsigned char *frame = skb->data; | 272 | unsigned char *frame; |
273 | |||
274 | if (!pskb_may_pull(skb, X25_STD_MIN_LEN)) | ||
275 | return X25_ILLEGAL; | ||
276 | frame = skb->data; | ||
273 | 277 | ||
274 | *ns = *nr = *q = *d = *m = 0; | 278 | *ns = *nr = *q = *d = *m = 0; |
275 | 279 | ||
@@ -294,6 +298,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, | |||
294 | if (frame[2] == X25_RR || | 298 | if (frame[2] == X25_RR || |
295 | frame[2] == X25_RNR || | 299 | frame[2] == X25_RNR || |
296 | frame[2] == X25_REJ) { | 300 | frame[2] == X25_REJ) { |
301 | if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) | ||
302 | return X25_ILLEGAL; | ||
303 | frame = skb->data; | ||
304 | |||
297 | *nr = (frame[3] >> 1) & 0x7F; | 305 | *nr = (frame[3] >> 1) & 0x7F; |
298 | return frame[2]; | 306 | return frame[2]; |
299 | } | 307 | } |
@@ -308,6 +316,10 @@ int x25_decode(struct sock *sk, struct sk_buff *skb, int *ns, int *nr, int *q, | |||
308 | 316 | ||
309 | if (x25->neighbour->extended) { | 317 | if (x25->neighbour->extended) { |
310 | if ((frame[2] & 0x01) == X25_DATA) { | 318 | if ((frame[2] & 0x01) == X25_DATA) { |
319 | if (!pskb_may_pull(skb, X25_EXT_MIN_LEN)) | ||
320 | return X25_ILLEGAL; | ||
321 | frame = skb->data; | ||
322 | |||
311 | *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; | 323 | *q = (frame[0] & X25_Q_BIT) == X25_Q_BIT; |
312 | *d = (frame[0] & X25_D_BIT) == X25_D_BIT; | 324 | *d = (frame[0] & X25_D_BIT) == X25_D_BIT; |
313 | *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; | 325 | *m = (frame[3] & X25_EXT_M_BIT) == X25_EXT_M_BIT; |
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 94fdcc7f1030..552df27dcf53 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1349 | BUG(); | 1349 | BUG(); |
1350 | } | 1350 | } |
1351 | xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); | 1351 | xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); |
1352 | memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry)); | ||
1353 | xfrm_policy_put_afinfo(afinfo); | ||
1354 | 1352 | ||
1355 | if (likely(xdst)) | 1353 | if (likely(xdst)) { |
1354 | memset(&xdst->u.rt6.rt6i_table, 0, | ||
1355 | sizeof(*xdst) - sizeof(struct dst_entry)); | ||
1356 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1356 | xdst->flo.ops = &xfrm_bundle_fc_ops; |
1357 | else | 1357 | } else |
1358 | xdst = ERR_PTR(-ENOBUFS); | 1358 | xdst = ERR_PTR(-ENOBUFS); |
1359 | 1359 | ||
1360 | xfrm_policy_put_afinfo(afinfo); | ||
1361 | |||
1360 | return xdst; | 1362 | return xdst; |
1361 | } | 1363 | } |
1362 | 1364 | ||
diff --git a/security/security.c b/security/security.c index 0e4fccfef12c..d9e153390926 100644 --- a/security/security.c +++ b/security/security.c | |||
@@ -1097,6 +1097,7 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk) | |||
1097 | { | 1097 | { |
1098 | security_ops->sk_clone_security(sk, newsk); | 1098 | security_ops->sk_clone_security(sk, newsk); |
1099 | } | 1099 | } |
1100 | EXPORT_SYMBOL(security_sk_clone); | ||
1100 | 1101 | ||
1101 | void security_sk_classify_flow(struct sock *sk, struct flowi *fl) | 1102 | void security_sk_classify_flow(struct sock *sk, struct flowi *fl) |
1102 | { | 1103 | { |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index be6982289c0d..191284a1c0ae 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip, | |||
1924 | } | 1924 | } |
1925 | 1925 | ||
1926 | static unsigned int azx_get_position(struct azx *chip, | 1926 | static unsigned int azx_get_position(struct azx *chip, |
1927 | struct azx_dev *azx_dev) | 1927 | struct azx_dev *azx_dev, |
1928 | bool with_check) | ||
1928 | { | 1929 | { |
1929 | unsigned int pos; | 1930 | unsigned int pos; |
1930 | int stream = azx_dev->substream->stream; | 1931 | int stream = azx_dev->substream->stream; |
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip, | |||
1940 | default: | 1941 | default: |
1941 | /* use the position buffer */ | 1942 | /* use the position buffer */ |
1942 | pos = le32_to_cpu(*azx_dev->posbuf); | 1943 | pos = le32_to_cpu(*azx_dev->posbuf); |
1943 | if (chip->position_fix[stream] == POS_FIX_AUTO) { | 1944 | if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) { |
1944 | if (!pos || pos == (u32)-1) { | 1945 | if (!pos || pos == (u32)-1) { |
1945 | printk(KERN_WARNING | 1946 | printk(KERN_WARNING |
1946 | "hda-intel: Invalid position buffer, " | 1947 | "hda-intel: Invalid position buffer, " |
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream) | |||
1964 | struct azx *chip = apcm->chip; | 1965 | struct azx *chip = apcm->chip; |
1965 | struct azx_dev *azx_dev = get_azx_dev(substream); | 1966 | struct azx_dev *azx_dev = get_azx_dev(substream); |
1966 | return bytes_to_frames(substream->runtime, | 1967 | return bytes_to_frames(substream->runtime, |
1967 | azx_get_position(chip, azx_dev)); | 1968 | azx_get_position(chip, azx_dev, false)); |
1968 | } | 1969 | } |
1969 | 1970 | ||
1970 | /* | 1971 | /* |
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) | |||
1987 | return -1; /* bogus (too early) interrupt */ | 1988 | return -1; /* bogus (too early) interrupt */ |
1988 | 1989 | ||
1989 | stream = azx_dev->substream->stream; | 1990 | stream = azx_dev->substream->stream; |
1990 | pos = azx_get_position(chip, azx_dev); | 1991 | pos = azx_get_position(chip, azx_dev, true); |
1991 | 1992 | ||
1992 | if (WARN_ONCE(!azx_dev->period_bytes, | 1993 | if (WARN_ONCE(!azx_dev->period_bytes, |
1993 | "hda-intel: zero azx_dev->period_bytes")) | 1994 | "hda-intel: zero azx_dev->period_bytes")) |
@@ -2369,6 +2370,7 @@ static int azx_dev_free(struct snd_device *device) | |||
2369 | static struct snd_pci_quirk position_fix_list[] __devinitdata = { | 2370 | static struct snd_pci_quirk position_fix_list[] __devinitdata = { |
2370 | SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), | 2371 | SND_PCI_QUIRK(0x1028, 0x01cc, "Dell D820", POS_FIX_LPIB), |
2371 | SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), | 2372 | SND_PCI_QUIRK(0x1028, 0x01de, "Dell Precision 390", POS_FIX_LPIB), |
2373 | SND_PCI_QUIRK(0x1028, 0x02c6, "Dell Inspiron 1010", POS_FIX_LPIB), | ||
2372 | SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), | 2374 | SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB), |
2373 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), | 2375 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
2374 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), | 2376 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 7696d05b9356..76752d8ea733 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3110,6 +3110,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3110 | SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), | 3110 | SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD), |
3111 | SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), | 3111 | SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS), |
3112 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3112 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
3113 | SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO), | ||
3113 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), | 3114 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), |
3114 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), | 3115 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), |
3115 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), | 3116 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), |
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index ffa2ffe5ec11..aa091a0d8187 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c | |||
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec) | |||
1454 | /* set the update bits */ | 1454 | /* set the update bits */ |
1455 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); | 1455 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); |
1456 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); | 1456 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); |
1457 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); | 1457 | snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100); |
1458 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); | 1458 | snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100); |
1459 | snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); | 1459 | snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); |
1460 | snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); | 1460 | snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); |
1461 | snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); | 1461 | snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); |
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c index 928f03707451..50e59194ad81 100644 --- a/sound/soc/omap/mcpdm.c +++ b/sound/soc/omap/mcpdm.c | |||
@@ -449,7 +449,7 @@ exit: | |||
449 | return ret; | 449 | return ret; |
450 | } | 450 | } |
451 | 451 | ||
452 | int __devexit omap_mcpdm_remove(struct platform_device *pdev) | 452 | int omap_mcpdm_remove(struct platform_device *pdev) |
453 | { | 453 | { |
454 | struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); | 454 | struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); |
455 | 455 | ||
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h index df3e16fb51f3..20c20a8649fe 100644 --- a/sound/soc/omap/mcpdm.h +++ b/sound/soc/omap/mcpdm.h | |||
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void); | |||
150 | extern void omap_mcpdm_free(void); | 150 | extern void omap_mcpdm_free(void); |
151 | extern int omap_mcpdm_set_offset(int offset1, int offset2); | 151 | extern int omap_mcpdm_set_offset(int offset1, int offset2); |
152 | int __devinit omap_mcpdm_probe(struct platform_device *pdev); | 152 | int __devinit omap_mcpdm_probe(struct platform_device *pdev); |
153 | int __devexit omap_mcpdm_remove(struct platform_device *pdev); | 153 | int omap_mcpdm_remove(struct platform_device *pdev); |
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b6445757fc54..2b8350b52232 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c | |||
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card) | |||
196 | if (clk_pout) { | 196 | if (clk_pout) { |
197 | pout = clk_get(NULL, "CLK_POUT"); | 197 | pout = clk_get(NULL, "CLK_POUT"); |
198 | if (IS_ERR(pout)) { | 198 | if (IS_ERR(pout)) { |
199 | dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", | 199 | dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n", |
200 | PTR_ERR(pout)); | 200 | PTR_ERR(pout)); |
201 | return PTR_ERR(pout); | 201 | return PTR_ERR(pout); |
202 | } | 202 | } |
203 | 203 | ||
204 | ret = clk_enable(pout); | 204 | ret = clk_enable(pout); |
205 | if (ret != 0) { | 205 | if (ret != 0) { |
206 | dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", | 206 | dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", |
207 | ret); | 207 | ret); |
208 | clk_put(pout); | 208 | clk_put(pout); |
209 | return ret; | 209 | return ret; |
210 | } | 210 | } |
211 | 211 | ||
212 | dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", | 212 | dev_dbg(card->dev, "MCLK enabled at %luHz\n", |
213 | clk_get_rate(pout)); | 213 | clk_get_rate(pout)); |
214 | } | 214 | } |
215 | 215 | ||
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card) | |||
241 | if (clk_pout) { | 241 | if (clk_pout) { |
242 | ret = clk_enable(pout); | 242 | ret = clk_enable(pout); |
243 | if (ret != 0) | 243 | if (ret != 0) |
244 | dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", | 244 | dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", |
245 | ret); | 245 | ret); |
246 | } | 246 | } |
247 | 247 | ||
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index c5748c52318f..e389815078d3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -449,6 +449,8 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
449 | } | 449 | } |
450 | 450 | ||
451 | if (type & PERF_SAMPLE_RAW) { | 451 | if (type & PERF_SAMPLE_RAW) { |
452 | const u64 *pdata; | ||
453 | |||
452 | u.val64 = *array; | 454 | u.val64 = *array; |
453 | if (WARN_ONCE(swapped, | 455 | if (WARN_ONCE(swapped, |
454 | "Endianness of raw data not corrected!\n")) { | 456 | "Endianness of raw data not corrected!\n")) { |
@@ -462,11 +464,12 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
462 | return -EFAULT; | 464 | return -EFAULT; |
463 | 465 | ||
464 | data->raw_size = u.val32[0]; | 466 | data->raw_size = u.val32[0]; |
467 | pdata = (void *) array + sizeof(u32); | ||
465 | 468 | ||
466 | if (sample_overlap(event, &u.val32[1], data->raw_size)) | 469 | if (sample_overlap(event, pdata, data->raw_size)) |
467 | return -EFAULT; | 470 | return -EFAULT; |
468 | 471 | ||
469 | data->raw_data = &u.val32[1]; | 472 | data->raw_data = (void *) pdata; |
470 | } | 473 | } |
471 | 474 | ||
472 | return 0; | 475 | return 0; |
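The perf_event__parse_sample() hunk computes raw_data directly from the event array, just past the u32 size field, rather than pointing it at the second half of the local swap union. A sketch of addressing a variable-length payload that follows a size field in a byte stream; the record layout and helper below are illustrative only:

/* Sketch only: a record is a 32-bit size followed by that many payload
 * bytes; derive the payload pointer from the record itself, not from a
 * local copy of its first word. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static const void *raw_payload(const void *record, size_t record_len,
			       uint32_t *size_out)
{
	uint32_t size;

	if (record_len < sizeof(size))
		return NULL;

	memcpy(&size, record, sizeof(size));		/* the size field */
	if (record_len - sizeof(size) < size)
		return NULL;				/* payload truncated */

	*size_out = size;
	return (const char *)record + sizeof(size);	/* payload follows */
}

int main(void)
{
	/* Demo record assumes a little-endian host: size = 4, then "perf". */
	unsigned char rec[8] = { 4, 0, 0, 0, 'p', 'e', 'r', 'f' };
	uint32_t size;
	const void *p = raw_payload(rec, sizeof(rec), &size);

	if (p)
		printf("%u bytes: %.4s\n", size, (const char *)p);
	return 0;
}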