author     Robert Richter <robert.richter@amd.com>    2011-11-08 09:52:15 -0500
committer  Robert Richter <robert.richter@amd.com>    2011-11-08 09:52:15 -0500
commit     de346b6949063aa040ef607943b072835294f4b3 (patch)
tree       11f5a10b9ab41a10ea26bf8ab1f133b802e7559e
parent     dcfce4a095932e6e95d83ad982be3609947963bc (diff)
parent     9c48f1c629ecfa114850c03f875c6691003214de (diff)
Merge branch 'perf/core' into oprofile/master
Merge reason: Resolve conflicts with Don's NMI rework:
commit 9c48f1c629ecfa114850c03f875c6691003214de
Author: Don Zickus <dzickus@redhat.com>
Date: Fri Sep 30 15:06:21 2011 -0400
x86, nmi: Wire up NMI handlers to new routines
Conflicts:
arch/x86/oprofile/nmi_timer_int.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
210 files changed, 2140 insertions, 1497 deletions
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp index fa8776ab9b18..84d46c0c71a3 100644 --- a/Documentation/hwmon/coretemp +++ b/Documentation/hwmon/coretemp | |||
@@ -35,13 +35,6 @@ the Out-Of-Spec bit. Following table summarizes the exported sysfs files: | |||
35 | All Sysfs entries are named with their core_id (represented here by 'X'). | 35 | All Sysfs entries are named with their core_id (represented here by 'X'). |
36 | tempX_input - Core temperature (in millidegrees Celsius). | 36 | tempX_input - Core temperature (in millidegrees Celsius). |
37 | tempX_max - All cooling devices should be turned on (on Core2). | 37 | tempX_max - All cooling devices should be turned on (on Core2). |
38 | Initialized with IA32_THERM_INTERRUPT. When the CPU | ||
39 | temperature reaches this temperature, an interrupt is | ||
40 | generated and tempX_max_alarm is set. | ||
41 | tempX_max_hyst - If the CPU temperature falls below than temperature, | ||
42 | an interrupt is generated and tempX_max_alarm is reset. | ||
43 | tempX_max_alarm - Set if the temperature reaches or exceeds tempX_max. | ||
44 | Reset if the temperature drops to or below tempX_max_hyst. | ||
45 | tempX_crit - Maximum junction temperature (in millidegrees Celsius). | 38 | tempX_crit - Maximum junction temperature (in millidegrees Celsius). |
46 | tempX_crit_alarm - Set when Out-of-spec bit is set, never clears. | 39 | tempX_crit_alarm - Set when Out-of-spec bit is set, never clears. |
47 | Correct CPU operation is no longer guaranteed. | 40 | Correct CPU operation is no longer guaranteed. |
@@ -49,9 +42,10 @@ tempX_label - Contains string "Core X", where X is processor | |||
49 | number. For Package temp, this will be "Physical id Y", | 42 | number. For Package temp, this will be "Physical id Y", |
50 | where Y is the package number. | 43 | where Y is the package number. |
51 | 44 | ||
52 | The TjMax temperature is set to 85 degrees C if undocumented model specific | 45 | On CPU models which support it, TjMax is read from a model-specific register. |
53 | register (UMSR) 0xee has bit 30 set. If not the TjMax is 100 degrees C as | 46 | On other models, it is set to an arbitrary value based on weak heuristics. |
54 | (sometimes) documented in processor datasheet. | 47 | If these heuristics don't work for you, you can pass the correct TjMax value |
48 | as a module parameter (tjmax). | ||
55 | 49 | ||
56 | Appendix A. Known TjMax lists (TBD): | 50 | Appendix A. Known TjMax lists (TBD): |
57 | Some information comes from ark.intel.com | 51 | Some information comes from ark.intel.com |
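The tjmax override described above is an ordinary module parameter. As a hedged sketch of how a driver typically declares one (the backing variable name and the degrees-Celsius convention are assumptions here, not a copy of the coretemp source):

    #include <linux/module.h>

    /* Read-only override; 0 means "not set, fall back to the heuristics". */
    static int force_tjmax;
    module_param_named(tjmax, force_tjmax, int, 0444);
    MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");

With a declaration like this the value is passed at load time, e.g. "modprobe coretemp tjmax=95".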
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt index 81546990f41c..ca5cdcd0f0e3 100644 --- a/Documentation/networking/ip-sysctl.txt +++ b/Documentation/networking/ip-sysctl.txt | |||
@@ -1042,7 +1042,7 @@ conf/interface/*: | |||
1042 | The functional behaviour for certain settings is different | 1042 | The functional behaviour for certain settings is different |
1043 | depending on whether local forwarding is enabled or not. | 1043 | depending on whether local forwarding is enabled or not. |
1044 | 1044 | ||
1045 | accept_ra - BOOLEAN | 1045 | accept_ra - INTEGER |
1046 | Accept Router Advertisements; autoconfigure using them. | 1046 | Accept Router Advertisements; autoconfigure using them. |
1047 | 1047 | ||
1048 | Possible values are: | 1048 | Possible values are: |
@@ -1106,7 +1106,7 @@ dad_transmits - INTEGER | |||
1106 | The amount of Duplicate Address Detection probes to send. | 1106 | The amount of Duplicate Address Detection probes to send. |
1107 | Default: 1 | 1107 | Default: 1 |
1108 | 1108 | ||
1109 | forwarding - BOOLEAN | 1109 | forwarding - INTEGER |
1110 | Configure interface-specific Host/Router behaviour. | 1110 | Configure interface-specific Host/Router behaviour. |
1111 | 1111 | ||
1112 | Note: It is recommended to have the same setting on all | 1112 | Note: It is recommended to have the same setting on all |
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt index 58fd7414e6c0..8ce7c30e7230 100644 --- a/Documentation/networking/scaling.txt +++ b/Documentation/networking/scaling.txt | |||
@@ -243,7 +243,7 @@ configured. The number of entries in the global flow table is set through: | |||
243 | 243 | ||
244 | The number of entries in the per-queue flow table are set through: | 244 | The number of entries in the per-queue flow table are set through: |
245 | 245 | ||
246 | /sys/class/net/<dev>/queues/tx-<n>/rps_flow_cnt | 246 | /sys/class/net/<dev>/queues/rx-<n>/rps_flow_cnt |
247 | 247 | ||
248 | == Suggested Configuration | 248 | == Suggested Configuration |
249 | 249 | ||
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt index 0924aaca3302..29bdf62aac09 100644 --- a/Documentation/vm/transhuge.txt +++ b/Documentation/vm/transhuge.txt | |||
@@ -123,10 +123,11 @@ be automatically shutdown if it's set to "never". | |||
123 | khugepaged runs usually at low frequency so while one may not want to | 123 | khugepaged runs usually at low frequency so while one may not want to |
124 | invoke defrag algorithms synchronously during the page faults, it | 124 | invoke defrag algorithms synchronously during the page faults, it |
125 | should be worth invoking defrag at least in khugepaged. However it's | 125 | should be worth invoking defrag at least in khugepaged. However it's |
126 | also possible to disable defrag in khugepaged: | 126 | also possible to disable defrag in khugepaged by writing 0 or enable |
127 | defrag in khugepaged by writing 1: | ||
127 | 128 | ||
128 | echo yes >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag | 129 | echo 0 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag |
129 | echo no >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag | 130 | echo 1 >/sys/kernel/mm/transparent_hugepage/khugepaged/defrag |
130 | 131 | ||
131 | You can also control how many pages khugepaged should scan at each | 132 | You can also control how many pages khugepaged should scan at each |
132 | pass: | 133 | pass: |
diff --git a/MAINTAINERS b/MAINTAINERS index ae8820e173a2..ace8f9c81b96 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -6374,7 +6374,6 @@ S: Supported | |||
6374 | F: arch/arm/mach-tegra | 6374 | F: arch/arm/mach-tegra |
6375 | 6375 | ||
6376 | TEHUTI ETHERNET DRIVER | 6376 | TEHUTI ETHERNET DRIVER |
6377 | M: Alexander Indenbaum <baum@tehutinetworks.net> | ||
6378 | M: Andy Gospodarek <andy@greyhouse.net> | 6377 | M: Andy Gospodarek <andy@greyhouse.net> |
6379 | L: netdev@vger.kernel.org | 6378 | L: netdev@vger.kernel.org |
6380 | S: Supported | 6379 | S: Supported |
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@ | |||
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 1 | 2 | PATCHLEVEL = 1 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc7 | 4 | EXTRAVERSION = -rc9 |
5 | NAME = "Divemaster Edition" | 5 | NAME = "Divemaster Edition" |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 3269576dbfa8..3146ed3f6eca 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1283,6 +1283,20 @@ config ARM_ERRATA_364296 | |||
1283 | processor into full low interrupt latency mode. ARM11MPCore | 1283 | processor into full low interrupt latency mode. ARM11MPCore |
1284 | is not affected. | 1284 | is not affected. |
1285 | 1285 | ||
1286 | config ARM_ERRATA_764369 | ||
1287 | bool "ARM errata: Data cache line maintenance operation by MVA may not succeed" | ||
1288 | depends on CPU_V7 && SMP | ||
1289 | help | ||
1290 | This option enables the workaround for erratum 764369 | ||
1291 | affecting Cortex-A9 MPCore with two or more processors (all | ||
1292 | current revisions). Under certain timing circumstances, a data | ||
1293 | cache line maintenance operation by MVA targeting an Inner | ||
1294 | Shareable memory region may fail to proceed up to either the | ||
1295 | Point of Coherency or to the Point of Unification of the | ||
1296 | system. This workaround adds a DSB instruction before the | ||
1297 | relevant cache maintenance functions and sets a specific bit | ||
1298 | in the diagnostic control register of the SCU. | ||
1299 | |||
1286 | endmenu | 1300 | endmenu |
1287 | 1301 | ||
1288 | source "arch/arm/common/Kconfig" | 1302 | source "arch/arm/common/Kconfig" |
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h index 8c73900da9ed..253cc86318bf 100644 --- a/arch/arm/include/asm/futex.h +++ b/arch/arm/include/asm/futex.h | |||
@@ -25,17 +25,17 @@ | |||
25 | 25 | ||
26 | #ifdef CONFIG_SMP | 26 | #ifdef CONFIG_SMP |
27 | 27 | ||
28 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | 28 | #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ |
29 | smp_mb(); \ | 29 | smp_mb(); \ |
30 | __asm__ __volatile__( \ | 30 | __asm__ __volatile__( \ |
31 | "1: ldrex %1, [%2]\n" \ | 31 | "1: ldrex %1, [%3]\n" \ |
32 | " " insn "\n" \ | 32 | " " insn "\n" \ |
33 | "2: strex %1, %0, [%2]\n" \ | 33 | "2: strex %2, %0, [%3]\n" \ |
34 | " teq %1, #0\n" \ | 34 | " teq %2, #0\n" \ |
35 | " bne 1b\n" \ | 35 | " bne 1b\n" \ |
36 | " mov %0, #0\n" \ | 36 | " mov %0, #0\n" \ |
37 | __futex_atomic_ex_table("%4") \ | 37 | __futex_atomic_ex_table("%5") \ |
38 | : "=&r" (ret), "=&r" (oldval) \ | 38 | : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ |
39 | : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ | 39 | : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ |
40 | : "cc", "memory") | 40 | : "cc", "memory") |
41 | 41 | ||
@@ -73,14 +73,14 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, | |||
73 | #include <linux/preempt.h> | 73 | #include <linux/preempt.h> |
74 | #include <asm/domain.h> | 74 | #include <asm/domain.h> |
75 | 75 | ||
76 | #define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ | 76 | #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \ |
77 | __asm__ __volatile__( \ | 77 | __asm__ __volatile__( \ |
78 | "1: " T(ldr) " %1, [%2]\n" \ | 78 | "1: " T(ldr) " %1, [%3]\n" \ |
79 | " " insn "\n" \ | 79 | " " insn "\n" \ |
80 | "2: " T(str) " %0, [%2]\n" \ | 80 | "2: " T(str) " %0, [%3]\n" \ |
81 | " mov %0, #0\n" \ | 81 | " mov %0, #0\n" \ |
82 | __futex_atomic_ex_table("%4") \ | 82 | __futex_atomic_ex_table("%5") \ |
83 | : "=&r" (ret), "=&r" (oldval) \ | 83 | : "=&r" (ret), "=&r" (oldval), "=&r" (tmp) \ |
84 | : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ | 84 | : "r" (uaddr), "r" (oparg), "Ir" (-EFAULT) \ |
85 | : "cc", "memory") | 85 | : "cc", "memory") |
86 | 86 | ||
@@ -117,7 +117,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |||
117 | int cmp = (encoded_op >> 24) & 15; | 117 | int cmp = (encoded_op >> 24) & 15; |
118 | int oparg = (encoded_op << 8) >> 20; | 118 | int oparg = (encoded_op << 8) >> 20; |
119 | int cmparg = (encoded_op << 20) >> 20; | 119 | int cmparg = (encoded_op << 20) >> 20; |
120 | int oldval = 0, ret; | 120 | int oldval = 0, ret, tmp; |
121 | 121 | ||
122 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 122 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
123 | oparg = 1 << oparg; | 123 | oparg = 1 << oparg; |
@@ -129,19 +129,19 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) | |||
129 | 129 | ||
130 | switch (op) { | 130 | switch (op) { |
131 | case FUTEX_OP_SET: | 131 | case FUTEX_OP_SET: |
132 | __futex_atomic_op("mov %0, %3", ret, oldval, uaddr, oparg); | 132 | __futex_atomic_op("mov %0, %4", ret, oldval, tmp, uaddr, oparg); |
133 | break; | 133 | break; |
134 | case FUTEX_OP_ADD: | 134 | case FUTEX_OP_ADD: |
135 | __futex_atomic_op("add %0, %1, %3", ret, oldval, uaddr, oparg); | 135 | __futex_atomic_op("add %0, %1, %4", ret, oldval, tmp, uaddr, oparg); |
136 | break; | 136 | break; |
137 | case FUTEX_OP_OR: | 137 | case FUTEX_OP_OR: |
138 | __futex_atomic_op("orr %0, %1, %3", ret, oldval, uaddr, oparg); | 138 | __futex_atomic_op("orr %0, %1, %4", ret, oldval, tmp, uaddr, oparg); |
139 | break; | 139 | break; |
140 | case FUTEX_OP_ANDN: | 140 | case FUTEX_OP_ANDN: |
141 | __futex_atomic_op("and %0, %1, %3", ret, oldval, uaddr, ~oparg); | 141 | __futex_atomic_op("and %0, %1, %4", ret, oldval, tmp, uaddr, ~oparg); |
142 | break; | 142 | break; |
143 | case FUTEX_OP_XOR: | 143 | case FUTEX_OP_XOR: |
144 | __futex_atomic_op("eor %0, %1, %3", ret, oldval, uaddr, oparg); | 144 | __futex_atomic_op("eor %0, %1, %4", ret, oldval, tmp, uaddr, oparg); |
145 | break; | 145 | break; |
146 | default: | 146 | default: |
147 | ret = -ENOSYS; | 147 | ret = -ENOSYS; |
diff --git a/arch/arm/include/asm/unistd.h b/arch/arm/include/asm/unistd.h index 2c04ed5efeb5..c60a2944f95b 100644 --- a/arch/arm/include/asm/unistd.h +++ b/arch/arm/include/asm/unistd.h | |||
@@ -478,8 +478,8 @@ | |||
478 | /* | 478 | /* |
479 | * Unimplemented (or alternatively implemented) syscalls | 479 | * Unimplemented (or alternatively implemented) syscalls |
480 | */ | 480 | */ |
481 | #define __IGNORE_fadvise64_64 1 | 481 | #define __IGNORE_fadvise64_64 |
482 | #define __IGNORE_migrate_pages 1 | 482 | #define __IGNORE_migrate_pages |
483 | 483 | ||
484 | #endif /* __KERNEL__ */ | 484 | #endif /* __KERNEL__ */ |
485 | #endif /* __ASM_ARM_UNISTD_H */ | 485 | #endif /* __ASM_ARM_UNISTD_H */ |
diff --git a/arch/arm/kernel/smp_scu.c b/arch/arm/kernel/smp_scu.c index 79ed5e7f204a..7fcddb75c877 100644 --- a/arch/arm/kernel/smp_scu.c +++ b/arch/arm/kernel/smp_scu.c | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #include <asm/smp_scu.h> | 14 | #include <asm/smp_scu.h> |
15 | #include <asm/cacheflush.h> | 15 | #include <asm/cacheflush.h> |
16 | #include <asm/cputype.h> | ||
16 | 17 | ||
17 | #define SCU_CTRL 0x00 | 18 | #define SCU_CTRL 0x00 |
18 | #define SCU_CONFIG 0x04 | 19 | #define SCU_CONFIG 0x04 |
@@ -37,6 +38,15 @@ void __init scu_enable(void __iomem *scu_base) | |||
37 | { | 38 | { |
38 | u32 scu_ctrl; | 39 | u32 scu_ctrl; |
39 | 40 | ||
41 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
42 | /* Cortex-A9 only */ | ||
43 | if ((read_cpuid(CPUID_ID) & 0xff0ffff0) == 0x410fc090) { | ||
44 | scu_ctrl = __raw_readl(scu_base + 0x30); | ||
45 | if (!(scu_ctrl & 1)) | ||
46 | __raw_writel(scu_ctrl | 0x1, scu_base + 0x30); | ||
47 | } | ||
48 | #endif | ||
49 | |||
40 | scu_ctrl = __raw_readl(scu_base + SCU_CTRL); | 50 | scu_ctrl = __raw_readl(scu_base + SCU_CTRL); |
41 | /* already enabled? */ | 51 | /* already enabled? */ |
42 | if (scu_ctrl & 1) | 52 | if (scu_ctrl & 1) |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index bf977f8514f6..4e66f62b8d41 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -23,8 +23,10 @@ | |||
23 | 23 | ||
24 | #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) | 24 | #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) |
25 | #define ARM_EXIT_KEEP(x) x | 25 | #define ARM_EXIT_KEEP(x) x |
26 | #define ARM_EXIT_DISCARD(x) | ||
26 | #else | 27 | #else |
27 | #define ARM_EXIT_KEEP(x) | 28 | #define ARM_EXIT_KEEP(x) |
29 | #define ARM_EXIT_DISCARD(x) x | ||
28 | #endif | 30 | #endif |
29 | 31 | ||
30 | OUTPUT_ARCH(arm) | 32 | OUTPUT_ARCH(arm) |
@@ -39,6 +41,11 @@ jiffies = jiffies_64 + 4; | |||
39 | SECTIONS | 41 | SECTIONS |
40 | { | 42 | { |
41 | /* | 43 | /* |
44 | * XXX: The linker does not define how output sections are | ||
45 | * assigned to input sections when there are multiple statements | ||
46 | * matching the same input section name. There is no documented | ||
47 | * order of matching. | ||
48 | * | ||
42 | * unwind exit sections must be discarded before the rest of the | 49 | * unwind exit sections must be discarded before the rest of the |
43 | * unwind sections get included. | 50 | * unwind sections get included. |
44 | */ | 51 | */ |
@@ -47,6 +54,9 @@ SECTIONS | |||
47 | *(.ARM.extab.exit.text) | 54 | *(.ARM.extab.exit.text) |
48 | ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) | 55 | ARM_CPU_DISCARD(*(.ARM.exidx.cpuexit.text)) |
49 | ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) | 56 | ARM_CPU_DISCARD(*(.ARM.extab.cpuexit.text)) |
57 | ARM_EXIT_DISCARD(EXIT_TEXT) | ||
58 | ARM_EXIT_DISCARD(EXIT_DATA) | ||
59 | EXIT_CALL | ||
50 | #ifndef CONFIG_HOTPLUG | 60 | #ifndef CONFIG_HOTPLUG |
51 | *(.ARM.exidx.devexit.text) | 61 | *(.ARM.exidx.devexit.text) |
52 | *(.ARM.extab.devexit.text) | 62 | *(.ARM.extab.devexit.text) |
@@ -58,6 +68,8 @@ SECTIONS | |||
58 | #ifndef CONFIG_SMP_ON_UP | 68 | #ifndef CONFIG_SMP_ON_UP |
59 | *(.alt.smp.init) | 69 | *(.alt.smp.init) |
60 | #endif | 70 | #endif |
71 | *(.discard) | ||
72 | *(.discard.*) | ||
61 | } | 73 | } |
62 | 74 | ||
63 | #ifdef CONFIG_XIP_KERNEL | 75 | #ifdef CONFIG_XIP_KERNEL |
@@ -279,9 +291,6 @@ SECTIONS | |||
279 | 291 | ||
280 | STABS_DEBUG | 292 | STABS_DEBUG |
281 | .comment 0 : { *(.comment) } | 293 | .comment 0 : { *(.comment) } |
282 | |||
283 | /* Default discards */ | ||
284 | DISCARDS | ||
285 | } | 294 | } |
286 | 295 | ||
287 | /* | 296 | /* |
diff --git a/arch/arm/mach-exynos4/clock.c b/arch/arm/mach-exynos4/clock.c index 79d6cd0c8e7b..86964d2e9e1b 100644 --- a/arch/arm/mach-exynos4/clock.c +++ b/arch/arm/mach-exynos4/clock.c | |||
@@ -899,8 +899,7 @@ static struct clksrc_clk clksrcs[] = { | |||
899 | .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 }, | 899 | .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 28, .size = 4 }, |
900 | }, { | 900 | }, { |
901 | .clk = { | 901 | .clk = { |
902 | .name = "sclk_cam", | 902 | .name = "sclk_cam0", |
903 | .devname = "exynos4-fimc.0", | ||
904 | .enable = exynos4_clksrc_mask_cam_ctrl, | 903 | .enable = exynos4_clksrc_mask_cam_ctrl, |
905 | .ctrlbit = (1 << 16), | 904 | .ctrlbit = (1 << 16), |
906 | }, | 905 | }, |
@@ -909,8 +908,7 @@ static struct clksrc_clk clksrcs[] = { | |||
909 | .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 }, | 908 | .reg_div = { .reg = S5P_CLKDIV_CAM, .shift = 16, .size = 4 }, |
910 | }, { | 909 | }, { |
911 | .clk = { | 910 | .clk = { |
912 | .name = "sclk_cam", | 911 | .name = "sclk_cam1", |
913 | .devname = "exynos4-fimc.1", | ||
914 | .enable = exynos4_clksrc_mask_cam_ctrl, | 912 | .enable = exynos4_clksrc_mask_cam_ctrl, |
915 | .ctrlbit = (1 << 20), | 913 | .ctrlbit = (1 << 20), |
916 | }, | 914 | }, |
diff --git a/arch/arm/mach-s3c2443/clock.c b/arch/arm/mach-s3c2443/clock.c index a1a7176675b9..38058af48972 100644 --- a/arch/arm/mach-s3c2443/clock.c +++ b/arch/arm/mach-s3c2443/clock.c | |||
@@ -128,7 +128,7 @@ static int s3c2443_armclk_setrate(struct clk *clk, unsigned long rate) | |||
128 | unsigned long clkcon0; | 128 | unsigned long clkcon0; |
129 | 129 | ||
130 | clkcon0 = __raw_readl(S3C2443_CLKDIV0); | 130 | clkcon0 = __raw_readl(S3C2443_CLKDIV0); |
131 | clkcon0 &= S3C2443_CLKDIV0_ARMDIV_MASK; | 131 | clkcon0 &= ~S3C2443_CLKDIV0_ARMDIV_MASK; |
132 | clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT; | 132 | clkcon0 |= val << S3C2443_CLKDIV0_ARMDIV_SHIFT; |
133 | __raw_writel(clkcon0, S3C2443_CLKDIV0); | 133 | __raw_writel(clkcon0, S3C2443_CLKDIV0); |
134 | } | 134 | } |
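The single-character fix above restores the usual read-modify-write handling of a register field: clear the field with the inverted mask, then OR in the new divider. A minimal sketch of the pattern, with invented register/mask/shift names rather than the S3C2443 ones:

    #include <linux/io.h>
    #include <linux/types.h>

    /* Clear only the target field, then set it. The buggy variant did
     * "v &= mask", which kept the stale field bits and wiped every other
     * field in the register. */
    static void set_reg_field(void __iomem *reg, u32 mask, u32 shift, u32 val)
    {
            u32 v = __raw_readl(reg);

            v &= ~mask;
            v |= val << shift;
            __raw_writel(v, reg);
    }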
diff --git a/arch/arm/mach-s5pv210/clock.c b/arch/arm/mach-s5pv210/clock.c index 52a8e607bcc2..f5f8fa89679c 100644 --- a/arch/arm/mach-s5pv210/clock.c +++ b/arch/arm/mach-s5pv210/clock.c | |||
@@ -815,8 +815,7 @@ static struct clksrc_clk clksrcs[] = { | |||
815 | .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, | 815 | .reg_div = { .reg = S5P_CLK_DIV3, .shift = 20, .size = 4 }, |
816 | }, { | 816 | }, { |
817 | .clk = { | 817 | .clk = { |
818 | .name = "sclk_cam", | 818 | .name = "sclk_cam0", |
819 | .devname = "s5pv210-fimc.0", | ||
820 | .enable = s5pv210_clk_mask0_ctrl, | 819 | .enable = s5pv210_clk_mask0_ctrl, |
821 | .ctrlbit = (1 << 3), | 820 | .ctrlbit = (1 << 3), |
822 | }, | 821 | }, |
@@ -825,8 +824,7 @@ static struct clksrc_clk clksrcs[] = { | |||
825 | .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, | 824 | .reg_div = { .reg = S5P_CLK_DIV1, .shift = 12, .size = 4 }, |
826 | }, { | 825 | }, { |
827 | .clk = { | 826 | .clk = { |
828 | .name = "sclk_cam", | 827 | .name = "sclk_cam1", |
829 | .devname = "s5pv210-fimc.1", | ||
830 | .enable = s5pv210_clk_mask0_ctrl, | 828 | .enable = s5pv210_clk_mask0_ctrl, |
831 | .ctrlbit = (1 << 4), | 829 | .ctrlbit = (1 << 4), |
832 | }, | 830 | }, |
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 3b24bfa3b828..07c4bc8ea0a4 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S | |||
@@ -174,6 +174,10 @@ ENTRY(v7_coherent_user_range) | |||
174 | dcache_line_size r2, r3 | 174 | dcache_line_size r2, r3 |
175 | sub r3, r2, #1 | 175 | sub r3, r2, #1 |
176 | bic r12, r0, r3 | 176 | bic r12, r0, r3 |
177 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
178 | ALT_SMP(W(dsb)) | ||
179 | ALT_UP(W(nop)) | ||
180 | #endif | ||
177 | 1: | 181 | 1: |
178 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification | 182 | USER( mcr p15, 0, r12, c7, c11, 1 ) @ clean D line to the point of unification |
179 | add r12, r12, r2 | 183 | add r12, r12, r2 |
@@ -223,6 +227,10 @@ ENTRY(v7_flush_kern_dcache_area) | |||
223 | add r1, r0, r1 | 227 | add r1, r0, r1 |
224 | sub r3, r2, #1 | 228 | sub r3, r2, #1 |
225 | bic r0, r0, r3 | 229 | bic r0, r0, r3 |
230 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
231 | ALT_SMP(W(dsb)) | ||
232 | ALT_UP(W(nop)) | ||
233 | #endif | ||
226 | 1: | 234 | 1: |
227 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line | 235 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D line / unified line |
228 | add r0, r0, r2 | 236 | add r0, r0, r2 |
@@ -247,6 +255,10 @@ v7_dma_inv_range: | |||
247 | sub r3, r2, #1 | 255 | sub r3, r2, #1 |
248 | tst r0, r3 | 256 | tst r0, r3 |
249 | bic r0, r0, r3 | 257 | bic r0, r0, r3 |
258 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
259 | ALT_SMP(W(dsb)) | ||
260 | ALT_UP(W(nop)) | ||
261 | #endif | ||
250 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | 262 | mcrne p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line |
251 | 263 | ||
252 | tst r1, r3 | 264 | tst r1, r3 |
@@ -270,6 +282,10 @@ v7_dma_clean_range: | |||
270 | dcache_line_size r2, r3 | 282 | dcache_line_size r2, r3 |
271 | sub r3, r2, #1 | 283 | sub r3, r2, #1 |
272 | bic r0, r0, r3 | 284 | bic r0, r0, r3 |
285 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
286 | ALT_SMP(W(dsb)) | ||
287 | ALT_UP(W(nop)) | ||
288 | #endif | ||
273 | 1: | 289 | 1: |
274 | mcr p15, 0, r0, c7, c10, 1 @ clean D / U line | 290 | mcr p15, 0, r0, c7, c10, 1 @ clean D / U line |
275 | add r0, r0, r2 | 291 | add r0, r0, r2 |
@@ -288,6 +304,10 @@ ENTRY(v7_dma_flush_range) | |||
288 | dcache_line_size r2, r3 | 304 | dcache_line_size r2, r3 |
289 | sub r3, r2, #1 | 305 | sub r3, r2, #1 |
290 | bic r0, r0, r3 | 306 | bic r0, r0, r3 |
307 | #ifdef CONFIG_ARM_ERRATA_764369 | ||
308 | ALT_SMP(W(dsb)) | ||
309 | ALT_UP(W(nop)) | ||
310 | #endif | ||
291 | 1: | 311 | 1: |
292 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line | 312 | mcr p15, 0, r0, c7, c14, 1 @ clean & invalidate D / U line |
293 | add r0, r0, r2 | 313 | add r0, r0, r2 |
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 0a0a1e7c20d2..c3ff82f92d9c 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c | |||
@@ -324,6 +324,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, | |||
324 | 324 | ||
325 | if (addr) | 325 | if (addr) |
326 | *handle = pfn_to_dma(dev, page_to_pfn(page)); | 326 | *handle = pfn_to_dma(dev, page_to_pfn(page)); |
327 | else | ||
328 | __dma_free_buffer(page, size); | ||
327 | 329 | ||
328 | return addr; | 330 | return addr; |
329 | } | 331 | } |
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-s5p/irq-gpioint.c index f71078ef6bb5..f88216d23991 100644 --- a/arch/arm/plat-s5p/irq-gpioint.c +++ b/arch/arm/plat-s5p/irq-gpioint.c | |||
@@ -114,17 +114,18 @@ static __init int s5p_gpioint_add(struct s3c_gpio_chip *chip) | |||
114 | { | 114 | { |
115 | static int used_gpioint_groups = 0; | 115 | static int used_gpioint_groups = 0; |
116 | int group = chip->group; | 116 | int group = chip->group; |
117 | struct s5p_gpioint_bank *bank = NULL; | 117 | struct s5p_gpioint_bank *b, *bank = NULL; |
118 | struct irq_chip_generic *gc; | 118 | struct irq_chip_generic *gc; |
119 | struct irq_chip_type *ct; | 119 | struct irq_chip_type *ct; |
120 | 120 | ||
121 | if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT) | 121 | if (used_gpioint_groups >= S5P_GPIOINT_GROUP_COUNT) |
122 | return -ENOMEM; | 122 | return -ENOMEM; |
123 | 123 | ||
124 | list_for_each_entry(bank, &banks, list) { | 124 | list_for_each_entry(b, &banks, list) { |
125 | if (group >= bank->start && | 125 | if (group >= b->start && group < b->start + b->nr_groups) { |
126 | group < bank->start + bank->nr_groups) | 126 | bank = b; |
127 | break; | 127 | break; |
128 | } | ||
128 | } | 129 | } |
129 | if (!bank) | 130 | if (!bank) |
130 | return -EINVAL; | 131 | return -EINVAL; |
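The hunk above fixes a common list_for_each_entry() pitfall: if the loop runs to completion without a break, the cursor is not NULL, it points at the bogus container computed from the list head itself, so the following !bank check can never report "not found". A hedged sketch of the broken and fixed shapes, with invented names:

    #include <linux/kernel.h>
    #include <linux/list.h>

    struct bank {
            struct list_head list;
            int start, nr_groups;
    };

    /* Broken: 'bank' is never NULL after a full traversal. */
    static struct bank *find_bank_buggy(struct list_head *banks, int group)
    {
            struct bank *bank = NULL;

            list_for_each_entry(bank, banks, list)
                    if (group >= bank->start &&
                        group < bank->start + bank->nr_groups)
                            break;
            return bank;    /* garbage pointer when nothing matched */
    }

    /* Fixed, as in the patch: iterate with a scratch cursor and assign the
     * result only on an explicit match, so NULL really means "not found". */
    static struct bank *find_bank(struct list_head *banks, int group)
    {
            struct bank *b, *bank = NULL;

            list_for_each_entry(b, banks, list) {
                    if (group >= b->start && group < b->start + b->nr_groups) {
                            bank = b;
                            break;
                    }
            }
            return bank;
    }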
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 5cc83851ad06..31a7d3a7ce25 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -561,6 +561,20 @@ static struct pci_ops u4_pcie_pci_ops = | |||
561 | .write = u4_pcie_write_config, | 561 | .write = u4_pcie_write_config, |
562 | }; | 562 | }; |
563 | 563 | ||
564 | static void __devinit pmac_pci_fixup_u4_of_node(struct pci_dev *dev) | ||
565 | { | ||
566 | /* Apple's device-tree "hides" the root complex virtual P2P bridge | ||
567 | * on U4. However, Linux sees it, causing the PCI <-> OF matching | ||
568 | * code to fail to properly match devices below it. This works around | ||
569 | * it by setting the node of the bridge to point to the PHB node, | ||
570 | * which is not entirely correct but fixes the matching code and | ||
571 | * doesn't break anything else. It's also the simplest possible fix. | ||
572 | */ | ||
573 | if (dev->dev.of_node == NULL) | ||
574 | dev->dev.of_node = pcibios_get_phb_of_node(dev->bus); | ||
575 | } | ||
576 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, 0x5b, pmac_pci_fixup_u4_of_node); | ||
577 | |||
564 | #endif /* CONFIG_PPC64 */ | 578 | #endif /* CONFIG_PPC64 */ |
565 | 579 | ||
566 | #ifdef CONFIG_PPC32 | 580 | #ifdef CONFIG_PPC32 |
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h index 64b61bf72e93..547f1a6a35d4 100644 --- a/arch/s390/include/asm/elf.h +++ b/arch/s390/include/asm/elf.h | |||
@@ -188,7 +188,8 @@ extern char elf_platform[]; | |||
188 | #define SET_PERSONALITY(ex) \ | 188 | #define SET_PERSONALITY(ex) \ |
189 | do { \ | 189 | do { \ |
190 | if (personality(current->personality) != PER_LINUX32) \ | 190 | if (personality(current->personality) != PER_LINUX32) \ |
191 | set_personality(PER_LINUX); \ | 191 | set_personality(PER_LINUX | \ |
192 | (current->personality & ~PER_MASK)); \ | ||
192 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ | 193 | if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ |
193 | set_thread_flag(TIF_31BIT); \ | 194 | set_thread_flag(TIF_31BIT); \ |
194 | else \ | 195 | else \ |
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 519eb5f187ef..c0cb794bb365 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h | |||
@@ -658,12 +658,14 @@ static inline void pgste_set_pte(pte_t *ptep, pgste_t pgste) | |||
658 | * struct gmap_struct - guest address space | 658 | * struct gmap_struct - guest address space |
659 | * @mm: pointer to the parent mm_struct | 659 | * @mm: pointer to the parent mm_struct |
660 | * @table: pointer to the page directory | 660 | * @table: pointer to the page directory |
661 | * @asce: address space control element for gmap page table | ||
661 | * @crst_list: list of all crst tables used in the guest address space | 662 | * @crst_list: list of all crst tables used in the guest address space |
662 | */ | 663 | */ |
663 | struct gmap { | 664 | struct gmap { |
664 | struct list_head list; | 665 | struct list_head list; |
665 | struct mm_struct *mm; | 666 | struct mm_struct *mm; |
666 | unsigned long *table; | 667 | unsigned long *table; |
668 | unsigned long asce; | ||
667 | struct list_head crst_list; | 669 | struct list_head crst_list; |
668 | }; | 670 | }; |
669 | 671 | ||
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c index 532fd4322156..2b45591e1582 100644 --- a/arch/s390/kernel/asm-offsets.c +++ b/arch/s390/kernel/asm-offsets.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/sched.h> | 10 | #include <linux/sched.h> |
11 | #include <asm/vdso.h> | 11 | #include <asm/vdso.h> |
12 | #include <asm/sigp.h> | 12 | #include <asm/sigp.h> |
13 | #include <asm/pgtable.h> | ||
13 | 14 | ||
14 | /* | 15 | /* |
15 | * Make sure that the compiler is new enough. We want a compiler that | 16 | * Make sure that the compiler is new enough. We want a compiler that |
@@ -126,6 +127,7 @@ int main(void) | |||
126 | DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); | 127 | DEFINE(__LC_KERNEL_STACK, offsetof(struct _lowcore, kernel_stack)); |
127 | DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); | 128 | DEFINE(__LC_ASYNC_STACK, offsetof(struct _lowcore, async_stack)); |
128 | DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); | 129 | DEFINE(__LC_PANIC_STACK, offsetof(struct _lowcore, panic_stack)); |
130 | DEFINE(__LC_USER_ASCE, offsetof(struct _lowcore, user_asce)); | ||
129 | DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); | 131 | DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock)); |
130 | DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); | 132 | DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock)); |
131 | DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); | 133 | DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags)); |
@@ -151,6 +153,7 @@ int main(void) | |||
151 | DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); | 153 | DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); |
152 | DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); | 154 | DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); |
153 | DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); | 155 | DEFINE(__LC_CMF_HPP, offsetof(struct _lowcore, cmf_hpp)); |
156 | DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); | ||
154 | #endif /* CONFIG_32BIT */ | 157 | #endif /* CONFIG_32BIT */ |
155 | return 0; | 158 | return 0; |
156 | } | 159 | } |
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S index 5f729d627cef..713da0760538 100644 --- a/arch/s390/kernel/entry64.S +++ b/arch/s390/kernel/entry64.S | |||
@@ -1076,6 +1076,11 @@ sie_loop: | |||
1076 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct | 1076 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct |
1077 | tm __TI_flags+7(%r14),_TIF_EXIT_SIE | 1077 | tm __TI_flags+7(%r14),_TIF_EXIT_SIE |
1078 | jnz sie_exit | 1078 | jnz sie_exit |
1079 | lg %r14,__LC_GMAP # get gmap pointer | ||
1080 | ltgr %r14,%r14 | ||
1081 | jz sie_gmap | ||
1082 | lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce | ||
1083 | sie_gmap: | ||
1079 | lg %r14,__SF_EMPTY(%r15) # get control block pointer | 1084 | lg %r14,__SF_EMPTY(%r15) # get control block pointer |
1080 | SPP __SF_EMPTY(%r15) # set guest id | 1085 | SPP __SF_EMPTY(%r15) # set guest id |
1081 | sie 0(%r14) | 1086 | sie 0(%r14) |
@@ -1083,6 +1088,7 @@ sie_done: | |||
1083 | SPP __LC_CMF_HPP # set host id | 1088 | SPP __LC_CMF_HPP # set host id |
1084 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct | 1089 | lg %r14,__LC_THREAD_INFO # pointer thread_info struct |
1085 | sie_exit: | 1090 | sie_exit: |
1091 | lctlg %c1,%c1,__LC_USER_ASCE # load primary asce | ||
1086 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) | 1092 | ni __TI_flags+6(%r14),255-(_TIF_SIE>>8) |
1087 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area | 1093 | lg %r14,__SF_EMPTY+8(%r15) # load guest register save area |
1088 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 | 1094 | stmg %r0,%r13,0(%r14) # save guest gprs 0-13 |
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f17296e4fc89..dc2b580e27bc 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c | |||
@@ -123,6 +123,7 @@ int kvm_dev_ioctl_check_extension(long ext) | |||
123 | 123 | ||
124 | switch (ext) { | 124 | switch (ext) { |
125 | case KVM_CAP_S390_PSW: | 125 | case KVM_CAP_S390_PSW: |
126 | case KVM_CAP_S390_GMAP: | ||
126 | r = 1; | 127 | r = 1; |
127 | break; | 128 | break; |
128 | default: | 129 | default: |
@@ -263,10 +264,12 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |||
263 | vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; | 264 | vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK; |
264 | restore_fp_regs(&vcpu->arch.guest_fpregs); | 265 | restore_fp_regs(&vcpu->arch.guest_fpregs); |
265 | restore_access_regs(vcpu->arch.guest_acrs); | 266 | restore_access_regs(vcpu->arch.guest_acrs); |
267 | gmap_enable(vcpu->arch.gmap); | ||
266 | } | 268 | } |
267 | 269 | ||
268 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 270 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
269 | { | 271 | { |
272 | gmap_disable(vcpu->arch.gmap); | ||
270 | save_fp_regs(&vcpu->arch.guest_fpregs); | 273 | save_fp_regs(&vcpu->arch.guest_fpregs); |
271 | save_access_regs(vcpu->arch.guest_acrs); | 274 | save_access_regs(vcpu->arch.guest_acrs); |
272 | restore_fp_regs(&vcpu->arch.host_fpregs); | 275 | restore_fp_regs(&vcpu->arch.host_fpregs); |
@@ -461,7 +464,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) | |||
461 | local_irq_disable(); | 464 | local_irq_disable(); |
462 | kvm_guest_enter(); | 465 | kvm_guest_enter(); |
463 | local_irq_enable(); | 466 | local_irq_enable(); |
464 | gmap_enable(vcpu->arch.gmap); | ||
465 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", | 467 | VCPU_EVENT(vcpu, 6, "entering sie flags %x", |
466 | atomic_read(&vcpu->arch.sie_block->cpuflags)); | 468 | atomic_read(&vcpu->arch.sie_block->cpuflags)); |
467 | if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { | 469 | if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { |
@@ -470,7 +472,6 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) | |||
470 | } | 472 | } |
471 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", | 473 | VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", |
472 | vcpu->arch.sie_block->icptcode); | 474 | vcpu->arch.sie_block->icptcode); |
473 | gmap_disable(vcpu->arch.gmap); | ||
474 | local_irq_disable(); | 475 | local_irq_disable(); |
475 | kvm_guest_exit(); | 476 | kvm_guest_exit(); |
476 | local_irq_enable(); | 477 | local_irq_enable(); |
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 4d1f2bce87b3..5d56c2b95b14 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c | |||
@@ -160,6 +160,8 @@ struct gmap *gmap_alloc(struct mm_struct *mm) | |||
160 | table = (unsigned long *) page_to_phys(page); | 160 | table = (unsigned long *) page_to_phys(page); |
161 | crst_table_init(table, _REGION1_ENTRY_EMPTY); | 161 | crst_table_init(table, _REGION1_ENTRY_EMPTY); |
162 | gmap->table = table; | 162 | gmap->table = table; |
163 | gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | | ||
164 | _ASCE_USER_BITS | __pa(table); | ||
163 | list_add(&gmap->list, &mm->context.gmap_list); | 165 | list_add(&gmap->list, &mm->context.gmap_list); |
164 | return gmap; | 166 | return gmap; |
165 | 167 | ||
@@ -240,10 +242,6 @@ EXPORT_SYMBOL_GPL(gmap_free); | |||
240 | */ | 242 | */ |
241 | void gmap_enable(struct gmap *gmap) | 243 | void gmap_enable(struct gmap *gmap) |
242 | { | 244 | { |
243 | /* Load primary space page table origin. */ | ||
244 | S390_lowcore.user_asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH | | ||
245 | _ASCE_USER_BITS | __pa(gmap->table); | ||
246 | asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); | ||
247 | S390_lowcore.gmap = (unsigned long) gmap; | 245 | S390_lowcore.gmap = (unsigned long) gmap; |
248 | } | 246 | } |
249 | EXPORT_SYMBOL_GPL(gmap_enable); | 247 | EXPORT_SYMBOL_GPL(gmap_enable); |
@@ -254,10 +252,6 @@ EXPORT_SYMBOL_GPL(gmap_enable); | |||
254 | */ | 252 | */ |
255 | void gmap_disable(struct gmap *gmap) | 253 | void gmap_disable(struct gmap *gmap) |
256 | { | 254 | { |
257 | /* Load primary space page table origin. */ | ||
258 | S390_lowcore.user_asce = | ||
259 | gmap->mm->context.asce_bits | __pa(gmap->mm->pgd); | ||
260 | asm volatile("lctlg 1,1,%0\n" : : "m" (S390_lowcore.user_asce) ); | ||
261 | S390_lowcore.gmap = 0UL; | 255 | S390_lowcore.gmap = 0UL; |
262 | } | 256 | } |
263 | EXPORT_SYMBOL_GPL(gmap_disable); | 257 | EXPORT_SYMBOL_GPL(gmap_disable); |
@@ -309,15 +303,15 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
309 | /* Walk the guest addr space page table */ | 303 | /* Walk the guest addr space page table */ |
310 | table = gmap->table + (((to + off) >> 53) & 0x7ff); | 304 | table = gmap->table + (((to + off) >> 53) & 0x7ff); |
311 | if (*table & _REGION_ENTRY_INV) | 305 | if (*table & _REGION_ENTRY_INV) |
312 | return 0; | 306 | goto out; |
313 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 307 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
314 | table = table + (((to + off) >> 42) & 0x7ff); | 308 | table = table + (((to + off) >> 42) & 0x7ff); |
315 | if (*table & _REGION_ENTRY_INV) | 309 | if (*table & _REGION_ENTRY_INV) |
316 | return 0; | 310 | goto out; |
317 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 311 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
318 | table = table + (((to + off) >> 31) & 0x7ff); | 312 | table = table + (((to + off) >> 31) & 0x7ff); |
319 | if (*table & _REGION_ENTRY_INV) | 313 | if (*table & _REGION_ENTRY_INV) |
320 | return 0; | 314 | goto out; |
321 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); | 315 | table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN); |
322 | table = table + (((to + off) >> 20) & 0x7ff); | 316 | table = table + (((to + off) >> 20) & 0x7ff); |
323 | 317 | ||
@@ -325,6 +319,7 @@ int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len) | |||
325 | flush |= gmap_unlink_segment(gmap, table); | 319 | flush |= gmap_unlink_segment(gmap, table); |
326 | *table = _SEGMENT_ENTRY_INV; | 320 | *table = _SEGMENT_ENTRY_INV; |
327 | } | 321 | } |
322 | out: | ||
328 | up_read(&gmap->mm->mmap_sem); | 323 | up_read(&gmap->mm->mmap_sem); |
329 | if (flush) | 324 | if (flush) |
330 | gmap_flush_tlb(gmap); | 325 | gmap_flush_tlb(gmap); |
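The return-to-goto conversions in gmap_unmap_segment() above close early exits that skipped up_read(), leaving mmap_sem held on the "entry invalid" paths. A hedged sketch of the single-unlock-site pattern being restored; entry_valid() and do_walk() are invented placeholders:

    #include <linux/rwsem.h>
    #include <linux/types.h>

    static bool entry_valid(void);  /* placeholder */
    static int do_walk(void);       /* placeholder */

    static int walk_under_lock(struct rw_semaphore *sem)
    {
            int ret = 0;

            down_read(sem);
            if (!entry_valid())
                    goto out;       /* old code returned here, leaking the lock */
            ret = do_walk();
    out:
            up_read(sem);
            return ret;
    }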
diff --git a/arch/sparc/include/asm/spitfire.h b/arch/sparc/include/asm/spitfire.h index 55a17c6efeb8..d06a26601753 100644 --- a/arch/sparc/include/asm/spitfire.h +++ b/arch/sparc/include/asm/spitfire.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #define SUN4V_CHIP_NIAGARA1 0x01 | 43 | #define SUN4V_CHIP_NIAGARA1 0x01 |
44 | #define SUN4V_CHIP_NIAGARA2 0x02 | 44 | #define SUN4V_CHIP_NIAGARA2 0x02 |
45 | #define SUN4V_CHIP_NIAGARA3 0x03 | 45 | #define SUN4V_CHIP_NIAGARA3 0x03 |
46 | #define SUN4V_CHIP_NIAGARA4 0x04 | ||
47 | #define SUN4V_CHIP_NIAGARA5 0x05 | ||
46 | #define SUN4V_CHIP_UNKNOWN 0xff | 48 | #define SUN4V_CHIP_UNKNOWN 0xff |
47 | 49 | ||
48 | #ifndef __ASSEMBLY__ | 50 | #ifndef __ASSEMBLY__ |
diff --git a/arch/sparc/include/asm/xor_64.h b/arch/sparc/include/asm/xor_64.h index 9ed6ff679ab7..ee8edc68423e 100644 --- a/arch/sparc/include/asm/xor_64.h +++ b/arch/sparc/include/asm/xor_64.h | |||
@@ -66,6 +66,8 @@ static struct xor_block_template xor_block_niagara = { | |||
66 | ((tlb_type == hypervisor && \ | 66 | ((tlb_type == hypervisor && \ |
67 | (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ | 67 | (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || \ |
68 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ | 68 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || \ |
69 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3)) ? \ | 69 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || \ |
70 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || \ | ||
71 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5)) ? \ | ||
70 | &xor_block_niagara : \ | 72 | &xor_block_niagara : \ |
71 | &xor_block_VIS) | 73 | &xor_block_VIS) |
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index 9810fd881058..ba9b1cec4e6b 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c | |||
@@ -481,6 +481,18 @@ static void __init sun4v_cpu_probe(void) | |||
481 | sparc_pmu_type = "niagara3"; | 481 | sparc_pmu_type = "niagara3"; |
482 | break; | 482 | break; |
483 | 483 | ||
484 | case SUN4V_CHIP_NIAGARA4: | ||
485 | sparc_cpu_type = "UltraSparc T4 (Niagara4)"; | ||
486 | sparc_fpu_type = "UltraSparc T4 integrated FPU"; | ||
487 | sparc_pmu_type = "niagara4"; | ||
488 | break; | ||
489 | |||
490 | case SUN4V_CHIP_NIAGARA5: | ||
491 | sparc_cpu_type = "UltraSparc T5 (Niagara5)"; | ||
492 | sparc_fpu_type = "UltraSparc T5 integrated FPU"; | ||
493 | sparc_pmu_type = "niagara5"; | ||
494 | break; | ||
495 | |||
484 | default: | 496 | default: |
485 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", | 497 | printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", |
486 | prom_cpu_compatible); | 498 | prom_cpu_compatible); |
diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index 4197e8d62d4c..9323eafccb93 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c | |||
@@ -325,6 +325,8 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) | |||
325 | case SUN4V_CHIP_NIAGARA1: | 325 | case SUN4V_CHIP_NIAGARA1: |
326 | case SUN4V_CHIP_NIAGARA2: | 326 | case SUN4V_CHIP_NIAGARA2: |
327 | case SUN4V_CHIP_NIAGARA3: | 327 | case SUN4V_CHIP_NIAGARA3: |
328 | case SUN4V_CHIP_NIAGARA4: | ||
329 | case SUN4V_CHIP_NIAGARA5: | ||
328 | rover_inc_table = niagara_iterate_method; | 330 | rover_inc_table = niagara_iterate_method; |
329 | break; | 331 | break; |
330 | default: | 332 | default: |
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index 0eac1b2fc53d..0d810c2f1d00 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S | |||
@@ -133,7 +133,7 @@ prom_sun4v_name: | |||
133 | prom_niagara_prefix: | 133 | prom_niagara_prefix: |
134 | .asciz "SUNW,UltraSPARC-T" | 134 | .asciz "SUNW,UltraSPARC-T" |
135 | prom_sparc_prefix: | 135 | prom_sparc_prefix: |
136 | .asciz "SPARC-T" | 136 | .asciz "SPARC-" |
137 | .align 4 | 137 | .align 4 |
138 | prom_root_compatible: | 138 | prom_root_compatible: |
139 | .skip 64 | 139 | .skip 64 |
@@ -396,7 +396,7 @@ sun4v_chip_type: | |||
396 | or %g1, %lo(prom_cpu_compatible), %g1 | 396 | or %g1, %lo(prom_cpu_compatible), %g1 |
397 | sethi %hi(prom_sparc_prefix), %g7 | 397 | sethi %hi(prom_sparc_prefix), %g7 |
398 | or %g7, %lo(prom_sparc_prefix), %g7 | 398 | or %g7, %lo(prom_sparc_prefix), %g7 |
399 | mov 7, %g3 | 399 | mov 6, %g3 |
400 | 90: ldub [%g7], %g2 | 400 | 90: ldub [%g7], %g2 |
401 | ldub [%g1], %g4 | 401 | ldub [%g1], %g4 |
402 | cmp %g2, %g4 | 402 | cmp %g2, %g4 |
@@ -408,10 +408,23 @@ sun4v_chip_type: | |||
408 | 408 | ||
409 | sethi %hi(prom_cpu_compatible), %g1 | 409 | sethi %hi(prom_cpu_compatible), %g1 |
410 | or %g1, %lo(prom_cpu_compatible), %g1 | 410 | or %g1, %lo(prom_cpu_compatible), %g1 |
411 | ldub [%g1 + 7], %g2 | 411 | ldub [%g1 + 6], %g2 |
412 | cmp %g2, 'T' | ||
413 | be,pt %xcc, 70f | ||
414 | cmp %g2, 'M' | ||
415 | bne,pn %xcc, 4f | ||
416 | nop | ||
417 | |||
418 | 70: ldub [%g1 + 7], %g2 | ||
412 | cmp %g2, '3' | 419 | cmp %g2, '3' |
413 | be,pt %xcc, 5f | 420 | be,pt %xcc, 5f |
414 | mov SUN4V_CHIP_NIAGARA3, %g4 | 421 | mov SUN4V_CHIP_NIAGARA3, %g4 |
422 | cmp %g2, '4' | ||
423 | be,pt %xcc, 5f | ||
424 | mov SUN4V_CHIP_NIAGARA4, %g4 | ||
425 | cmp %g2, '5' | ||
426 | be,pt %xcc, 5f | ||
427 | mov SUN4V_CHIP_NIAGARA5, %g4 | ||
415 | ba,pt %xcc, 4f | 428 | ba,pt %xcc, 4f |
416 | nop | 429 | nop |
417 | 430 | ||
@@ -545,6 +558,12 @@ niagara_tlb_fixup: | |||
545 | cmp %g1, SUN4V_CHIP_NIAGARA3 | 558 | cmp %g1, SUN4V_CHIP_NIAGARA3 |
546 | be,pt %xcc, niagara2_patch | 559 | be,pt %xcc, niagara2_patch |
547 | nop | 560 | nop |
561 | cmp %g1, SUN4V_CHIP_NIAGARA4 | ||
562 | be,pt %xcc, niagara2_patch | ||
563 | nop | ||
564 | cmp %g1, SUN4V_CHIP_NIAGARA5 | ||
565 | be,pt %xcc, niagara2_patch | ||
566 | nop | ||
548 | 567 | ||
549 | call generic_patch_copyops | 568 | call generic_patch_copyops |
550 | nop | 569 | nop |
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index c8cc461ff75f..f793742eec2b 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c | |||
@@ -380,8 +380,7 @@ void flush_thread(void) | |||
380 | #endif | 380 | #endif |
381 | } | 381 | } |
382 | 382 | ||
383 | /* Now, this task is no longer a kernel thread. */ | 383 | /* This task is no longer a kernel thread. */ |
384 | current->thread.current_ds = USER_DS; | ||
385 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { | 384 | if (current->thread.flags & SPARC_FLAG_KTHREAD) { |
386 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; | 385 | current->thread.flags &= ~SPARC_FLAG_KTHREAD; |
387 | 386 | ||
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c158a95ec664..d959cd0a4aa4 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c | |||
@@ -368,9 +368,6 @@ void flush_thread(void) | |||
368 | 368 | ||
369 | /* Clear FPU register state. */ | 369 | /* Clear FPU register state. */ |
370 | t->fpsaved[0] = 0; | 370 | t->fpsaved[0] = 0; |
371 | |||
372 | if (get_thread_current_ds() != ASI_AIUS) | ||
373 | set_fs(USER_DS); | ||
374 | } | 371 | } |
375 | 372 | ||
376 | /* It's a bit more tricky when 64-bit tasks are involved... */ | 373 | /* It's a bit more tricky when 64-bit tasks are involved... */ |
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index d26e1f6c717a..3e3e2914c70b 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c | |||
@@ -137,7 +137,7 @@ static void __init process_switch(char c) | |||
137 | prom_halt(); | 137 | prom_halt(); |
138 | break; | 138 | break; |
139 | case 'p': | 139 | case 'p': |
140 | /* Just ignore, this behavior is now the default. */ | 140 | prom_early_console.flags &= ~CON_BOOT; |
141 | break; | 141 | break; |
142 | default: | 142 | default: |
143 | printk("Unknown boot switch (-%c)\n", c); | 143 | printk("Unknown boot switch (-%c)\n", c); |
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 3c5bb784214f..c965595aa7e9 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c | |||
@@ -106,7 +106,7 @@ static void __init process_switch(char c) | |||
106 | prom_halt(); | 106 | prom_halt(); |
107 | break; | 107 | break; |
108 | case 'p': | 108 | case 'p': |
109 | /* Just ignore, this behavior is now the default. */ | 109 | prom_early_console.flags &= ~CON_BOOT; |
110 | break; | 110 | break; |
111 | case 'P': | 111 | case 'P': |
112 | /* Force UltraSPARC-III P-Cache on. */ | 112 | /* Force UltraSPARC-III P-Cache on. */ |
@@ -425,10 +425,14 @@ static void __init init_sparc64_elf_hwcap(void) | |||
425 | else if (tlb_type == hypervisor) { | 425 | else if (tlb_type == hypervisor) { |
426 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || | 426 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || |
427 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 427 | sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
428 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 428 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
429 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
430 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
429 | cap |= HWCAP_SPARC_BLKINIT; | 431 | cap |= HWCAP_SPARC_BLKINIT; |
430 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 432 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
431 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 433 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
434 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
435 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
432 | cap |= HWCAP_SPARC_N2; | 436 | cap |= HWCAP_SPARC_N2; |
433 | } | 437 | } |
434 | 438 | ||
@@ -452,11 +456,15 @@ static void __init init_sparc64_elf_hwcap(void) | |||
452 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) | 456 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) |
453 | cap |= AV_SPARC_ASI_BLK_INIT; | 457 | cap |= AV_SPARC_ASI_BLK_INIT; |
454 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || | 458 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || |
455 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 459 | sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
460 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
461 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
456 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | | 462 | cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | |
457 | AV_SPARC_ASI_BLK_INIT | | 463 | AV_SPARC_ASI_BLK_INIT | |
458 | AV_SPARC_POPC); | 464 | AV_SPARC_POPC); |
459 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3) | 465 | if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || |
466 | sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || | ||
467 | sun4v_chip_type == SUN4V_CHIP_NIAGARA5) | ||
460 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | | 468 | cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC | |
461 | AV_SPARC_FMAF); | 469 | AV_SPARC_FMAF); |
462 | } | 470 | } |
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 581531dbc8b5..8e073d802139 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c | |||
@@ -511,6 +511,11 @@ static void __init read_obp_translations(void) | |||
511 | for (i = 0; i < prom_trans_ents; i++) | 511 | for (i = 0; i < prom_trans_ents; i++) |
512 | prom_trans[i].data &= ~0x0003fe0000000000UL; | 512 | prom_trans[i].data &= ~0x0003fe0000000000UL; |
513 | } | 513 | } |
514 | |||
515 | /* Force execute bit on. */ | ||
516 | for (i = 0; i < prom_trans_ents; i++) | ||
517 | prom_trans[i].data |= (tlb_type == hypervisor ? | ||
518 | _PAGE_EXEC_4V : _PAGE_EXEC_4U); | ||
514 | } | 519 | } |
515 | 520 | ||
516 | static void __init hypervisor_tlb_lock(unsigned long vaddr, | 521 | static void __init hypervisor_tlb_lock(unsigned long vaddr, |
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h index 4886a68f267e..53610957feaf 100644 --- a/arch/x86/include/asm/nmi.h +++ b/arch/x86/include/asm/nmi.h | |||
@@ -22,25 +22,23 @@ void arch_trigger_all_cpu_backtrace(void); | |||
22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace | 22 | #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace |
23 | #endif | 23 | #endif |
24 | 24 | ||
25 | /* | 25 | #define NMI_FLAG_FIRST 1 |
26 | * Define some priorities for the nmi notifier call chain. | 26 | |
27 | * | 27 | enum { |
28 | * Create a local nmi bit that has a higher priority than | 28 | NMI_LOCAL=0, |
29 | * external nmis, because the local ones are more frequent. | 29 | NMI_UNKNOWN, |
30 | * | 30 | NMI_MAX |
31 | * Also setup some default high/normal/low settings for | 31 | }; |
32 | * subsystems to registers with. Using 4 bits to separate | 32 | |
33 | * the priorities. This can go a lot higher if needed be. | 33 | #define NMI_DONE 0 |
34 | */ | 34 | #define NMI_HANDLED 1 |
35 | 35 | ||
36 | #define NMI_LOCAL_SHIFT 16 /* randomly picked */ | 36 | typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *); |
37 | #define NMI_LOCAL_BIT (1ULL << NMI_LOCAL_SHIFT) | 37 | |
38 | #define NMI_HIGH_PRIOR (1ULL << 8) | 38 | int register_nmi_handler(unsigned int, nmi_handler_t, unsigned long, |
39 | #define NMI_NORMAL_PRIOR (1ULL << 4) | 39 | const char *); |
40 | #define NMI_LOW_PRIOR (1ULL << 0) | 40 | |
41 | #define NMI_LOCAL_HIGH_PRIOR (NMI_LOCAL_BIT | NMI_HIGH_PRIOR) | 41 | void unregister_nmi_handler(unsigned int, const char *); |
42 | #define NMI_LOCAL_NORMAL_PRIOR (NMI_LOCAL_BIT | NMI_NORMAL_PRIOR) | ||
43 | #define NMI_LOCAL_LOW_PRIOR (NMI_LOCAL_BIT | NMI_LOW_PRIOR) | ||
44 | 42 | ||
45 | void stop_nmi(void); | 43 | void stop_nmi(void); |
46 | void restart_nmi(void); | 44 | void restart_nmi(void); |
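The interface above replaces the DIE_NMI notifier chain (this is the rework named in the merge message). A hedged sketch of how a consumer registers a handler with it; the mydev_* helpers and the "mydev" tag are invented for illustration:

    #include <linux/module.h>
    #include <linux/ptrace.h>
    #include <asm/nmi.h>

    static bool mydev_caused_nmi(void);     /* invented device check */
    static void mydev_ack_nmi(void);        /* invented acknowledge */

    static int mydev_nmi_handler(unsigned int cmd, struct pt_regs *regs)
    {
            if (!mydev_caused_nmi())
                    return NMI_DONE;        /* not ours, try the next handler */

            mydev_ack_nmi();
            return NMI_HANDLED;             /* claimed; stops unknown-NMI handling */
    }

    static int __init mydev_init(void)
    {
            /* NMI_LOCAL: ordinary local NMIs; NMI_UNKNOWN handlers run only
             * when nothing on the line claims the NMI. Passing NMI_FLAG_FIRST
             * instead of 0 asks to be called before existing handlers. */
            return register_nmi_handler(NMI_LOCAL, mydev_nmi_handler, 0, "mydev");
    }

    static void __exit mydev_exit(void)
    {
            unregister_nmi_handler(NMI_LOCAL, "mydev");
    }
    module_init(mydev_init);
    module_exit(mydev_exit);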
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h index 094fb30817ab..e47cb6167e8f 100644 --- a/arch/x86/include/asm/perf_event.h +++ b/arch/x86/include/asm/perf_event.h | |||
@@ -29,6 +29,9 @@ | |||
29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) | 29 | #define ARCH_PERFMON_EVENTSEL_INV (1ULL << 23) |
30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL | 30 | #define ARCH_PERFMON_EVENTSEL_CMASK 0xFF000000ULL |
31 | 31 | ||
32 | #define AMD_PERFMON_EVENTSEL_GUESTONLY (1ULL << 40) | ||
33 | #define AMD_PERFMON_EVENTSEL_HOSTONLY (1ULL << 41) | ||
34 | |||
32 | #define AMD64_EVENTSEL_EVENT \ | 35 | #define AMD64_EVENTSEL_EVENT \ |
33 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) | 36 | (ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32)) |
34 | #define INTEL_ARCH_EVENT_MASK \ | 37 | #define INTEL_ARCH_EVENT_MASK \ |
@@ -159,7 +162,19 @@ extern unsigned long perf_misc_flags(struct pt_regs *regs); | |||
159 | ); \ | 162 | ); \ |
160 | } | 163 | } |
161 | 164 | ||
165 | struct perf_guest_switch_msr { | ||
166 | unsigned msr; | ||
167 | u64 host, guest; | ||
168 | }; | ||
169 | |||
170 | extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr); | ||
162 | #else | 171 | #else |
172 | static inline perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | ||
173 | { | ||
174 | *nr = 0; | ||
175 | return NULL; | ||
176 | } | ||
177 | |||
163 | static inline void perf_events_lapic_init(void) { } | 178 | static inline void perf_events_lapic_init(void) { } |
164 | #endif | 179 | #endif |
165 | 180 | ||
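perf_guest_get_msrs() above gives a hypervisor the list of perf MSRs whose values differ between host and guest context. A hedged sketch of the consuming loop at guest entry; queue_guest_msr_switch() is an invented stand-in for whatever mechanism the hypervisor uses to program the host/guest swap:

    #include <linux/types.h>
    #include <asm/perf_event.h>

    static void queue_guest_msr_switch(unsigned msr, u64 guest, u64 host); /* invented */

    static void prepare_guest_perf_msrs(void)
    {
            struct perf_guest_switch_msr *msrs;
            int i, nr;

            msrs = perf_guest_get_msrs(&nr);
            if (!msrs)
                    return;

            for (i = 0; i < nr; i++) {
                    if (msrs[i].guest == msrs[i].host)
                            continue;       /* identical, nothing to switch */
                    queue_guest_msr_switch(msrs[i].msr,
                                           msrs[i].guest, msrs[i].host);
            }
    }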
diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h index 3250e3d605d9..92f297069e87 100644 --- a/arch/x86/include/asm/reboot.h +++ b/arch/x86/include/asm/reboot.h | |||
@@ -23,7 +23,7 @@ void machine_real_restart(unsigned int type); | |||
23 | #define MRR_BIOS 0 | 23 | #define MRR_BIOS 0 |
24 | #define MRR_APM 1 | 24 | #define MRR_APM 1 |
25 | 25 | ||
26 | typedef void (*nmi_shootdown_cb)(int, struct die_args*); | 26 | typedef void (*nmi_shootdown_cb)(int, struct pt_regs*); |
27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); | 27 | void nmi_shootdown_cpus(nmi_shootdown_cb callback); |
28 | 28 | ||
29 | #endif /* _ASM_X86_REBOOT_H */ | 29 | #endif /* _ASM_X86_REBOOT_H */ |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 82f2912155a5..8baca3c4871c 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -19,7 +19,7 @@ endif | |||
19 | 19 | ||
20 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o | 20 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o |
21 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 21 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
22 | obj-y += time.o ioport.o ldt.o dumpstack.o | 22 | obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o |
23 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o | 23 | obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o |
24 | obj-$(CONFIG_IRQ_WORK) += irq_work.o | 24 | obj-$(CONFIG_IRQ_WORK) += irq_work.o |
25 | obj-y += probe_roms.o | 25 | obj-y += probe_roms.o |
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index d5e57db0f7be..31cb9ae992b7 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c | |||
@@ -60,22 +60,10 @@ void arch_trigger_all_cpu_backtrace(void) | |||
60 | } | 60 | } |
61 | 61 | ||
62 | static int __kprobes | 62 | static int __kprobes |
63 | arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | 63 | arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) |
64 | unsigned long cmd, void *__args) | ||
65 | { | 64 | { |
66 | struct die_args *args = __args; | ||
67 | struct pt_regs *regs; | ||
68 | int cpu; | 65 | int cpu; |
69 | 66 | ||
70 | switch (cmd) { | ||
71 | case DIE_NMI: | ||
72 | break; | ||
73 | |||
74 | default: | ||
75 | return NOTIFY_DONE; | ||
76 | } | ||
77 | |||
78 | regs = args->regs; | ||
79 | cpu = smp_processor_id(); | 67 | cpu = smp_processor_id(); |
80 | 68 | ||
81 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { | 69 | if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) { |
@@ -86,21 +74,16 @@ arch_trigger_all_cpu_backtrace_handler(struct notifier_block *self, | |||
86 | show_regs(regs); | 74 | show_regs(regs); |
87 | arch_spin_unlock(&lock); | 75 | arch_spin_unlock(&lock); |
88 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); | 76 | cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask)); |
89 | return NOTIFY_STOP; | 77 | return NMI_HANDLED; |
90 | } | 78 | } |
91 | 79 | ||
92 | return NOTIFY_DONE; | 80 | return NMI_DONE; |
93 | } | 81 | } |
94 | 82 | ||
95 | static __read_mostly struct notifier_block backtrace_notifier = { | ||
96 | .notifier_call = arch_trigger_all_cpu_backtrace_handler, | ||
97 | .next = NULL, | ||
98 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
99 | }; | ||
100 | |||
101 | static int __init register_trigger_all_cpu_backtrace(void) | 83 | static int __init register_trigger_all_cpu_backtrace(void) |
102 | { | 84 | { |
103 | register_die_notifier(&backtrace_notifier); | 85 | register_nmi_handler(NMI_LOCAL, arch_trigger_all_cpu_backtrace_handler, |
86 | 0, "arch_bt"); | ||
104 | return 0; | 87 | return 0; |
105 | } | 88 | } |
106 | early_initcall(register_trigger_all_cpu_backtrace); | 89 | early_initcall(register_trigger_all_cpu_backtrace); |
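The hw_nmi.c hunk above shows the conversion pattern repeated throughout this series: a die_chain notifier that filtered on DIE_NMI becomes a plain callback with the prototype int handler(unsigned int type, struct pt_regs *regs), registered with register_nmi_handler(), and the NOTIFY_STOP/NOTIFY_DONE return values map to NMI_HANDLED/NMI_DONE. A minimal sketch of a converted handler (the device-specific parts are placeholders):

    static atomic_t my_nmi_pending;     /* set elsewhere by the device path (illustrative) */

    static int my_nmi_handler(unsigned int type, struct pt_regs *regs)
    {
        if (!atomic_xchg(&my_nmi_pending, 0))
            return NMI_DONE;            /* not ours; let other handlers run */

        /* ... service the event here ... */
        return NMI_HANDLED;             /* claimed; counted by nmi_handle() */
    }

Unlike the notifier chain, every registered handler of the matching type is called and the return values are summed, so returning NMI_HANDLED does not stop later handlers from running.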
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 34b18594e724..75be00ecfff2 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -672,18 +672,11 @@ void __cpuinit uv_cpu_init(void) | |||
672 | /* | 672 | /* |
673 | * When NMI is received, print a stack trace. | 673 | * When NMI is received, print a stack trace. |
674 | */ | 674 | */ |
675 | int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | 675 | int uv_handle_nmi(unsigned int reason, struct pt_regs *regs) |
676 | { | 676 | { |
677 | unsigned long real_uv_nmi; | 677 | unsigned long real_uv_nmi; |
678 | int bid; | 678 | int bid; |
679 | 679 | ||
680 | if (reason != DIE_NMIUNKNOWN) | ||
681 | return NOTIFY_OK; | ||
682 | |||
683 | if (in_crash_kexec) | ||
684 | /* do nothing if entering the crash kernel */ | ||
685 | return NOTIFY_OK; | ||
686 | |||
687 | /* | 680 | /* |
688 | * Each blade has an MMR that indicates when an NMI has been sent | 681 | * Each blade has an MMR that indicates when an NMI has been sent |
689 | * to cpus on the blade. If an NMI is detected, atomically | 682 | * to cpus on the blade. If an NMI is detected, atomically |
@@ -704,7 +697,7 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
704 | } | 697 | } |
705 | 698 | ||
706 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) | 699 | if (likely(__get_cpu_var(cpu_last_nmi_count) == uv_blade_info[bid].nmi_count)) |
707 | return NOTIFY_DONE; | 700 | return NMI_DONE; |
708 | 701 | ||
709 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; | 702 | __get_cpu_var(cpu_last_nmi_count) = uv_blade_info[bid].nmi_count; |
710 | 703 | ||
@@ -717,17 +710,12 @@ int uv_handle_nmi(struct notifier_block *self, unsigned long reason, void *data) | |||
717 | dump_stack(); | 710 | dump_stack(); |
718 | spin_unlock(&uv_nmi_lock); | 711 | spin_unlock(&uv_nmi_lock); |
719 | 712 | ||
720 | return NOTIFY_STOP; | 713 | return NMI_HANDLED; |
721 | } | 714 | } |
722 | 715 | ||
723 | static struct notifier_block uv_dump_stack_nmi_nb = { | ||
724 | .notifier_call = uv_handle_nmi, | ||
725 | .priority = NMI_LOCAL_LOW_PRIOR - 1, | ||
726 | }; | ||
727 | |||
728 | void uv_register_nmi_notifier(void) | 716 | void uv_register_nmi_notifier(void) |
729 | { | 717 | { |
730 | if (register_die_notifier(&uv_dump_stack_nmi_nb)) | 718 | if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv")) |
731 | printk(KERN_WARNING "UV NMI handler failed to register\n"); | 719 | printk(KERN_WARNING "UV NMI handler failed to register\n"); |
732 | } | 720 | } |
733 | 721 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c index 0ed633c5048b..6199232161cf 100644 --- a/arch/x86/kernel/cpu/mcheck/mce-inject.c +++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c | |||
@@ -78,27 +78,20 @@ static void raise_exception(struct mce *m, struct pt_regs *pregs) | |||
78 | 78 | ||
79 | static cpumask_var_t mce_inject_cpumask; | 79 | static cpumask_var_t mce_inject_cpumask; |
80 | 80 | ||
81 | static int mce_raise_notify(struct notifier_block *self, | 81 | static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) |
82 | unsigned long val, void *data) | ||
83 | { | 82 | { |
84 | struct die_args *args = (struct die_args *)data; | ||
85 | int cpu = smp_processor_id(); | 83 | int cpu = smp_processor_id(); |
86 | struct mce *m = &__get_cpu_var(injectm); | 84 | struct mce *m = &__get_cpu_var(injectm); |
87 | if (val != DIE_NMI || !cpumask_test_cpu(cpu, mce_inject_cpumask)) | 85 | if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) |
88 | return NOTIFY_DONE; | 86 | return NMI_DONE; |
89 | cpumask_clear_cpu(cpu, mce_inject_cpumask); | 87 | cpumask_clear_cpu(cpu, mce_inject_cpumask); |
90 | if (m->inject_flags & MCJ_EXCEPTION) | 88 | if (m->inject_flags & MCJ_EXCEPTION) |
91 | raise_exception(m, args->regs); | 89 | raise_exception(m, regs); |
92 | else if (m->status) | 90 | else if (m->status) |
93 | raise_poll(m); | 91 | raise_poll(m); |
94 | return NOTIFY_STOP; | 92 | return NMI_HANDLED; |
95 | } | 93 | } |
96 | 94 | ||
97 | static struct notifier_block mce_raise_nb = { | ||
98 | .notifier_call = mce_raise_notify, | ||
99 | .priority = NMI_LOCAL_NORMAL_PRIOR, | ||
100 | }; | ||
101 | |||
102 | /* Inject mce on current CPU */ | 95 | /* Inject mce on current CPU */ |
103 | static int raise_local(void) | 96 | static int raise_local(void) |
104 | { | 97 | { |
@@ -216,7 +209,8 @@ static int inject_init(void) | |||
216 | return -ENOMEM; | 209 | return -ENOMEM; |
217 | printk(KERN_INFO "Machine check injector initialized\n"); | 210 | printk(KERN_INFO "Machine check injector initialized\n"); |
218 | mce_chrdev_ops.write = mce_write; | 211 | mce_chrdev_ops.write = mce_write; |
219 | register_die_notifier(&mce_raise_nb); | 212 | register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, |
213 | "mce_notify"); | ||
220 | return 0; | 214 | return 0; |
221 | } | 215 | } |
222 | 216 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 08363b042122..fce51ad1f362 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -908,9 +908,6 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
908 | 908 | ||
909 | percpu_inc(mce_exception_count); | 909 | percpu_inc(mce_exception_count); |
910 | 910 | ||
911 | if (notify_die(DIE_NMI, "machine check", regs, error_code, | ||
912 | 18, SIGKILL) == NOTIFY_STOP) | ||
913 | goto out; | ||
914 | if (!banks) | 911 | if (!banks) |
915 | goto out; | 912 | goto out; |
916 | 913 | ||
@@ -1140,6 +1137,15 @@ static void mce_start_timer(unsigned long data) | |||
1140 | add_timer_on(t, smp_processor_id()); | 1137 | add_timer_on(t, smp_processor_id()); |
1141 | } | 1138 | } |
1142 | 1139 | ||
1140 | /* Must not be called in IRQ context where del_timer_sync() can deadlock */ | ||
1141 | static void mce_timer_delete_all(void) | ||
1142 | { | ||
1143 | int cpu; | ||
1144 | |||
1145 | for_each_online_cpu(cpu) | ||
1146 | del_timer_sync(&per_cpu(mce_timer, cpu)); | ||
1147 | } | ||
1148 | |||
1143 | static void mce_do_trigger(struct work_struct *work) | 1149 | static void mce_do_trigger(struct work_struct *work) |
1144 | { | 1150 | { |
1145 | call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); | 1151 | call_usermodehelper(mce_helper, mce_helper_argv, NULL, UMH_NO_WAIT); |
@@ -1750,7 +1756,6 @@ static struct syscore_ops mce_syscore_ops = { | |||
1750 | 1756 | ||
1751 | static void mce_cpu_restart(void *data) | 1757 | static void mce_cpu_restart(void *data) |
1752 | { | 1758 | { |
1753 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
1754 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 1759 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1755 | return; | 1760 | return; |
1756 | __mcheck_cpu_init_generic(); | 1761 | __mcheck_cpu_init_generic(); |
@@ -1760,16 +1765,15 @@ static void mce_cpu_restart(void *data) | |||
1760 | /* Reinit MCEs after user configuration changes */ | 1765 | /* Reinit MCEs after user configuration changes */ |
1761 | static void mce_restart(void) | 1766 | static void mce_restart(void) |
1762 | { | 1767 | { |
1768 | mce_timer_delete_all(); | ||
1763 | on_each_cpu(mce_cpu_restart, NULL, 1); | 1769 | on_each_cpu(mce_cpu_restart, NULL, 1); |
1764 | } | 1770 | } |
1765 | 1771 | ||
1766 | /* Toggle features for corrected errors */ | 1772 | /* Toggle features for corrected errors */ |
1767 | static void mce_disable_ce(void *all) | 1773 | static void mce_disable_cmci(void *data) |
1768 | { | 1774 | { |
1769 | if (!mce_available(__this_cpu_ptr(&cpu_info))) | 1775 | if (!mce_available(__this_cpu_ptr(&cpu_info))) |
1770 | return; | 1776 | return; |
1771 | if (all) | ||
1772 | del_timer_sync(&__get_cpu_var(mce_timer)); | ||
1773 | cmci_clear(); | 1777 | cmci_clear(); |
1774 | } | 1778 | } |
1775 | 1779 | ||
@@ -1852,7 +1856,8 @@ static ssize_t set_ignore_ce(struct sys_device *s, | |||
1852 | if (mce_ignore_ce ^ !!new) { | 1856 | if (mce_ignore_ce ^ !!new) { |
1853 | if (new) { | 1857 | if (new) { |
1854 | /* disable ce features */ | 1858 | /* disable ce features */ |
1855 | on_each_cpu(mce_disable_ce, (void *)1, 1); | 1859 | mce_timer_delete_all(); |
1860 | on_each_cpu(mce_disable_cmci, NULL, 1); | ||
1856 | mce_ignore_ce = 1; | 1861 | mce_ignore_ce = 1; |
1857 | } else { | 1862 | } else { |
1858 | /* enable ce features */ | 1863 | /* enable ce features */ |
@@ -1875,7 +1880,7 @@ static ssize_t set_cmci_disabled(struct sys_device *s, | |||
1875 | if (mce_cmci_disabled ^ !!new) { | 1880 | if (mce_cmci_disabled ^ !!new) { |
1876 | if (new) { | 1881 | if (new) { |
1877 | /* disable cmci */ | 1882 | /* disable cmci */ |
1878 | on_each_cpu(mce_disable_ce, NULL, 1); | 1883 | on_each_cpu(mce_disable_cmci, NULL, 1); |
1879 | mce_cmci_disabled = 1; | 1884 | mce_cmci_disabled = 1; |
1880 | } else { | 1885 | } else { |
1881 | /* enable cmci */ | 1886 | /* enable cmci */ |
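The new mce_timer_delete_all() exists because del_timer_sync() must not be called from the interrupt context in which on_each_cpu() runs its callback; the timer teardown is therefore hoisted into process context before the cross-CPU call, as in the ignore_ce path above. The resulting sequence, with the sysfs plumbing elided:

    /* process context (sysfs write handler) */
    mce_timer_delete_all();                  /* del_timer_sync() is safe here */
    on_each_cpu(mce_disable_cmci, NULL, 1);  /* per-CPU callback no longer touches timers */
    mce_ignore_ce = 1;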
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 8ab89112f93c..640891014b2a 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1058,76 +1058,15 @@ void perf_events_lapic_init(void) | |||
1058 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 1058 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
1059 | } | 1059 | } |
1060 | 1060 | ||
1061 | struct pmu_nmi_state { | ||
1062 | unsigned int marked; | ||
1063 | int handled; | ||
1064 | }; | ||
1065 | |||
1066 | static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi); | ||
1067 | |||
1068 | static int __kprobes | 1061 | static int __kprobes |
1069 | perf_event_nmi_handler(struct notifier_block *self, | 1062 | perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) |
1070 | unsigned long cmd, void *__args) | ||
1071 | { | 1063 | { |
1072 | struct die_args *args = __args; | ||
1073 | unsigned int this_nmi; | ||
1074 | int handled; | ||
1075 | |||
1076 | if (!atomic_read(&active_events)) | 1064 | if (!atomic_read(&active_events)) |
1077 | return NOTIFY_DONE; | 1065 | return NMI_DONE; |
1078 | |||
1079 | switch (cmd) { | ||
1080 | case DIE_NMI: | ||
1081 | break; | ||
1082 | case DIE_NMIUNKNOWN: | ||
1083 | this_nmi = percpu_read(irq_stat.__nmi_count); | ||
1084 | if (this_nmi != __this_cpu_read(pmu_nmi.marked)) | ||
1085 | /* let the kernel handle the unknown nmi */ | ||
1086 | return NOTIFY_DONE; | ||
1087 | /* | ||
1088 | * This one is a PMU back-to-back nmi. Two events | ||
1089 | * trigger 'simultaneously' raising two back-to-back | ||
1090 | * NMIs. If the first NMI handles both, the latter | ||
1091 | * will be empty and daze the CPU. So, we drop it to | ||
1092 | * avoid false-positive 'unknown nmi' messages. | ||
1093 | */ | ||
1094 | return NOTIFY_STOP; | ||
1095 | default: | ||
1096 | return NOTIFY_DONE; | ||
1097 | } | ||
1098 | |||
1099 | handled = x86_pmu.handle_irq(args->regs); | ||
1100 | if (!handled) | ||
1101 | return NOTIFY_DONE; | ||
1102 | 1066 | ||
1103 | this_nmi = percpu_read(irq_stat.__nmi_count); | 1067 | return x86_pmu.handle_irq(regs); |
1104 | if ((handled > 1) || | ||
1105 | /* the next nmi could be a back-to-back nmi */ | ||
1106 | ((__this_cpu_read(pmu_nmi.marked) == this_nmi) && | ||
1107 | (__this_cpu_read(pmu_nmi.handled) > 1))) { | ||
1108 | /* | ||
1109 | * We could have two subsequent back-to-back nmis: The | ||
1110 | * first handles more than one counter, the 2nd | ||
1111 | * handles only one counter and the 3rd handles no | ||
1112 | * counter. | ||
1113 | * | ||
1114 | * This is the 2nd nmi because the previous was | ||
1115 | * handling more than one counter. We will mark the | ||
1116 | * next (3rd) and then drop it if unhandled. | ||
1117 | */ | ||
1118 | __this_cpu_write(pmu_nmi.marked, this_nmi + 1); | ||
1119 | __this_cpu_write(pmu_nmi.handled, handled); | ||
1120 | } | ||
1121 | |||
1122 | return NOTIFY_STOP; | ||
1123 | } | 1068 | } |
1124 | 1069 | ||
1125 | static __read_mostly struct notifier_block perf_event_nmi_notifier = { | ||
1126 | .notifier_call = perf_event_nmi_handler, | ||
1127 | .next = NULL, | ||
1128 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
1129 | }; | ||
1130 | |||
1131 | struct event_constraint emptyconstraint; | 1070 | struct event_constraint emptyconstraint; |
1132 | struct event_constraint unconstrained; | 1071 | struct event_constraint unconstrained; |
1133 | 1072 | ||
@@ -1232,7 +1171,7 @@ static int __init init_hw_perf_events(void) | |||
1232 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; | 1171 | ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED; |
1233 | 1172 | ||
1234 | perf_events_lapic_init(); | 1173 | perf_events_lapic_init(); |
1235 | register_die_notifier(&perf_event_nmi_notifier); | 1174 | register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI"); |
1236 | 1175 | ||
1237 | unconstrained = (struct event_constraint) | 1176 | unconstrained = (struct event_constraint) |
1238 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, | 1177 | __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1, |
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index fb330b0a816e..b9698d40ac4b 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h | |||
@@ -131,6 +131,13 @@ struct cpu_hw_events { | |||
131 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; | 131 | struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; |
132 | 132 | ||
133 | /* | 133 | /* |
134 | * Intel host/guest exclude bits | ||
135 | */ | ||
136 | u64 intel_ctrl_guest_mask; | ||
137 | u64 intel_ctrl_host_mask; | ||
138 | struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; | ||
139 | |||
140 | /* | ||
134 | * manage shared (per-core, per-cpu) registers | 141 | * manage shared (per-core, per-cpu) registers |
135 | * used on Intel NHM/WSM/SNB | 142 | * used on Intel NHM/WSM/SNB |
136 | */ | 143 | */ |
@@ -295,6 +302,11 @@ struct x86_pmu { | |||
295 | */ | 302 | */ |
296 | struct extra_reg *extra_regs; | 303 | struct extra_reg *extra_regs; |
297 | unsigned int er_flags; | 304 | unsigned int er_flags; |
305 | |||
306 | /* | ||
307 | * Intel host/guest support (KVM) | ||
308 | */ | ||
309 | struct perf_guest_switch_msr *(*guest_get_msrs)(int *nr); | ||
298 | }; | 310 | }; |
299 | 311 | ||
300 | #define ERF_NO_HT_SHARING 1 | 312 | #define ERF_NO_HT_SHARING 1 |
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c index 384450d67128..db8e603ff0c6 100644 --- a/arch/x86/kernel/cpu/perf_event_amd.c +++ b/arch/x86/kernel/cpu/perf_event_amd.c | |||
@@ -138,6 +138,19 @@ static int amd_pmu_hw_config(struct perf_event *event) | |||
138 | if (ret) | 138 | if (ret) |
139 | return ret; | 139 | return ret; |
140 | 140 | ||
141 | if (event->attr.exclude_host && event->attr.exclude_guest) | ||
142 | /* | ||
143 | * When HO == GO == 1 the hardware treats that as GO == HO == 0 | ||
144 | * and will count in both modes. We don't want to count in that | ||
145 | * case so we emulate no-counting by setting US = OS = 0. | ||
146 | */ | ||
147 | event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR | | ||
148 | ARCH_PERFMON_EVENTSEL_OS); | ||
149 | else if (event->attr.exclude_host) | ||
150 | event->hw.config |= AMD_PERFMON_EVENTSEL_GUESTONLY; | ||
151 | else if (event->attr.exclude_guest) | ||
152 | event->hw.config |= AMD_PERFMON_EVENTSEL_HOSTONLY; | ||
153 | |||
141 | if (event->attr.type != PERF_TYPE_RAW) | 154 | if (event->attr.type != PERF_TYPE_RAW) |
142 | return 0; | 155 | return 0; |
143 | 156 | ||
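The exclude_host/exclude_guest bits consumed here come straight from struct perf_event_attr, so user space can ask for host-only or guest-only counting when it opens the event. A minimal user-space sketch (assuming the usual perf_event_open() setup; nothing here beyond the two exclude bits is specific to this patch):

    #include <linux/perf_event.h>
    #include <string.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    /* Count CPU cycles for the calling task, but only while the host runs. */
    static int open_host_only_cycles(void)
    {
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1;     /* maps to AMD_PERFMON_EVENTSEL_HOSTONLY above */

        return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }

Setting both exclude bits would make the event count nothing at all, which is why the hunk above emulates that case by clearing the USR and OS bits instead of setting both GO and HO.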
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c index 61fa35750b98..e09ca20e86ee 100644 --- a/arch/x86/kernel/cpu/perf_event_intel.c +++ b/arch/x86/kernel/cpu/perf_event_intel.c | |||
@@ -749,7 +749,8 @@ static void intel_pmu_enable_all(int added) | |||
749 | 749 | ||
750 | intel_pmu_pebs_enable_all(); | 750 | intel_pmu_pebs_enable_all(); |
751 | intel_pmu_lbr_enable_all(); | 751 | intel_pmu_lbr_enable_all(); |
752 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl); | 752 | wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, |
753 | x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask); | ||
753 | 754 | ||
754 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { | 755 | if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) { |
755 | struct perf_event *event = | 756 | struct perf_event *event = |
@@ -872,6 +873,7 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) | |||
872 | static void intel_pmu_disable_event(struct perf_event *event) | 873 | static void intel_pmu_disable_event(struct perf_event *event) |
873 | { | 874 | { |
874 | struct hw_perf_event *hwc = &event->hw; | 875 | struct hw_perf_event *hwc = &event->hw; |
876 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
875 | 877 | ||
876 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 878 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
877 | intel_pmu_disable_bts(); | 879 | intel_pmu_disable_bts(); |
@@ -879,6 +881,9 @@ static void intel_pmu_disable_event(struct perf_event *event) | |||
879 | return; | 881 | return; |
880 | } | 882 | } |
881 | 883 | ||
884 | cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); | ||
885 | cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); | ||
886 | |||
882 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 887 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
883 | intel_pmu_disable_fixed(hwc); | 888 | intel_pmu_disable_fixed(hwc); |
884 | return; | 889 | return; |
@@ -924,6 +929,7 @@ static void intel_pmu_enable_fixed(struct hw_perf_event *hwc) | |||
924 | static void intel_pmu_enable_event(struct perf_event *event) | 929 | static void intel_pmu_enable_event(struct perf_event *event) |
925 | { | 930 | { |
926 | struct hw_perf_event *hwc = &event->hw; | 931 | struct hw_perf_event *hwc = &event->hw; |
932 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
927 | 933 | ||
928 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { | 934 | if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) { |
929 | if (!__this_cpu_read(cpu_hw_events.enabled)) | 935 | if (!__this_cpu_read(cpu_hw_events.enabled)) |
@@ -933,6 +939,11 @@ static void intel_pmu_enable_event(struct perf_event *event) | |||
933 | return; | 939 | return; |
934 | } | 940 | } |
935 | 941 | ||
942 | if (event->attr.exclude_host) | ||
943 | cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx); | ||
944 | if (event->attr.exclude_guest) | ||
945 | cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); | ||
946 | |||
936 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { | 947 | if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { |
937 | intel_pmu_enable_fixed(hwc); | 948 | intel_pmu_enable_fixed(hwc); |
938 | return; | 949 | return; |
@@ -1302,12 +1313,84 @@ static int intel_pmu_hw_config(struct perf_event *event) | |||
1302 | return 0; | 1313 | return 0; |
1303 | } | 1314 | } |
1304 | 1315 | ||
1316 | struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr) | ||
1317 | { | ||
1318 | if (x86_pmu.guest_get_msrs) | ||
1319 | return x86_pmu.guest_get_msrs(nr); | ||
1320 | *nr = 0; | ||
1321 | return NULL; | ||
1322 | } | ||
1323 | EXPORT_SYMBOL_GPL(perf_guest_get_msrs); | ||
1324 | |||
1325 | static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr) | ||
1326 | { | ||
1327 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1328 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | ||
1329 | |||
1330 | arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL; | ||
1331 | arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask; | ||
1332 | arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask; | ||
1333 | |||
1334 | *nr = 1; | ||
1335 | return arr; | ||
1336 | } | ||
1337 | |||
1338 | static struct perf_guest_switch_msr *core_guest_get_msrs(int *nr) | ||
1339 | { | ||
1340 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1341 | struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs; | ||
1342 | int idx; | ||
1343 | |||
1344 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1345 | struct perf_event *event = cpuc->events[idx]; | ||
1346 | |||
1347 | arr[idx].msr = x86_pmu_config_addr(idx); | ||
1348 | arr[idx].host = arr[idx].guest = 0; | ||
1349 | |||
1350 | if (!test_bit(idx, cpuc->active_mask)) | ||
1351 | continue; | ||
1352 | |||
1353 | arr[idx].host = arr[idx].guest = | ||
1354 | event->hw.config | ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1355 | |||
1356 | if (event->attr.exclude_host) | ||
1357 | arr[idx].host &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1358 | else if (event->attr.exclude_guest) | ||
1359 | arr[idx].guest &= ~ARCH_PERFMON_EVENTSEL_ENABLE; | ||
1360 | } | ||
1361 | |||
1362 | *nr = x86_pmu.num_counters; | ||
1363 | return arr; | ||
1364 | } | ||
1365 | |||
1366 | static void core_pmu_enable_event(struct perf_event *event) | ||
1367 | { | ||
1368 | if (!event->attr.exclude_host) | ||
1369 | x86_pmu_enable_event(event); | ||
1370 | } | ||
1371 | |||
1372 | static void core_pmu_enable_all(int added) | ||
1373 | { | ||
1374 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
1375 | int idx; | ||
1376 | |||
1377 | for (idx = 0; idx < x86_pmu.num_counters; idx++) { | ||
1378 | struct hw_perf_event *hwc = &cpuc->events[idx]->hw; | ||
1379 | |||
1380 | if (!test_bit(idx, cpuc->active_mask) || | ||
1381 | cpuc->events[idx]->attr.exclude_host) | ||
1382 | continue; | ||
1383 | |||
1384 | __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); | ||
1385 | } | ||
1386 | } | ||
1387 | |||
1305 | static __initconst const struct x86_pmu core_pmu = { | 1388 | static __initconst const struct x86_pmu core_pmu = { |
1306 | .name = "core", | 1389 | .name = "core", |
1307 | .handle_irq = x86_pmu_handle_irq, | 1390 | .handle_irq = x86_pmu_handle_irq, |
1308 | .disable_all = x86_pmu_disable_all, | 1391 | .disable_all = x86_pmu_disable_all, |
1309 | .enable_all = x86_pmu_enable_all, | 1392 | .enable_all = core_pmu_enable_all, |
1310 | .enable = x86_pmu_enable_event, | 1393 | .enable = core_pmu_enable_event, |
1311 | .disable = x86_pmu_disable_event, | 1394 | .disable = x86_pmu_disable_event, |
1312 | .hw_config = x86_pmu_hw_config, | 1395 | .hw_config = x86_pmu_hw_config, |
1313 | .schedule_events = x86_schedule_events, | 1396 | .schedule_events = x86_schedule_events, |
@@ -1325,6 +1408,7 @@ static __initconst const struct x86_pmu core_pmu = { | |||
1325 | .get_event_constraints = intel_get_event_constraints, | 1408 | .get_event_constraints = intel_get_event_constraints, |
1326 | .put_event_constraints = intel_put_event_constraints, | 1409 | .put_event_constraints = intel_put_event_constraints, |
1327 | .event_constraints = intel_core_event_constraints, | 1410 | .event_constraints = intel_core_event_constraints, |
1411 | .guest_get_msrs = core_guest_get_msrs, | ||
1328 | }; | 1412 | }; |
1329 | 1413 | ||
1330 | struct intel_shared_regs *allocate_shared_regs(int cpu) | 1414 | struct intel_shared_regs *allocate_shared_regs(int cpu) |
@@ -1431,6 +1515,7 @@ static __initconst const struct x86_pmu intel_pmu = { | |||
1431 | .cpu_prepare = intel_pmu_cpu_prepare, | 1515 | .cpu_prepare = intel_pmu_cpu_prepare, |
1432 | .cpu_starting = intel_pmu_cpu_starting, | 1516 | .cpu_starting = intel_pmu_cpu_starting, |
1433 | .cpu_dying = intel_pmu_cpu_dying, | 1517 | .cpu_dying = intel_pmu_cpu_dying, |
1518 | .guest_get_msrs = intel_guest_get_msrs, | ||
1434 | }; | 1519 | }; |
1435 | 1520 | ||
1436 | static void intel_clovertown_quirks(void) | 1521 | static void intel_clovertown_quirks(void) |
diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 764c7c2b1811..13ad89971d47 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c | |||
@@ -32,15 +32,12 @@ int in_crash_kexec; | |||
32 | 32 | ||
33 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) | 33 | #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC) |
34 | 34 | ||
35 | static void kdump_nmi_callback(int cpu, struct die_args *args) | 35 | static void kdump_nmi_callback(int cpu, struct pt_regs *regs) |
36 | { | 36 | { |
37 | struct pt_regs *regs; | ||
38 | #ifdef CONFIG_X86_32 | 37 | #ifdef CONFIG_X86_32 |
39 | struct pt_regs fixed_regs; | 38 | struct pt_regs fixed_regs; |
40 | #endif | 39 | #endif |
41 | 40 | ||
42 | regs = args->regs; | ||
43 | |||
44 | #ifdef CONFIG_X86_32 | 41 | #ifdef CONFIG_X86_32 |
45 | if (!user_mode_vm(regs)) { | 42 | if (!user_mode_vm(regs)) { |
46 | crash_fixup_ss_esp(&fixed_regs, regs); | 43 | crash_fixup_ss_esp(&fixed_regs, regs); |
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c index 00354d4919a9..faba5771acad 100644 --- a/arch/x86/kernel/kgdb.c +++ b/arch/x86/kernel/kgdb.c | |||
@@ -511,28 +511,37 @@ single_step_cont(struct pt_regs *regs, struct die_args *args) | |||
511 | 511 | ||
512 | static int was_in_debug_nmi[NR_CPUS]; | 512 | static int was_in_debug_nmi[NR_CPUS]; |
513 | 513 | ||
514 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | 514 | static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs) |
515 | { | 515 | { |
516 | struct pt_regs *regs = args->regs; | ||
517 | |||
518 | switch (cmd) { | 516 | switch (cmd) { |
519 | case DIE_NMI: | 517 | case NMI_LOCAL: |
520 | if (atomic_read(&kgdb_active) != -1) { | 518 | if (atomic_read(&kgdb_active) != -1) { |
521 | /* KGDB CPU roundup */ | 519 | /* KGDB CPU roundup */ |
522 | kgdb_nmicallback(raw_smp_processor_id(), regs); | 520 | kgdb_nmicallback(raw_smp_processor_id(), regs); |
523 | was_in_debug_nmi[raw_smp_processor_id()] = 1; | 521 | was_in_debug_nmi[raw_smp_processor_id()] = 1; |
524 | touch_nmi_watchdog(); | 522 | touch_nmi_watchdog(); |
525 | return NOTIFY_STOP; | 523 | return NMI_HANDLED; |
526 | } | 524 | } |
527 | return NOTIFY_DONE; | 525 | break; |
528 | 526 | ||
529 | case DIE_NMIUNKNOWN: | 527 | case NMI_UNKNOWN: |
530 | if (was_in_debug_nmi[raw_smp_processor_id()]) { | 528 | if (was_in_debug_nmi[raw_smp_processor_id()]) { |
531 | was_in_debug_nmi[raw_smp_processor_id()] = 0; | 529 | was_in_debug_nmi[raw_smp_processor_id()] = 0; |
532 | return NOTIFY_STOP; | 530 | return NMI_HANDLED; |
533 | } | 531 | } |
534 | return NOTIFY_DONE; | 532 | break; |
533 | default: | ||
534 | /* do nothing */ | ||
535 | break; | ||
536 | } | ||
537 | return NMI_DONE; | ||
538 | } | ||
539 | |||
540 | static int __kgdb_notify(struct die_args *args, unsigned long cmd) | ||
541 | { | ||
542 | struct pt_regs *regs = args->regs; | ||
535 | 543 | ||
544 | switch (cmd) { | ||
536 | case DIE_DEBUG: | 545 | case DIE_DEBUG: |
537 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { | 546 | if (atomic_read(&kgdb_cpu_doing_single_step) != -1) { |
538 | if (user_mode(regs)) | 547 | if (user_mode(regs)) |
@@ -590,11 +599,6 @@ kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) | |||
590 | 599 | ||
591 | static struct notifier_block kgdb_notifier = { | 600 | static struct notifier_block kgdb_notifier = { |
592 | .notifier_call = kgdb_notify, | 601 | .notifier_call = kgdb_notify, |
593 | |||
594 | /* | ||
595 | * Lowest-prio notifier priority, we want to be notified last: | ||
596 | */ | ||
597 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
598 | }; | 602 | }; |
599 | 603 | ||
600 | /** | 604 | /** |
@@ -605,7 +609,31 @@ static struct notifier_block kgdb_notifier = { | |||
605 | */ | 609 | */ |
606 | int kgdb_arch_init(void) | 610 | int kgdb_arch_init(void) |
607 | { | 611 | { |
608 | return register_die_notifier(&kgdb_notifier); | 612 | int retval; |
613 | |||
614 | retval = register_die_notifier(&kgdb_notifier); | ||
615 | if (retval) | ||
616 | goto out; | ||
617 | |||
618 | retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler, | ||
619 | 0, "kgdb"); | ||
620 | if (retval) | ||
621 | goto out1; | ||
622 | |||
623 | retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler, | ||
624 | 0, "kgdb"); | ||
625 | |||
626 | if (retval) | ||
627 | goto out2; | ||
628 | |||
629 | return retval; | ||
630 | |||
631 | out2: | ||
632 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | ||
633 | out1: | ||
634 | unregister_die_notifier(&kgdb_notifier); | ||
635 | out: | ||
636 | return retval; | ||
609 | } | 637 | } |
610 | 638 | ||
611 | static void kgdb_hw_overflow_handler(struct perf_event *event, | 639 | static void kgdb_hw_overflow_handler(struct perf_event *event, |
@@ -673,6 +701,8 @@ void kgdb_arch_exit(void) | |||
673 | breakinfo[i].pev = NULL; | 701 | breakinfo[i].pev = NULL; |
674 | } | 702 | } |
675 | } | 703 | } |
704 | unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); | ||
705 | unregister_nmi_handler(NMI_LOCAL, "kgdb"); | ||
676 | unregister_die_notifier(&kgdb_notifier); | 706 | unregister_die_notifier(&kgdb_notifier); |
677 | } | 707 | } |
678 | 708 | ||
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c new file mode 100644 index 000000000000..e20f5e790599 --- /dev/null +++ b/arch/x86/kernel/nmi.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
3 | * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs | ||
4 | * Copyright (C) 2011 Don Zickus Red Hat, Inc. | ||
5 | * | ||
6 | * Pentium III FXSR, SSE support | ||
7 | * Gareth Hughes <gareth@valinux.com>, May 2000 | ||
8 | */ | ||
9 | |||
10 | /* | ||
11 | * Handle hardware traps and faults. | ||
12 | */ | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/kprobes.h> | ||
15 | #include <linux/kdebug.h> | ||
16 | #include <linux/nmi.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/hardirq.h> | ||
19 | #include <linux/slab.h> | ||
20 | |||
21 | #if defined(CONFIG_EDAC) | ||
22 | #include <linux/edac.h> | ||
23 | #endif | ||
24 | |||
25 | #include <linux/atomic.h> | ||
26 | #include <asm/traps.h> | ||
27 | #include <asm/mach_traps.h> | ||
28 | #include <asm/nmi.h> | ||
29 | |||
30 | #define NMI_MAX_NAMELEN 16 | ||
31 | struct nmiaction { | ||
32 | struct list_head list; | ||
33 | nmi_handler_t handler; | ||
34 | unsigned int flags; | ||
35 | char *name; | ||
36 | }; | ||
37 | |||
38 | struct nmi_desc { | ||
39 | spinlock_t lock; | ||
40 | struct list_head head; | ||
41 | }; | ||
42 | |||
43 | static struct nmi_desc nmi_desc[NMI_MAX] = | ||
44 | { | ||
45 | { | ||
46 | .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), | ||
47 | .head = LIST_HEAD_INIT(nmi_desc[0].head), | ||
48 | }, | ||
49 | { | ||
50 | .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), | ||
51 | .head = LIST_HEAD_INIT(nmi_desc[1].head), | ||
52 | }, | ||
53 | |||
54 | }; | ||
55 | |||
56 | static int ignore_nmis; | ||
57 | |||
58 | int unknown_nmi_panic; | ||
59 | /* | ||
60 | * Prevent NMI reason port (0x61) being accessed simultaneously, can | ||
61 | * only be used in NMI handler. | ||
62 | */ | ||
63 | static DEFINE_RAW_SPINLOCK(nmi_reason_lock); | ||
64 | |||
65 | static int __init setup_unknown_nmi_panic(char *str) | ||
66 | { | ||
67 | unknown_nmi_panic = 1; | ||
68 | return 1; | ||
69 | } | ||
70 | __setup("unknown_nmi_panic", setup_unknown_nmi_panic); | ||
71 | |||
72 | #define nmi_to_desc(type) (&nmi_desc[type]) | ||
73 | |||
74 | static int notrace __kprobes nmi_handle(unsigned int type, struct pt_regs *regs) | ||
75 | { | ||
76 | struct nmi_desc *desc = nmi_to_desc(type); | ||
77 | struct nmiaction *a; | ||
78 | int handled = 0; | ||
79 | |||
80 | rcu_read_lock(); | ||
81 | |||
82 | /* | ||
83 | * NMIs are edge-triggered, which means if you have enough | ||
84 | * of them concurrently, you can lose some because only one | ||
85 | * can be latched at any given time. Walk the whole list | ||
86 | * to handle those situations. | ||
87 | */ | ||
88 | list_for_each_entry_rcu(a, &desc->head, list) { | ||
89 | |||
90 | handled += a->handler(type, regs); | ||
91 | |||
92 | } | ||
93 | |||
94 | rcu_read_unlock(); | ||
95 | |||
96 | /* return total number of NMI events handled */ | ||
97 | return handled; | ||
98 | } | ||
99 | |||
100 | static int __setup_nmi(unsigned int type, struct nmiaction *action) | ||
101 | { | ||
102 | struct nmi_desc *desc = nmi_to_desc(type); | ||
103 | unsigned long flags; | ||
104 | |||
105 | spin_lock_irqsave(&desc->lock, flags); | ||
106 | |||
107 | /* | ||
108 | * some handlers need to be executed first otherwise a fake | ||
109 | * event confuses some handlers (kdump uses this flag) | ||
110 | */ | ||
111 | if (action->flags & NMI_FLAG_FIRST) | ||
112 | list_add_rcu(&action->list, &desc->head); | ||
113 | else | ||
114 | list_add_tail_rcu(&action->list, &desc->head); | ||
115 | |||
116 | spin_unlock_irqrestore(&desc->lock, flags); | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | static struct nmiaction *__free_nmi(unsigned int type, const char *name) | ||
121 | { | ||
122 | struct nmi_desc *desc = nmi_to_desc(type); | ||
123 | struct nmiaction *n; | ||
124 | unsigned long flags; | ||
125 | |||
126 | spin_lock_irqsave(&desc->lock, flags); | ||
127 | |||
128 | list_for_each_entry_rcu(n, &desc->head, list) { | ||
129 | /* | ||
130 | * the name passed in to describe the nmi handler | ||
131 | * is used as the lookup key | ||
132 | */ | ||
133 | if (!strcmp(n->name, name)) { | ||
134 | WARN(in_nmi(), | ||
135 | "Trying to free NMI (%s) from NMI context!\n", n->name); | ||
136 | list_del_rcu(&n->list); | ||
137 | break; | ||
138 | } | ||
139 | } | ||
140 | |||
141 | spin_unlock_irqrestore(&desc->lock, flags); | ||
142 | synchronize_rcu(); | ||
143 | return (n); | ||
144 | } | ||
145 | |||
146 | int register_nmi_handler(unsigned int type, nmi_handler_t handler, | ||
147 | unsigned long nmiflags, const char *devname) | ||
148 | { | ||
149 | struct nmiaction *action; | ||
150 | int retval = -ENOMEM; | ||
151 | |||
152 | if (!handler) | ||
153 | return -EINVAL; | ||
154 | |||
155 | action = kzalloc(sizeof(struct nmiaction), GFP_KERNEL); | ||
156 | if (!action) | ||
157 | goto fail_action; | ||
158 | |||
159 | action->handler = handler; | ||
160 | action->flags = nmiflags; | ||
161 | action->name = kstrndup(devname, NMI_MAX_NAMELEN, GFP_KERNEL); | ||
162 | if (!action->name) | ||
163 | goto fail_action_name; | ||
164 | |||
165 | retval = __setup_nmi(type, action); | ||
166 | |||
167 | if (retval) | ||
168 | goto fail_setup_nmi; | ||
169 | |||
170 | return retval; | ||
171 | |||
172 | fail_setup_nmi: | ||
173 | kfree(action->name); | ||
174 | fail_action_name: | ||
175 | kfree(action); | ||
176 | fail_action: | ||
177 | |||
178 | return retval; | ||
179 | } | ||
180 | EXPORT_SYMBOL_GPL(register_nmi_handler); | ||
181 | |||
182 | void unregister_nmi_handler(unsigned int type, const char *name) | ||
183 | { | ||
184 | struct nmiaction *a; | ||
185 | |||
186 | a = __free_nmi(type, name); | ||
187 | if (a) { | ||
188 | kfree(a->name); | ||
189 | kfree(a); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | EXPORT_SYMBOL_GPL(unregister_nmi_handler); | ||
194 | |||
195 | static notrace __kprobes void | ||
196 | pci_serr_error(unsigned char reason, struct pt_regs *regs) | ||
197 | { | ||
198 | pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", | ||
199 | reason, smp_processor_id()); | ||
200 | |||
201 | /* | ||
202 | * On some machines, PCI SERR line is used to report memory | ||
203 | * errors. EDAC makes use of it. | ||
204 | */ | ||
205 | #if defined(CONFIG_EDAC) | ||
206 | if (edac_handler_set()) { | ||
207 | edac_atomic_assert_error(); | ||
208 | return; | ||
209 | } | ||
210 | #endif | ||
211 | |||
212 | if (panic_on_unrecovered_nmi) | ||
213 | panic("NMI: Not continuing"); | ||
214 | |||
215 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
216 | |||
217 | /* Clear and disable the PCI SERR error line. */ | ||
218 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; | ||
219 | outb(reason, NMI_REASON_PORT); | ||
220 | } | ||
221 | |||
222 | static notrace __kprobes void | ||
223 | io_check_error(unsigned char reason, struct pt_regs *regs) | ||
224 | { | ||
225 | unsigned long i; | ||
226 | |||
227 | pr_emerg( | ||
228 | "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", | ||
229 | reason, smp_processor_id()); | ||
230 | show_registers(regs); | ||
231 | |||
232 | if (panic_on_io_nmi) | ||
233 | panic("NMI IOCK error: Not continuing"); | ||
234 | |||
235 | /* Re-enable the IOCK line, wait for a few seconds */ | ||
236 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; | ||
237 | outb(reason, NMI_REASON_PORT); | ||
238 | |||
239 | i = 20000; | ||
240 | while (--i) { | ||
241 | touch_nmi_watchdog(); | ||
242 | udelay(100); | ||
243 | } | ||
244 | |||
245 | reason &= ~NMI_REASON_CLEAR_IOCHK; | ||
246 | outb(reason, NMI_REASON_PORT); | ||
247 | } | ||
248 | |||
249 | static notrace __kprobes void | ||
250 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) | ||
251 | { | ||
252 | int handled; | ||
253 | |||
254 | handled = nmi_handle(NMI_UNKNOWN, regs); | ||
255 | if (handled) | ||
256 | return; | ||
257 | #ifdef CONFIG_MCA | ||
258 | /* | ||
259 | * Might actually be able to figure out what the guilty party | ||
260 | * is: | ||
261 | */ | ||
262 | if (MCA_bus) { | ||
263 | mca_handle_nmi(); | ||
264 | return; | ||
265 | } | ||
266 | #endif | ||
267 | pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | ||
268 | reason, smp_processor_id()); | ||
269 | |||
270 | pr_emerg("Do you have a strange power saving mode enabled?\n"); | ||
271 | if (unknown_nmi_panic || panic_on_unrecovered_nmi) | ||
272 | panic("NMI: Not continuing"); | ||
273 | |||
274 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
275 | } | ||
276 | |||
277 | static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | ||
278 | { | ||
279 | unsigned char reason = 0; | ||
280 | int handled; | ||
281 | |||
282 | /* | ||
283 | * CPU-specific NMI must be processed before non-CPU-specific | ||
284 | * NMI, otherwise we may lose it, because the CPU-specific | ||
285 | * NMI can not be detected/processed on other CPUs. | ||
286 | */ | ||
287 | handled = nmi_handle(NMI_LOCAL, regs); | ||
288 | if (handled) | ||
289 | return; | ||
290 | |||
291 | /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ | ||
292 | raw_spin_lock(&nmi_reason_lock); | ||
293 | reason = get_nmi_reason(); | ||
294 | |||
295 | if (reason & NMI_REASON_MASK) { | ||
296 | if (reason & NMI_REASON_SERR) | ||
297 | pci_serr_error(reason, regs); | ||
298 | else if (reason & NMI_REASON_IOCHK) | ||
299 | io_check_error(reason, regs); | ||
300 | #ifdef CONFIG_X86_32 | ||
301 | /* | ||
302 | * Reassert NMI in case it became active | ||
303 | * meanwhile as it's edge-triggered: | ||
304 | */ | ||
305 | reassert_nmi(); | ||
306 | #endif | ||
307 | raw_spin_unlock(&nmi_reason_lock); | ||
308 | return; | ||
309 | } | ||
310 | raw_spin_unlock(&nmi_reason_lock); | ||
311 | |||
312 | unknown_nmi_error(reason, regs); | ||
313 | } | ||
314 | |||
315 | dotraplinkage notrace __kprobes void | ||
316 | do_nmi(struct pt_regs *regs, long error_code) | ||
317 | { | ||
318 | nmi_enter(); | ||
319 | |||
320 | inc_irq_stat(__nmi_count); | ||
321 | |||
322 | if (!ignore_nmis) | ||
323 | default_do_nmi(regs); | ||
324 | |||
325 | nmi_exit(); | ||
326 | } | ||
327 | |||
328 | void stop_nmi(void) | ||
329 | { | ||
330 | ignore_nmis++; | ||
331 | } | ||
332 | |||
333 | void restart_nmi(void) | ||
334 | { | ||
335 | ignore_nmis--; | ||
336 | } | ||
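Handlers are keyed by (type, name): the name string passed to register_nmi_handler() is the lookup key that unregister_nmi_handler() uses later, and NMI_FLAG_FIRST puts a handler at the head of its list (the crash/kdump path below relies on that). A minimal lifecycle sketch for a module-style user (all names illustrative):

    #include <linux/module.h>
    #include <asm/nmi.h>

    static int example_nmi(unsigned int type, struct pt_regs *regs)
    {
        /* ... check whether this NMI belongs to us ... */
        return NMI_DONE;                /* or NMI_HANDLED once claimed */
    }

    static int __init example_init(void)
    {
        /* "example" is the key later used by unregister_nmi_handler() */
        return register_nmi_handler(NMI_LOCAL, example_nmi, 0, "example");
    }

    static void __exit example_exit(void)
    {
        /* must not run in NMI context (see the WARN in __free_nmi above) */
        unregister_nmi_handler(NMI_LOCAL, "example");
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");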
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 9242436e9937..e334be1182b9 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c | |||
@@ -464,7 +464,7 @@ static inline void kb_wait(void) | |||
464 | } | 464 | } |
465 | } | 465 | } |
466 | 466 | ||
467 | static void vmxoff_nmi(int cpu, struct die_args *args) | 467 | static void vmxoff_nmi(int cpu, struct pt_regs *regs) |
468 | { | 468 | { |
469 | cpu_emergency_vmxoff(); | 469 | cpu_emergency_vmxoff(); |
470 | } | 470 | } |
@@ -736,14 +736,10 @@ static nmi_shootdown_cb shootdown_callback; | |||
736 | 736 | ||
737 | static atomic_t waiting_for_crash_ipi; | 737 | static atomic_t waiting_for_crash_ipi; |
738 | 738 | ||
739 | static int crash_nmi_callback(struct notifier_block *self, | 739 | static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) |
740 | unsigned long val, void *data) | ||
741 | { | 740 | { |
742 | int cpu; | 741 | int cpu; |
743 | 742 | ||
744 | if (val != DIE_NMI) | ||
745 | return NOTIFY_OK; | ||
746 | |||
747 | cpu = raw_smp_processor_id(); | 743 | cpu = raw_smp_processor_id(); |
748 | 744 | ||
749 | /* Don't do anything if this handler is invoked on crashing cpu. | 745 | /* Don't do anything if this handler is invoked on crashing cpu. |
@@ -751,10 +747,10 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
751 | * an NMI if system was initially booted with nmi_watchdog parameter. | 747 | * an NMI if system was initially booted with nmi_watchdog parameter. |
752 | */ | 748 | */ |
753 | if (cpu == crashing_cpu) | 749 | if (cpu == crashing_cpu) |
754 | return NOTIFY_STOP; | 750 | return NMI_HANDLED; |
755 | local_irq_disable(); | 751 | local_irq_disable(); |
756 | 752 | ||
757 | shootdown_callback(cpu, (struct die_args *)data); | 753 | shootdown_callback(cpu, regs); |
758 | 754 | ||
759 | atomic_dec(&waiting_for_crash_ipi); | 755 | atomic_dec(&waiting_for_crash_ipi); |
760 | /* Assume hlt works */ | 756 | /* Assume hlt works */ |
@@ -762,7 +758,7 @@ static int crash_nmi_callback(struct notifier_block *self, | |||
762 | for (;;) | 758 | for (;;) |
763 | cpu_relax(); | 759 | cpu_relax(); |
764 | 760 | ||
765 | return 1; | 761 | return NMI_HANDLED; |
766 | } | 762 | } |
767 | 763 | ||
768 | static void smp_send_nmi_allbutself(void) | 764 | static void smp_send_nmi_allbutself(void) |
@@ -770,12 +766,6 @@ static void smp_send_nmi_allbutself(void) | |||
770 | apic->send_IPI_allbutself(NMI_VECTOR); | 766 | apic->send_IPI_allbutself(NMI_VECTOR); |
771 | } | 767 | } |
772 | 768 | ||
773 | static struct notifier_block crash_nmi_nb = { | ||
774 | .notifier_call = crash_nmi_callback, | ||
775 | /* we want to be the first one called */ | ||
776 | .priority = NMI_LOCAL_HIGH_PRIOR+1, | ||
777 | }; | ||
778 | |||
779 | /* Halt all other CPUs, calling the specified function on each of them | 769 | /* Halt all other CPUs, calling the specified function on each of them |
780 | * | 770 | * |
781 | * This function can be used to halt all other CPUs on crash | 771 | * This function can be used to halt all other CPUs on crash |
@@ -794,7 +784,8 @@ void nmi_shootdown_cpus(nmi_shootdown_cb callback) | |||
794 | 784 | ||
795 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | 785 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); |
796 | /* Would it be better to replace the trap vector here? */ | 786 | /* Would it be better to replace the trap vector here? */ |
797 | if (register_die_notifier(&crash_nmi_nb)) | 787 | if (register_nmi_handler(NMI_LOCAL, crash_nmi_callback, |
788 | NMI_FLAG_FIRST, "crash")) | ||
798 | return; /* return what? */ | 789 | return; /* return what? */ |
799 | /* Ensure the new callback function is set before sending | 790 | /* Ensure the new callback function is set before sending |
800 | * out the NMI | 791 | * out the NMI |
diff --git a/arch/x86/kernel/rtc.c b/arch/x86/kernel/rtc.c index 3f2ad2640d85..ccdbc16b8941 100644 --- a/arch/x86/kernel/rtc.c +++ b/arch/x86/kernel/rtc.c | |||
@@ -42,8 +42,11 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
42 | { | 42 | { |
43 | int real_seconds, real_minutes, cmos_minutes; | 43 | int real_seconds, real_minutes, cmos_minutes; |
44 | unsigned char save_control, save_freq_select; | 44 | unsigned char save_control, save_freq_select; |
45 | unsigned long flags; | ||
45 | int retval = 0; | 46 | int retval = 0; |
46 | 47 | ||
48 | spin_lock_irqsave(&rtc_lock, flags); | ||
49 | |||
47 | /* tell the clock it's being set */ | 50 | /* tell the clock it's being set */ |
48 | save_control = CMOS_READ(RTC_CONTROL); | 51 | save_control = CMOS_READ(RTC_CONTROL); |
49 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); | 52 | CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL); |
@@ -93,12 +96,17 @@ int mach_set_rtc_mmss(unsigned long nowtime) | |||
93 | CMOS_WRITE(save_control, RTC_CONTROL); | 96 | CMOS_WRITE(save_control, RTC_CONTROL); |
94 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); | 97 | CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT); |
95 | 98 | ||
99 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
100 | |||
96 | return retval; | 101 | return retval; |
97 | } | 102 | } |
98 | 103 | ||
99 | unsigned long mach_get_cmos_time(void) | 104 | unsigned long mach_get_cmos_time(void) |
100 | { | 105 | { |
101 | unsigned int status, year, mon, day, hour, min, sec, century = 0; | 106 | unsigned int status, year, mon, day, hour, min, sec, century = 0; |
107 | unsigned long flags; | ||
108 | |||
109 | spin_lock_irqsave(&rtc_lock, flags); | ||
102 | 110 | ||
103 | /* | 111 | /* |
104 | * If UIP is clear, then we have >= 244 microseconds before | 112 | * If UIP is clear, then we have >= 244 microseconds before |
@@ -125,6 +133,8 @@ unsigned long mach_get_cmos_time(void) | |||
125 | status = CMOS_READ(RTC_CONTROL); | 133 | status = CMOS_READ(RTC_CONTROL); |
126 | WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); | 134 | WARN_ON_ONCE(RTC_ALWAYS_BCD && (status & RTC_DM_BINARY)); |
127 | 135 | ||
136 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
137 | |||
128 | if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { | 138 | if (RTC_ALWAYS_BCD || !(status & RTC_DM_BINARY)) { |
129 | sec = bcd2bin(sec); | 139 | sec = bcd2bin(sec); |
130 | min = bcd2bin(min); | 140 | min = bcd2bin(min); |
@@ -169,24 +179,15 @@ EXPORT_SYMBOL(rtc_cmos_write); | |||
169 | 179 | ||
170 | int update_persistent_clock(struct timespec now) | 180 | int update_persistent_clock(struct timespec now) |
171 | { | 181 | { |
172 | unsigned long flags; | 182 | return x86_platform.set_wallclock(now.tv_sec); |
173 | int retval; | ||
174 | |||
175 | spin_lock_irqsave(&rtc_lock, flags); | ||
176 | retval = x86_platform.set_wallclock(now.tv_sec); | ||
177 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
178 | |||
179 | return retval; | ||
180 | } | 183 | } |
181 | 184 | ||
182 | /* not static: needed by APM */ | 185 | /* not static: needed by APM */ |
183 | void read_persistent_clock(struct timespec *ts) | 186 | void read_persistent_clock(struct timespec *ts) |
184 | { | 187 | { |
185 | unsigned long retval, flags; | 188 | unsigned long retval; |
186 | 189 | ||
187 | spin_lock_irqsave(&rtc_lock, flags); | ||
188 | retval = x86_platform.get_wallclock(); | 190 | retval = x86_platform.get_wallclock(); |
189 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
190 | 191 | ||
191 | ts->tv_sec = retval; | 192 | ts->tv_sec = retval; |
192 | ts->tv_nsec = 0; | 193 | ts->tv_nsec = 0; |
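The rtc_lock is now taken inside the wallclock callbacks themselves (mach_set_rtc_mmss()/mach_get_cmos_time() above, the Moorestown vrtc routines below) instead of around the x86_platform indirection, so any other set_wallclock/get_wallclock implementation has to take the lock on its own. A sketch of what a conforming implementation looks like after this change (the my_plat_* names and the hardware access are placeholders):

    #include <linux/mc146818rtc.h>      /* rtc_lock */

    static int my_plat_set_wallclock(unsigned long nowtime)
    {
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&rtc_lock, flags);    /* callers no longer hold it */
        ret = my_plat_rtc_write(nowtime);       /* placeholder hardware access */
        spin_unlock_irqrestore(&rtc_lock, flags);

        return ret;
    }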
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 6913369c234c..a8e3eb83466c 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c | |||
@@ -81,15 +81,6 @@ gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; | |||
81 | DECLARE_BITMAP(used_vectors, NR_VECTORS); | 81 | DECLARE_BITMAP(used_vectors, NR_VECTORS); |
82 | EXPORT_SYMBOL_GPL(used_vectors); | 82 | EXPORT_SYMBOL_GPL(used_vectors); |
83 | 83 | ||
84 | static int ignore_nmis; | ||
85 | |||
86 | int unknown_nmi_panic; | ||
87 | /* | ||
88 | * Prevent NMI reason port (0x61) being accessed simultaneously, can | ||
89 | * only be used in NMI handler. | ||
90 | */ | ||
91 | static DEFINE_RAW_SPINLOCK(nmi_reason_lock); | ||
92 | |||
93 | static inline void conditional_sti(struct pt_regs *regs) | 84 | static inline void conditional_sti(struct pt_regs *regs) |
94 | { | 85 | { |
95 | if (regs->flags & X86_EFLAGS_IF) | 86 | if (regs->flags & X86_EFLAGS_IF) |
@@ -307,152 +298,6 @@ gp_in_kernel: | |||
307 | die("general protection fault", regs, error_code); | 298 | die("general protection fault", regs, error_code); |
308 | } | 299 | } |
309 | 300 | ||
310 | static int __init setup_unknown_nmi_panic(char *str) | ||
311 | { | ||
312 | unknown_nmi_panic = 1; | ||
313 | return 1; | ||
314 | } | ||
315 | __setup("unknown_nmi_panic", setup_unknown_nmi_panic); | ||
316 | |||
317 | static notrace __kprobes void | ||
318 | pci_serr_error(unsigned char reason, struct pt_regs *regs) | ||
319 | { | ||
320 | pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", | ||
321 | reason, smp_processor_id()); | ||
322 | |||
323 | /* | ||
324 | * On some machines, PCI SERR line is used to report memory | ||
325 | * errors. EDAC makes use of it. | ||
326 | */ | ||
327 | #if defined(CONFIG_EDAC) | ||
328 | if (edac_handler_set()) { | ||
329 | edac_atomic_assert_error(); | ||
330 | return; | ||
331 | } | ||
332 | #endif | ||
333 | |||
334 | if (panic_on_unrecovered_nmi) | ||
335 | panic("NMI: Not continuing"); | ||
336 | |||
337 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
338 | |||
339 | /* Clear and disable the PCI SERR error line. */ | ||
340 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; | ||
341 | outb(reason, NMI_REASON_PORT); | ||
342 | } | ||
343 | |||
344 | static notrace __kprobes void | ||
345 | io_check_error(unsigned char reason, struct pt_regs *regs) | ||
346 | { | ||
347 | unsigned long i; | ||
348 | |||
349 | pr_emerg( | ||
350 | "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", | ||
351 | reason, smp_processor_id()); | ||
352 | show_registers(regs); | ||
353 | |||
354 | if (panic_on_io_nmi) | ||
355 | panic("NMI IOCK error: Not continuing"); | ||
356 | |||
357 | /* Re-enable the IOCK line, wait for a few seconds */ | ||
358 | reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; | ||
359 | outb(reason, NMI_REASON_PORT); | ||
360 | |||
361 | i = 20000; | ||
362 | while (--i) { | ||
363 | touch_nmi_watchdog(); | ||
364 | udelay(100); | ||
365 | } | ||
366 | |||
367 | reason &= ~NMI_REASON_CLEAR_IOCHK; | ||
368 | outb(reason, NMI_REASON_PORT); | ||
369 | } | ||
370 | |||
371 | static notrace __kprobes void | ||
372 | unknown_nmi_error(unsigned char reason, struct pt_regs *regs) | ||
373 | { | ||
374 | if (notify_die(DIE_NMIUNKNOWN, "nmi", regs, reason, 2, SIGINT) == | ||
375 | NOTIFY_STOP) | ||
376 | return; | ||
377 | #ifdef CONFIG_MCA | ||
378 | /* | ||
379 | * Might actually be able to figure out what the guilty party | ||
380 | * is: | ||
381 | */ | ||
382 | if (MCA_bus) { | ||
383 | mca_handle_nmi(); | ||
384 | return; | ||
385 | } | ||
386 | #endif | ||
387 | pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", | ||
388 | reason, smp_processor_id()); | ||
389 | |||
390 | pr_emerg("Do you have a strange power saving mode enabled?\n"); | ||
391 | if (unknown_nmi_panic || panic_on_unrecovered_nmi) | ||
392 | panic("NMI: Not continuing"); | ||
393 | |||
394 | pr_emerg("Dazed and confused, but trying to continue\n"); | ||
395 | } | ||
396 | |||
397 | static notrace __kprobes void default_do_nmi(struct pt_regs *regs) | ||
398 | { | ||
399 | unsigned char reason = 0; | ||
400 | |||
401 | /* | ||
402 | * CPU-specific NMI must be processed before non-CPU-specific | ||
403 | * NMI, otherwise we may lose it, because the CPU-specific | ||
404 | * NMI can not be detected/processed on other CPUs. | ||
405 | */ | ||
406 | if (notify_die(DIE_NMI, "nmi", regs, 0, 2, SIGINT) == NOTIFY_STOP) | ||
407 | return; | ||
408 | |||
409 | /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ | ||
410 | raw_spin_lock(&nmi_reason_lock); | ||
411 | reason = get_nmi_reason(); | ||
412 | |||
413 | if (reason & NMI_REASON_MASK) { | ||
414 | if (reason & NMI_REASON_SERR) | ||
415 | pci_serr_error(reason, regs); | ||
416 | else if (reason & NMI_REASON_IOCHK) | ||
417 | io_check_error(reason, regs); | ||
418 | #ifdef CONFIG_X86_32 | ||
419 | /* | ||
420 | * Reassert NMI in case it became active | ||
421 | * meanwhile as it's edge-triggered: | ||
422 | */ | ||
423 | reassert_nmi(); | ||
424 | #endif | ||
425 | raw_spin_unlock(&nmi_reason_lock); | ||
426 | return; | ||
427 | } | ||
428 | raw_spin_unlock(&nmi_reason_lock); | ||
429 | |||
430 | unknown_nmi_error(reason, regs); | ||
431 | } | ||
432 | |||
433 | dotraplinkage notrace __kprobes void | ||
434 | do_nmi(struct pt_regs *regs, long error_code) | ||
435 | { | ||
436 | nmi_enter(); | ||
437 | |||
438 | inc_irq_stat(__nmi_count); | ||
439 | |||
440 | if (!ignore_nmis) | ||
441 | default_do_nmi(regs); | ||
442 | |||
443 | nmi_exit(); | ||
444 | } | ||
445 | |||
446 | void stop_nmi(void) | ||
447 | { | ||
448 | ignore_nmis++; | ||
449 | } | ||
450 | |||
451 | void restart_nmi(void) | ||
452 | { | ||
453 | ignore_nmis--; | ||
454 | } | ||
455 | |||
456 | /* May run on IST stack. */ | 301 | /* May run on IST stack. */ |
457 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) | 302 | dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code) |
458 | { | 303 | { |
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 6f08bc940fa8..8b4cc5f067de 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -3603,7 +3603,7 @@ done_prefixes: | |||
3603 | break; | 3603 | break; |
3604 | case Src2CL: | 3604 | case Src2CL: |
3605 | ctxt->src2.bytes = 1; | 3605 | ctxt->src2.bytes = 1; |
3606 | ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0x8; | 3606 | ctxt->src2.val = ctxt->regs[VCPU_REGS_RCX] & 0xff; |
3607 | break; | 3607 | break; |
3608 | case Src2ImmByte: | 3608 | case Src2ImmByte: |
3609 | rc = decode_imm(ctxt, &ctxt->src2, 1, true); | 3609 | rc = decode_imm(ctxt, &ctxt->src2, 1, true); |
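The emulate.c change is a plain masking bug: CL is the low byte of RCX, so a Src2CL shift/rotate count has to be masked with 0xff, not 0x8. With the old mask only bit 3 of the count survived, e.g.:

    unsigned char cl         = 0x05;      /* shift count of 5 in CL */
    unsigned char masked_old = cl & 0x8;  /* == 0: count silently lost */
    unsigned char masked_new = cl & 0xff; /* == 5: full CL byte, as real hardware behaves */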
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 1c5b69373a00..8e8da7960dbe 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -400,7 +400,8 @@ static u64 __update_clear_spte_slow(u64 *sptep, u64 spte) | |||
400 | 400 | ||
401 | /* xchg acts as a barrier before the setting of the high bits */ | 401 | /* xchg acts as a barrier before the setting of the high bits */ |
402 | orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); | 402 | orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low); |
403 | orig.spte_high = ssptep->spte_high = sspte.spte_high; | 403 | orig.spte_high = ssptep->spte_high; |
404 | ssptep->spte_high = sspte.spte_high; | ||
404 | count_spte_clear(sptep, spte); | 405 | count_spte_clear(sptep, spte); |
405 | 406 | ||
406 | return orig.spte; | 407 | return orig.spte; |
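The mmu.c change fixes an ordering bug in the 32-bit PAE spte update: the combined assignment evaluated right to left, so orig.spte_high captured the new high word rather than the one being replaced, and the "original" spte handed back to the caller was partly the new value. Split into two statements, the intended read-then-write order is explicit:

    orig.spte_high = ssptep->spte_high;     /* read the old high word first */
    ssptep->spte_high = sspte.spte_high;    /* then publish the new high word */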
diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 7ca4d43e8988..990c35bfa88f 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c | |||
@@ -61,26 +61,15 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, | |||
61 | } | 61 | } |
62 | 62 | ||
63 | 63 | ||
64 | static int profile_exceptions_notify(struct notifier_block *self, | 64 | static int profile_exceptions_notify(unsigned int val, struct pt_regs *regs) |
65 | unsigned long val, void *data) | ||
66 | { | 65 | { |
67 | struct die_args *args = (struct die_args *)data; | 66 | if (ctr_running) |
68 | int ret = NOTIFY_DONE; | 67 | model->check_ctrs(regs, &__get_cpu_var(cpu_msrs)); |
69 | 68 | else if (!nmi_enabled) | |
70 | switch (val) { | 69 | return NMI_DONE; |
71 | case DIE_NMI: | 70 | else |
72 | if (ctr_running) | 71 | model->stop(&__get_cpu_var(cpu_msrs)); |
73 | model->check_ctrs(args->regs, &__get_cpu_var(cpu_msrs)); | 72 | return NMI_HANDLED; |
74 | else if (!nmi_enabled) | ||
75 | break; | ||
76 | else | ||
77 | model->stop(&__get_cpu_var(cpu_msrs)); | ||
78 | ret = NOTIFY_STOP; | ||
79 | break; | ||
80 | default: | ||
81 | break; | ||
82 | } | ||
83 | return ret; | ||
84 | } | 73 | } |
85 | 74 | ||
86 | static void nmi_cpu_save_registers(struct op_msrs *msrs) | 75 | static void nmi_cpu_save_registers(struct op_msrs *msrs) |
@@ -363,12 +352,6 @@ static void nmi_cpu_setup(void *dummy) | |||
363 | apic_write(APIC_LVTPC, APIC_DM_NMI); | 352 | apic_write(APIC_LVTPC, APIC_DM_NMI); |
364 | } | 353 | } |
365 | 354 | ||
366 | static struct notifier_block profile_exceptions_nb = { | ||
367 | .notifier_call = profile_exceptions_notify, | ||
368 | .next = NULL, | ||
369 | .priority = NMI_LOCAL_LOW_PRIOR, | ||
370 | }; | ||
371 | |||
372 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) | 355 | static void nmi_cpu_restore_registers(struct op_msrs *msrs) |
373 | { | 356 | { |
374 | struct op_msr *counters = msrs->counters; | 357 | struct op_msr *counters = msrs->counters; |
@@ -508,7 +491,8 @@ static int nmi_setup(void) | |||
508 | ctr_running = 0; | 491 | ctr_running = 0; |
509 | /* make variables visible to the nmi handler: */ | 492 | /* make variables visible to the nmi handler: */ |
510 | smp_mb(); | 493 | smp_mb(); |
511 | err = register_die_notifier(&profile_exceptions_nb); | 494 | err = register_nmi_handler(NMI_LOCAL, profile_exceptions_notify, |
495 | 0, "oprofile"); | ||
512 | if (err) | 496 | if (err) |
513 | goto fail; | 497 | goto fail; |
514 | 498 | ||
@@ -538,7 +522,7 @@ static void nmi_shutdown(void) | |||
538 | put_online_cpus(); | 522 | put_online_cpus(); |
539 | /* make variables visible to the nmi handler: */ | 523 | /* make variables visible to the nmi handler: */ |
540 | smp_mb(); | 524 | smp_mb(); |
541 | unregister_die_notifier(&profile_exceptions_nb); | 525 | unregister_nmi_handler(NMI_LOCAL, "oprofile"); |
542 | msrs = &get_cpu_var(cpu_msrs); | 526 | msrs = &get_cpu_var(cpu_msrs); |
543 | model->shutdown(msrs); | 527 | model->shutdown(msrs); |
544 | free_msrs(); | 528 | free_msrs(); |
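This hunk shows the pattern that recurs throughout the merge: die-notifier based NMI hooks become direct handlers registered with register_nmi_handler() and return NMI_HANDLED or NMI_DONE instead of NOTIFY_STOP/NOTIFY_DONE. A hedged sketch of the registration lifecycle, using only the calls visible in the hunks (the handler body and module boilerplate are placeholders, not oprofile code):

    #include <linux/module.h>
    #include <linux/ptrace.h>
    #include <asm/nmi.h>

    /* Return NMI_HANDLED if this NMI was ours, NMI_DONE to let others look at it. */
    static int example_nmi_handler(unsigned int type, struct pt_regs *regs)
    {
            bool ours = false;      /* a real handler would check its hardware here */

            return ours ? NMI_HANDLED : NMI_DONE;
    }

    static int __init example_init(void)
    {
            /* type, handler, flags, name -- the name is the unregistration key */
            return register_nmi_handler(NMI_LOCAL, example_nmi_handler, 0, "example");
    }

    static void __exit example_exit(void)
    {
            unregister_nmi_handler(NMI_LOCAL, "example");
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

Compared with the notifier chain, the handler no longer has to switch on a die value or unpack a struct die_args; it receives the registers directly.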
diff --git a/arch/x86/platform/mrst/vrtc.c b/arch/x86/platform/mrst/vrtc.c index 73d70d65e76e..6d5dbcdd444a 100644 --- a/arch/x86/platform/mrst/vrtc.c +++ b/arch/x86/platform/mrst/vrtc.c | |||
@@ -58,8 +58,11 @@ EXPORT_SYMBOL_GPL(vrtc_cmos_write); | |||
58 | unsigned long vrtc_get_time(void) | 58 | unsigned long vrtc_get_time(void) |
59 | { | 59 | { |
60 | u8 sec, min, hour, mday, mon; | 60 | u8 sec, min, hour, mday, mon; |
61 | unsigned long flags; | ||
61 | u32 year; | 62 | u32 year; |
62 | 63 | ||
64 | spin_lock_irqsave(&rtc_lock, flags); | ||
65 | |||
63 | while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) | 66 | while ((vrtc_cmos_read(RTC_FREQ_SELECT) & RTC_UIP)) |
64 | cpu_relax(); | 67 | cpu_relax(); |
65 | 68 | ||
@@ -70,6 +73,8 @@ unsigned long vrtc_get_time(void) | |||
70 | mon = vrtc_cmos_read(RTC_MONTH); | 73 | mon = vrtc_cmos_read(RTC_MONTH); |
71 | year = vrtc_cmos_read(RTC_YEAR); | 74 | year = vrtc_cmos_read(RTC_YEAR); |
72 | 75 | ||
76 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
77 | |||
73 | /* vRTC YEAR reg contains the offset to 1960 */ | 78 | /* vRTC YEAR reg contains the offset to 1960 */ |
74 | year += 1960; | 79 | year += 1960; |
75 | 80 | ||
@@ -83,8 +88,10 @@ unsigned long vrtc_get_time(void) | |||
83 | int vrtc_set_mmss(unsigned long nowtime) | 88 | int vrtc_set_mmss(unsigned long nowtime) |
84 | { | 89 | { |
85 | int real_sec, real_min; | 90 | int real_sec, real_min; |
91 | unsigned long flags; | ||
86 | int vrtc_min; | 92 | int vrtc_min; |
87 | 93 | ||
94 | spin_lock_irqsave(&rtc_lock, flags); | ||
88 | vrtc_min = vrtc_cmos_read(RTC_MINUTES); | 95 | vrtc_min = vrtc_cmos_read(RTC_MINUTES); |
89 | 96 | ||
90 | real_sec = nowtime % 60; | 97 | real_sec = nowtime % 60; |
@@ -95,6 +102,8 @@ int vrtc_set_mmss(unsigned long nowtime) | |||
95 | 102 | ||
96 | vrtc_cmos_write(real_sec, RTC_SECONDS); | 103 | vrtc_cmos_write(real_sec, RTC_SECONDS); |
97 | vrtc_cmos_write(real_min, RTC_MINUTES); | 104 | vrtc_cmos_write(real_min, RTC_MINUTES); |
105 | spin_unlock_irqrestore(&rtc_lock, flags); | ||
106 | |||
98 | return 0; | 107 | return 0; |
99 | } | 108 | } |
100 | 109 | ||
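The vrtc change takes rtc_lock with interrupts disabled around the whole multi-register read and write, so the time snapshot cannot be torn by another rtc_lock user. A generic sketch of that pattern (the lock name and register accessor are placeholders, not the mrst code):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_rtc_lock);

    /* Placeholder register accessor standing in for a CMOS/vRTC read. */
    static unsigned char example_read(int reg)
    {
            return 0;
    }

    unsigned long example_get_time(void)
    {
            unsigned char sec, min, hour;
            unsigned long flags;

            spin_lock_irqsave(&example_rtc_lock, flags);   /* exclude other users, IRQs off */
            sec  = example_read(0);
            min  = example_read(2);
            hour = example_read(4);
            spin_unlock_irqrestore(&example_rtc_lock, flags);

            return hour * 3600UL + min * 60UL + sec;       /* consistent snapshot */
    }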
diff --git a/block/blk-core.c b/block/blk-core.c index b2ed78afd9f0..d34433ae7917 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -348,9 +348,10 @@ void blk_put_queue(struct request_queue *q) | |||
348 | EXPORT_SYMBOL(blk_put_queue); | 348 | EXPORT_SYMBOL(blk_put_queue); |
349 | 349 | ||
350 | /* | 350 | /* |
351 | * Note: If a driver supplied the queue lock, it should not zap that lock | 351 | * Note: If a driver supplied the queue lock, it is disconnected |
352 | * unexpectedly as some queue cleanup components like elevator_exit() and | 352 | * by this function. The actual state of the lock doesn't matter |
353 | * blk_throtl_exit() need queue lock. | 353 | * here as the request_queue isn't accessible after this point |
354 | * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. | ||
354 | */ | 355 | */ |
355 | void blk_cleanup_queue(struct request_queue *q) | 356 | void blk_cleanup_queue(struct request_queue *q) |
356 | { | 357 | { |
@@ -367,10 +368,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
367 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); | 368 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); |
368 | mutex_unlock(&q->sysfs_lock); | 369 | mutex_unlock(&q->sysfs_lock); |
369 | 370 | ||
370 | if (q->elevator) | 371 | if (q->queue_lock != &q->__queue_lock) |
371 | elevator_exit(q->elevator); | 372 | q->queue_lock = &q->__queue_lock; |
372 | |||
373 | blk_throtl_exit(q); | ||
374 | 373 | ||
375 | blk_put_queue(q); | 374 | blk_put_queue(q); |
376 | } | 375 | } |
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index e681805cdb47..60fda88c57f0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c | |||
@@ -479,6 +479,11 @@ static void blk_release_queue(struct kobject *kobj) | |||
479 | 479 | ||
480 | blk_sync_queue(q); | 480 | blk_sync_queue(q); |
481 | 481 | ||
482 | if (q->elevator) | ||
483 | elevator_exit(q->elevator); | ||
484 | |||
485 | blk_throtl_exit(q); | ||
486 | |||
482 | if (rl->rq_pool) | 487 | if (rl->rq_pool) |
483 | mempool_destroy(rl->rq_pool); | 488 | mempool_destroy(rl->rq_pool); |
484 | 489 | ||
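Taken together, the two block hunks move elevator and throttle teardown out of blk_cleanup_queue() and into the kobject release callback, i.e. to the point where the last reference to the queue is dropped, while blk_cleanup_queue() only switches back to the internal queue lock. A condensed sketch of that release-time pattern using a generic refcount (everything except the elevator_exit()/blk_throtl_exit() stand-ins is illustrative):

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct example_queue {
            struct kref ref;
            void *elevator_data;          /* stands in for q->elevator and throttle state */
    };

    /* Runs only once the last reference is dropped, like blk_release_queue(). */
    static void example_release(struct kref *ref)
    {
            struct example_queue *q = container_of(ref, struct example_queue, ref);

            kfree(q->elevator_data);      /* stands in for elevator_exit()/blk_throtl_exit() */
            kfree(q);
    }

    void example_put(struct example_queue *q)
    {
            kref_put(&q->ref, example_release);   /* teardown deferred to the final put */
    }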
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 0784f99a4665..b8e08cb67a18 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <acpi/hed.h> | 50 | #include <acpi/hed.h> |
51 | #include <asm/mce.h> | 51 | #include <asm/mce.h> |
52 | #include <asm/tlbflush.h> | 52 | #include <asm/tlbflush.h> |
53 | #include <asm/nmi.h> | ||
53 | 54 | ||
54 | #include "apei-internal.h" | 55 | #include "apei-internal.h" |
55 | 56 | ||
@@ -749,15 +750,11 @@ static void ghes_proc_in_irq(struct irq_work *irq_work) | |||
749 | } | 750 | } |
750 | } | 751 | } |
751 | 752 | ||
752 | static int ghes_notify_nmi(struct notifier_block *this, | 753 | static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs) |
753 | unsigned long cmd, void *data) | ||
754 | { | 754 | { |
755 | struct ghes *ghes, *ghes_global = NULL; | 755 | struct ghes *ghes, *ghes_global = NULL; |
756 | int sev, sev_global = -1; | 756 | int sev, sev_global = -1; |
757 | int ret = NOTIFY_DONE; | 757 | int ret = NMI_DONE; |
758 | |||
759 | if (cmd != DIE_NMI) | ||
760 | return ret; | ||
761 | 758 | ||
762 | raw_spin_lock(&ghes_nmi_lock); | 759 | raw_spin_lock(&ghes_nmi_lock); |
763 | list_for_each_entry_rcu(ghes, &ghes_nmi, list) { | 760 | list_for_each_entry_rcu(ghes, &ghes_nmi, list) { |
@@ -770,10 +767,10 @@ static int ghes_notify_nmi(struct notifier_block *this, | |||
770 | sev_global = sev; | 767 | sev_global = sev; |
771 | ghes_global = ghes; | 768 | ghes_global = ghes; |
772 | } | 769 | } |
773 | ret = NOTIFY_STOP; | 770 | ret = NMI_HANDLED; |
774 | } | 771 | } |
775 | 772 | ||
776 | if (ret == NOTIFY_DONE) | 773 | if (ret == NMI_DONE) |
777 | goto out; | 774 | goto out; |
778 | 775 | ||
779 | if (sev_global >= GHES_SEV_PANIC) { | 776 | if (sev_global >= GHES_SEV_PANIC) { |
@@ -825,10 +822,6 @@ static struct notifier_block ghes_notifier_sci = { | |||
825 | .notifier_call = ghes_notify_sci, | 822 | .notifier_call = ghes_notify_sci, |
826 | }; | 823 | }; |
827 | 824 | ||
828 | static struct notifier_block ghes_notifier_nmi = { | ||
829 | .notifier_call = ghes_notify_nmi, | ||
830 | }; | ||
831 | |||
832 | static unsigned long ghes_esource_prealloc_size( | 825 | static unsigned long ghes_esource_prealloc_size( |
833 | const struct acpi_hest_generic *generic) | 826 | const struct acpi_hest_generic *generic) |
834 | { | 827 | { |
@@ -918,7 +911,8 @@ static int __devinit ghes_probe(struct platform_device *ghes_dev) | |||
918 | ghes_estatus_pool_expand(len); | 911 | ghes_estatus_pool_expand(len); |
919 | mutex_lock(&ghes_list_mutex); | 912 | mutex_lock(&ghes_list_mutex); |
920 | if (list_empty(&ghes_nmi)) | 913 | if (list_empty(&ghes_nmi)) |
921 | register_die_notifier(&ghes_notifier_nmi); | 914 | register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, |
915 | "ghes"); | ||
922 | list_add_rcu(&ghes->list, &ghes_nmi); | 916 | list_add_rcu(&ghes->list, &ghes_nmi); |
923 | mutex_unlock(&ghes_list_mutex); | 917 | mutex_unlock(&ghes_list_mutex); |
924 | break; | 918 | break; |
@@ -964,7 +958,7 @@ static int __devexit ghes_remove(struct platform_device *ghes_dev) | |||
964 | mutex_lock(&ghes_list_mutex); | 958 | mutex_lock(&ghes_list_mutex); |
965 | list_del_rcu(&ghes->list); | 959 | list_del_rcu(&ghes->list); |
966 | if (list_empty(&ghes_nmi)) | 960 | if (list_empty(&ghes_nmi)) |
967 | unregister_die_notifier(&ghes_notifier_nmi); | 961 | unregister_nmi_handler(NMI_LOCAL, "ghes"); |
968 | mutex_unlock(&ghes_list_mutex); | 962 | mutex_unlock(&ghes_list_mutex); |
969 | /* | 963 | /* |
970 | * To synchronize with NMI handler, ghes can only be | 964 | * To synchronize with NMI handler, ghes can only be |
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c index 2c18d584066d..b97294e2d95b 100644 --- a/drivers/base/power/clock_ops.c +++ b/drivers/base/power/clock_ops.c | |||
@@ -42,6 +42,22 @@ static struct pm_clk_data *__to_pcd(struct device *dev) | |||
42 | } | 42 | } |
43 | 43 | ||
44 | /** | 44 | /** |
45 | * pm_clk_acquire - Acquire a device clock. | ||
46 | * @dev: Device whose clock is to be acquired. | ||
47 | * @ce: PM clock entry corresponding to the clock. | ||
48 | */ | ||
49 | static void pm_clk_acquire(struct device *dev, struct pm_clock_entry *ce) | ||
50 | { | ||
51 | ce->clk = clk_get(dev, ce->con_id); | ||
52 | if (IS_ERR(ce->clk)) { | ||
53 | ce->status = PCE_STATUS_ERROR; | ||
54 | } else { | ||
55 | ce->status = PCE_STATUS_ACQUIRED; | ||
56 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
57 | } | ||
58 | } | ||
59 | |||
60 | /** | ||
45 | * pm_clk_add - Start using a device clock for power management. | 61 | * pm_clk_add - Start using a device clock for power management. |
46 | * @dev: Device whose clock is going to be used for power management. | 62 | * @dev: Device whose clock is going to be used for power management. |
47 | * @con_id: Connection ID of the clock. | 63 | * @con_id: Connection ID of the clock. |
@@ -73,6 +89,8 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
73 | } | 89 | } |
74 | } | 90 | } |
75 | 91 | ||
92 | pm_clk_acquire(dev, ce); | ||
93 | |||
76 | spin_lock_irq(&pcd->lock); | 94 | spin_lock_irq(&pcd->lock); |
77 | list_add_tail(&ce->node, &pcd->clock_list); | 95 | list_add_tail(&ce->node, &pcd->clock_list); |
78 | spin_unlock_irq(&pcd->lock); | 96 | spin_unlock_irq(&pcd->lock); |
@@ -82,17 +100,12 @@ int pm_clk_add(struct device *dev, const char *con_id) | |||
82 | /** | 100 | /** |
83 | * __pm_clk_remove - Destroy PM clock entry. | 101 | * __pm_clk_remove - Destroy PM clock entry. |
84 | * @ce: PM clock entry to destroy. | 102 | * @ce: PM clock entry to destroy. |
85 | * | ||
86 | * This routine must be called under the spinlock protecting the PM list of | ||
87 | * clocks corresponding the the @ce's device. | ||
88 | */ | 103 | */ |
89 | static void __pm_clk_remove(struct pm_clock_entry *ce) | 104 | static void __pm_clk_remove(struct pm_clock_entry *ce) |
90 | { | 105 | { |
91 | if (!ce) | 106 | if (!ce) |
92 | return; | 107 | return; |
93 | 108 | ||
94 | list_del(&ce->node); | ||
95 | |||
96 | if (ce->status < PCE_STATUS_ERROR) { | 109 | if (ce->status < PCE_STATUS_ERROR) { |
97 | if (ce->status == PCE_STATUS_ENABLED) | 110 | if (ce->status == PCE_STATUS_ENABLED) |
98 | clk_disable(ce->clk); | 111 | clk_disable(ce->clk); |
@@ -126,18 +139,22 @@ void pm_clk_remove(struct device *dev, const char *con_id) | |||
126 | spin_lock_irq(&pcd->lock); | 139 | spin_lock_irq(&pcd->lock); |
127 | 140 | ||
128 | list_for_each_entry(ce, &pcd->clock_list, node) { | 141 | list_for_each_entry(ce, &pcd->clock_list, node) { |
129 | if (!con_id && !ce->con_id) { | 142 | if (!con_id && !ce->con_id) |
130 | __pm_clk_remove(ce); | 143 | goto remove; |
131 | break; | 144 | else if (!con_id || !ce->con_id) |
132 | } else if (!con_id || !ce->con_id) { | ||
133 | continue; | 145 | continue; |
134 | } else if (!strcmp(con_id, ce->con_id)) { | 146 | else if (!strcmp(con_id, ce->con_id)) |
135 | __pm_clk_remove(ce); | 147 | goto remove; |
136 | break; | ||
137 | } | ||
138 | } | 148 | } |
139 | 149 | ||
140 | spin_unlock_irq(&pcd->lock); | 150 | spin_unlock_irq(&pcd->lock); |
151 | return; | ||
152 | |||
153 | remove: | ||
154 | list_del(&ce->node); | ||
155 | spin_unlock_irq(&pcd->lock); | ||
156 | |||
157 | __pm_clk_remove(ce); | ||
141 | } | 158 | } |
142 | 159 | ||
143 | /** | 160 | /** |
@@ -175,20 +192,27 @@ void pm_clk_destroy(struct device *dev) | |||
175 | { | 192 | { |
176 | struct pm_clk_data *pcd = __to_pcd(dev); | 193 | struct pm_clk_data *pcd = __to_pcd(dev); |
177 | struct pm_clock_entry *ce, *c; | 194 | struct pm_clock_entry *ce, *c; |
195 | struct list_head list; | ||
178 | 196 | ||
179 | if (!pcd) | 197 | if (!pcd) |
180 | return; | 198 | return; |
181 | 199 | ||
182 | dev->power.subsys_data = NULL; | 200 | dev->power.subsys_data = NULL; |
201 | INIT_LIST_HEAD(&list); | ||
183 | 202 | ||
184 | spin_lock_irq(&pcd->lock); | 203 | spin_lock_irq(&pcd->lock); |
185 | 204 | ||
186 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) | 205 | list_for_each_entry_safe_reverse(ce, c, &pcd->clock_list, node) |
187 | __pm_clk_remove(ce); | 206 | list_move(&ce->node, &list); |
188 | 207 | ||
189 | spin_unlock_irq(&pcd->lock); | 208 | spin_unlock_irq(&pcd->lock); |
190 | 209 | ||
191 | kfree(pcd); | 210 | kfree(pcd); |
211 | |||
212 | list_for_each_entry_safe_reverse(ce, c, &list, node) { | ||
213 | list_del(&ce->node); | ||
214 | __pm_clk_remove(ce); | ||
215 | } | ||
192 | } | 216 | } |
193 | 217 | ||
194 | #endif /* CONFIG_PM */ | 218 | #endif /* CONFIG_PM */ |
@@ -196,23 +220,6 @@ void pm_clk_destroy(struct device *dev) | |||
196 | #ifdef CONFIG_PM_RUNTIME | 220 | #ifdef CONFIG_PM_RUNTIME |
197 | 221 | ||
198 | /** | 222 | /** |
199 | * pm_clk_acquire - Acquire a device clock. | ||
200 | * @dev: Device whose clock is to be acquired. | ||
201 | * @con_id: Connection ID of the clock. | ||
202 | */ | ||
203 | static void pm_clk_acquire(struct device *dev, | ||
204 | struct pm_clock_entry *ce) | ||
205 | { | ||
206 | ce->clk = clk_get(dev, ce->con_id); | ||
207 | if (IS_ERR(ce->clk)) { | ||
208 | ce->status = PCE_STATUS_ERROR; | ||
209 | } else { | ||
210 | ce->status = PCE_STATUS_ACQUIRED; | ||
211 | dev_dbg(dev, "Clock %s managed by runtime PM.\n", ce->con_id); | ||
212 | } | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * pm_clk_suspend - Disable clocks in a device's PM clock list. | 223 | * pm_clk_suspend - Disable clocks in a device's PM clock list. |
217 | * @dev: Device to disable the clocks for. | 224 | * @dev: Device to disable the clocks for. |
218 | */ | 225 | */ |
@@ -230,9 +237,6 @@ int pm_clk_suspend(struct device *dev) | |||
230 | spin_lock_irqsave(&pcd->lock, flags); | 237 | spin_lock_irqsave(&pcd->lock, flags); |
231 | 238 | ||
232 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { | 239 | list_for_each_entry_reverse(ce, &pcd->clock_list, node) { |
233 | if (ce->status == PCE_STATUS_NONE) | ||
234 | pm_clk_acquire(dev, ce); | ||
235 | |||
236 | if (ce->status < PCE_STATUS_ERROR) { | 240 | if (ce->status < PCE_STATUS_ERROR) { |
237 | clk_disable(ce->clk); | 241 | clk_disable(ce->clk); |
238 | ce->status = PCE_STATUS_ACQUIRED; | 242 | ce->status = PCE_STATUS_ACQUIRED; |
@@ -262,9 +266,6 @@ int pm_clk_resume(struct device *dev) | |||
262 | spin_lock_irqsave(&pcd->lock, flags); | 266 | spin_lock_irqsave(&pcd->lock, flags); |
263 | 267 | ||
264 | list_for_each_entry(ce, &pcd->clock_list, node) { | 268 | list_for_each_entry(ce, &pcd->clock_list, node) { |
265 | if (ce->status == PCE_STATUS_NONE) | ||
266 | pm_clk_acquire(dev, ce); | ||
267 | |||
268 | if (ce->status < PCE_STATUS_ERROR) { | 269 | if (ce->status < PCE_STATUS_ERROR) { |
269 | clk_enable(ce->clk); | 270 | clk_enable(ce->clk); |
270 | ce->status = PCE_STATUS_ENABLED; | 271 | ce->status = PCE_STATUS_ENABLED; |
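The pm_clk rework acquires the clock when the entry is added (pm_clk_acquire() moves into pm_clk_add()), and on removal only unlinks the entry while holding the spinlock, deferring __pm_clk_remove() and its potentially sleeping clk_put() until the lock is dropped; pm_clk_destroy() does the same by moving all entries to a private list first. A compact sketch of that unlink-then-free pattern (the entry type and destroy helper are illustrative, not the clock_ops code):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    struct example_entry {
            struct list_head node;
            int id;
    };

    static LIST_HEAD(example_list);
    static DEFINE_SPINLOCK(example_lock);

    /* May sleep, so it must not run under example_lock. */
    static void example_destroy(struct example_entry *e)
    {
            kfree(e);
    }

    void example_remove(int id)
    {
            struct example_entry *e, *found = NULL;

            spin_lock_irq(&example_lock);
            list_for_each_entry(e, &example_list, node) {
                    if (e->id == id) {
                            list_del(&e->node);     /* unlink under the lock */
                            found = e;
                            break;
                    }
            }
            spin_unlock_irq(&example_lock);

            if (found)
                    example_destroy(found);         /* sleepable work outside the lock */
    }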
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index 3302586655c4..c2917ffad2c2 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c | |||
@@ -65,6 +65,7 @@ | |||
65 | * mechanism for it at that time. | 65 | * mechanism for it at that time. |
66 | */ | 66 | */ |
67 | #include <asm/kdebug.h> | 67 | #include <asm/kdebug.h> |
68 | #include <asm/nmi.h> | ||
68 | #define HAVE_DIE_NMI | 69 | #define HAVE_DIE_NMI |
69 | #endif | 70 | #endif |
70 | 71 | ||
@@ -1077,17 +1078,8 @@ static void ipmi_unregister_watchdog(int ipmi_intf) | |||
1077 | 1078 | ||
1078 | #ifdef HAVE_DIE_NMI | 1079 | #ifdef HAVE_DIE_NMI |
1079 | static int | 1080 | static int |
1080 | ipmi_nmi(struct notifier_block *self, unsigned long val, void *data) | 1081 | ipmi_nmi(unsigned int val, struct pt_regs *regs) |
1081 | { | 1082 | { |
1082 | struct die_args *args = data; | ||
1083 | |||
1084 | if (val != DIE_NMIUNKNOWN) | ||
1085 | return NOTIFY_OK; | ||
1086 | |||
1087 | /* Hack, if it's a memory or I/O error, ignore it. */ | ||
1088 | if (args->err & 0xc0) | ||
1089 | return NOTIFY_OK; | ||
1090 | |||
1091 | /* | 1083 | /* |
1092 | * If we get here, it's an NMI that's not a memory or I/O | 1084 | * If we get here, it's an NMI that's not a memory or I/O |
1093 | * error. We can't truly tell if it's from IPMI or not | 1085 | * error. We can't truly tell if it's from IPMI or not |
@@ -1097,15 +1089,15 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data) | |||
1097 | 1089 | ||
1098 | if (testing_nmi) { | 1090 | if (testing_nmi) { |
1099 | testing_nmi = 2; | 1091 | testing_nmi = 2; |
1100 | return NOTIFY_STOP; | 1092 | return NMI_HANDLED; |
1101 | } | 1093 | } |
1102 | 1094 | ||
1103 | /* If we are not expecting a timeout, ignore it. */ | 1095 | /* If we are not expecting a timeout, ignore it. */ |
1104 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) | 1096 | if (ipmi_watchdog_state == WDOG_TIMEOUT_NONE) |
1105 | return NOTIFY_OK; | 1097 | return NMI_DONE; |
1106 | 1098 | ||
1107 | if (preaction_val != WDOG_PRETIMEOUT_NMI) | 1099 | if (preaction_val != WDOG_PRETIMEOUT_NMI) |
1108 | return NOTIFY_OK; | 1100 | return NMI_DONE; |
1109 | 1101 | ||
1110 | /* | 1102 | /* |
1111 | * If no one else handled the NMI, we assume it was the IPMI | 1103 | * If no one else handled the NMI, we assume it was the IPMI |
@@ -1120,12 +1112,8 @@ ipmi_nmi(struct notifier_block *self, unsigned long val, void *data) | |||
1120 | panic(PFX "pre-timeout"); | 1112 | panic(PFX "pre-timeout"); |
1121 | } | 1113 | } |
1122 | 1114 | ||
1123 | return NOTIFY_STOP; | 1115 | return NMI_HANDLED; |
1124 | } | 1116 | } |
1125 | |||
1126 | static struct notifier_block ipmi_nmi_handler = { | ||
1127 | .notifier_call = ipmi_nmi | ||
1128 | }; | ||
1129 | #endif | 1117 | #endif |
1130 | 1118 | ||
1131 | static int wdog_reboot_handler(struct notifier_block *this, | 1119 | static int wdog_reboot_handler(struct notifier_block *this, |
@@ -1290,7 +1278,8 @@ static void check_parms(void) | |||
1290 | } | 1278 | } |
1291 | } | 1279 | } |
1292 | if (do_nmi && !nmi_handler_registered) { | 1280 | if (do_nmi && !nmi_handler_registered) { |
1293 | rv = register_die_notifier(&ipmi_nmi_handler); | 1281 | rv = register_nmi_handler(NMI_UNKNOWN, ipmi_nmi, 0, |
1282 | "ipmi"); | ||
1294 | if (rv) { | 1283 | if (rv) { |
1295 | printk(KERN_WARNING PFX | 1284 | printk(KERN_WARNING PFX |
1296 | "Can't register nmi handler\n"); | 1285 | "Can't register nmi handler\n"); |
@@ -1298,7 +1287,7 @@ static void check_parms(void) | |||
1298 | } else | 1287 | } else |
1299 | nmi_handler_registered = 1; | 1288 | nmi_handler_registered = 1; |
1300 | } else if (!do_nmi && nmi_handler_registered) { | 1289 | } else if (!do_nmi && nmi_handler_registered) { |
1301 | unregister_die_notifier(&ipmi_nmi_handler); | 1290 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); |
1302 | nmi_handler_registered = 0; | 1291 | nmi_handler_registered = 0; |
1303 | } | 1292 | } |
1304 | #endif | 1293 | #endif |
@@ -1336,7 +1325,7 @@ static int __init ipmi_wdog_init(void) | |||
1336 | if (rv) { | 1325 | if (rv) { |
1337 | #ifdef HAVE_DIE_NMI | 1326 | #ifdef HAVE_DIE_NMI |
1338 | if (nmi_handler_registered) | 1327 | if (nmi_handler_registered) |
1339 | unregister_die_notifier(&ipmi_nmi_handler); | 1328 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); |
1340 | #endif | 1329 | #endif |
1341 | atomic_notifier_chain_unregister(&panic_notifier_list, | 1330 | atomic_notifier_chain_unregister(&panic_notifier_list, |
1342 | &wdog_panic_notifier); | 1331 | &wdog_panic_notifier); |
@@ -1357,7 +1346,7 @@ static void __exit ipmi_wdog_exit(void) | |||
1357 | 1346 | ||
1358 | #ifdef HAVE_DIE_NMI | 1347 | #ifdef HAVE_DIE_NMI |
1359 | if (nmi_handler_registered) | 1348 | if (nmi_handler_registered) |
1360 | unregister_die_notifier(&ipmi_nmi_handler); | 1349 | unregister_nmi_handler(NMI_UNKNOWN, "ipmi"); |
1361 | #endif | 1350 | #endif |
1362 | 1351 | ||
1363 | atomic_notifier_chain_unregister(&panic_notifier_list, | 1352 | atomic_notifier_chain_unregister(&panic_notifier_list, |
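Unlike the oprofile hook above, the IPMI watchdog registers on the NMI_UNKNOWN chain, which (as far as these hunks show) is consulted only for NMIs that no NMI_LOCAL handler claimed, so the old DIE_NMIUNKNOWN and memory/I/O-error (0xc0) filtering can be dropped from the driver. A minimal sketch of such a catch-all handler (names and body illustrative):

    #include <linux/ptrace.h>
    #include <asm/nmi.h>

    /* Invoked only for NMIs that every NMI_LOCAL handler declined. */
    static int example_unknown_nmi(unsigned int type, struct pt_regs *regs)
    {
            /* Decide whether to claim the stray NMI; NMI_DONE lets it be reported. */
            return NMI_DONE;
    }

    static int example_register(void)
    {
            return register_nmi_handler(NMI_UNKNOWN, example_unknown_nmi, 0, "example");
    }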
diff --git a/drivers/char/tpm/Kconfig b/drivers/char/tpm/Kconfig index f6595aba4f0f..fa567f1158c2 100644 --- a/drivers/char/tpm/Kconfig +++ b/drivers/char/tpm/Kconfig | |||
@@ -43,6 +43,7 @@ config TCG_NSC | |||
43 | 43 | ||
44 | config TCG_ATMEL | 44 | config TCG_ATMEL |
45 | tristate "Atmel TPM Interface" | 45 | tristate "Atmel TPM Interface" |
46 | depends on PPC64 || HAS_IOPORT | ||
46 | ---help--- | 47 | ---help--- |
47 | If you have a TPM security chip from Atmel say Yes and it | 48 | If you have a TPM security chip from Atmel say Yes and it |
48 | will be accessible from within Linux. To compile this driver | 49 | will be accessible from within Linux. To compile this driver |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index caf8012ef47c..9ca5c021d0b6 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -383,6 +383,9 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf, | |||
383 | u32 count, ordinal; | 383 | u32 count, ordinal; |
384 | unsigned long stop; | 384 | unsigned long stop; |
385 | 385 | ||
386 | if (bufsiz > TPM_BUFSIZE) | ||
387 | bufsiz = TPM_BUFSIZE; | ||
388 | |||
386 | count = be32_to_cpu(*((__be32 *) (buf + 2))); | 389 | count = be32_to_cpu(*((__be32 *) (buf + 2))); |
387 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); | 390 | ordinal = be32_to_cpu(*((__be32 *) (buf + 6))); |
388 | if (count == 0) | 391 | if (count == 0) |
@@ -1102,6 +1105,7 @@ ssize_t tpm_read(struct file *file, char __user *buf, | |||
1102 | { | 1105 | { |
1103 | struct tpm_chip *chip = file->private_data; | 1106 | struct tpm_chip *chip = file->private_data; |
1104 | ssize_t ret_size; | 1107 | ssize_t ret_size; |
1108 | int rc; | ||
1105 | 1109 | ||
1106 | del_singleshot_timer_sync(&chip->user_read_timer); | 1110 | del_singleshot_timer_sync(&chip->user_read_timer); |
1107 | flush_work_sync(&chip->work); | 1111 | flush_work_sync(&chip->work); |
@@ -1112,8 +1116,11 @@ ssize_t tpm_read(struct file *file, char __user *buf, | |||
1112 | ret_size = size; | 1116 | ret_size = size; |
1113 | 1117 | ||
1114 | mutex_lock(&chip->buffer_mutex); | 1118 | mutex_lock(&chip->buffer_mutex); |
1115 | if (copy_to_user(buf, chip->data_buffer, ret_size)) | 1119 | rc = copy_to_user(buf, chip->data_buffer, ret_size); |
1120 | memset(chip->data_buffer, 0, ret_size); | ||
1121 | if (rc) | ||
1116 | ret_size = -EFAULT; | 1122 | ret_size = -EFAULT; |
1123 | |||
1117 | mutex_unlock(&chip->buffer_mutex); | 1124 | mutex_unlock(&chip->buffer_mutex); |
1118 | } | 1125 | } |
1119 | 1126 | ||
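Two hardening changes appear in tpm.c: tpm_transmit() clamps the caller-supplied length to TPM_BUFSIZE before parsing it, and tpm_read() scrubs the driver buffer after copy_to_user() even when the copy fails, so command data does not linger. A reduced sketch of the read-side pattern (buffer handling only, names and sizes simplified, not the TPM driver itself):

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/string.h>
    #include <linux/uaccess.h>

    #define EXAMPLE_BUFSIZE 4096

    static ssize_t example_read(char __user *buf, u8 *data, size_t size)
    {
            int rc;

            if (size > EXAMPLE_BUFSIZE)     /* clamp the untrusted length first */
                    size = EXAMPLE_BUFSIZE;

            rc = copy_to_user(buf, data, size);
            memset(data, 0, size);          /* scrub the kernel buffer either way */

            return rc ? -EFAULT : size;
    }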
diff --git a/drivers/char/tpm/tpm_nsc.c b/drivers/char/tpm/tpm_nsc.c index 82facc9104c7..4d2464871ada 100644 --- a/drivers/char/tpm/tpm_nsc.c +++ b/drivers/char/tpm/tpm_nsc.c | |||
@@ -396,8 +396,6 @@ static void __exit cleanup_nsc(void) | |||
396 | if (pdev) { | 396 | if (pdev) { |
397 | tpm_nsc_remove(&pdev->dev); | 397 | tpm_nsc_remove(&pdev->dev); |
398 | platform_device_unregister(pdev); | 398 | platform_device_unregister(pdev); |
399 | kfree(pdev); | ||
400 | pdev = NULL; | ||
401 | } | 399 | } |
402 | 400 | ||
403 | platform_driver_unregister(&nsc_drv); | 401 | platform_driver_unregister(&nsc_drv); |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ce045a8cf82c..f07e4252b708 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -67,11 +67,11 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600); | |||
67 | MODULE_PARM_DESC(i915_enable_rc6, | 67 | MODULE_PARM_DESC(i915_enable_rc6, |
68 | "Enable power-saving render C-state 6 (default: true)"); | 68 | "Enable power-saving render C-state 6 (default: true)"); |
69 | 69 | ||
70 | unsigned int i915_enable_fbc __read_mostly = 1; | 70 | unsigned int i915_enable_fbc __read_mostly = -1; |
71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); | 71 | module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); |
72 | MODULE_PARM_DESC(i915_enable_fbc, | 72 | MODULE_PARM_DESC(i915_enable_fbc, |
73 | "Enable frame buffer compression for power savings " | 73 | "Enable frame buffer compression for power savings " |
74 | "(default: false)"); | 74 | "(default: -1 (use per-chip default))"); |
75 | 75 | ||
76 | unsigned int i915_lvds_downclock __read_mostly = 0; | 76 | unsigned int i915_lvds_downclock __read_mostly = 0; |
77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); | 77 | module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 56a8554d9039..04411ad2e779 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1799,6 +1799,7 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1799 | struct drm_framebuffer *fb; | 1799 | struct drm_framebuffer *fb; |
1800 | struct intel_framebuffer *intel_fb; | 1800 | struct intel_framebuffer *intel_fb; |
1801 | struct drm_i915_gem_object *obj; | 1801 | struct drm_i915_gem_object *obj; |
1802 | int enable_fbc; | ||
1802 | 1803 | ||
1803 | DRM_DEBUG_KMS("\n"); | 1804 | DRM_DEBUG_KMS("\n"); |
1804 | 1805 | ||
@@ -1839,8 +1840,15 @@ static void intel_update_fbc(struct drm_device *dev) | |||
1839 | intel_fb = to_intel_framebuffer(fb); | 1840 | intel_fb = to_intel_framebuffer(fb); |
1840 | obj = intel_fb->obj; | 1841 | obj = intel_fb->obj; |
1841 | 1842 | ||
1842 | if (!i915_enable_fbc) { | 1843 | enable_fbc = i915_enable_fbc; |
1843 | DRM_DEBUG_KMS("fbc disabled per module param (default off)\n"); | 1844 | if (enable_fbc < 0) { |
1845 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); | ||
1846 | enable_fbc = 1; | ||
1847 | if (INTEL_INFO(dev)->gen <= 5) | ||
1848 | enable_fbc = 0; | ||
1849 | } | ||
1850 | if (!enable_fbc) { | ||
1851 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | ||
1844 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; | 1852 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1845 | goto out_disable; | 1853 | goto out_disable; |
1846 | } | 1854 | } |
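With the module default changed to -1, a negative i915_enable_fbc now means "use the per-chip default", resolved at runtime: per the hunk above, generations up to 5 stay off and newer ones default to on, while 0 and 1 remain explicit overrides. A tiny illustration of that tri-state resolution (stand-alone, the gen threshold taken from the hunk):

    #include <stdio.h>

    static int resolve_fbc(int param, int gen)
    {
            if (param < 0)                 /* -1: defer to the per-chip default */
                    return gen > 5;        /* off for gen <= 5, on otherwise */
            return !!param;                /* 0 or 1: explicit user choice */
    }

    int main(void)
    {
            printf("gen4 default: %d\n", resolve_fbc(-1, 4));  /* 0 */
            printf("gen6 default: %d\n", resolve_fbc(-1, 6));  /* 1 */
            printf("forced on:    %d\n", resolve_fbc(1, 4));   /* 1 */
            return 0;
    }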
@@ -4687,13 +4695,13 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4687 | bpc = 6; /* min is 18bpp */ | 4695 | bpc = 6; /* min is 18bpp */ |
4688 | break; | 4696 | break; |
4689 | case 24: | 4697 | case 24: |
4690 | bpc = min((unsigned int)8, display_bpc); | 4698 | bpc = 8; |
4691 | break; | 4699 | break; |
4692 | case 30: | 4700 | case 30: |
4693 | bpc = min((unsigned int)10, display_bpc); | 4701 | bpc = 10; |
4694 | break; | 4702 | break; |
4695 | case 48: | 4703 | case 48: |
4696 | bpc = min((unsigned int)12, display_bpc); | 4704 | bpc = 12; |
4697 | break; | 4705 | break; |
4698 | default: | 4706 | default: |
4699 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); | 4707 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); |
@@ -4701,10 +4709,12 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |||
4701 | break; | 4709 | break; |
4702 | } | 4710 | } |
4703 | 4711 | ||
4712 | display_bpc = min(display_bpc, bpc); | ||
4713 | |||
4704 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", | 4714 | DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", |
4705 | bpc, display_bpc); | 4715 | bpc, display_bpc); |
4706 | 4716 | ||
4707 | *pipe_bpp = bpc * 3; | 4717 | *pipe_bpp = display_bpc * 3; |
4708 | 4718 | ||
4709 | return display_bpc != bpc; | 4719 | return display_bpc != bpc; |
4710 | } | 4720 | } |
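In the bpp hunk, the pipe depth is now derived from the clamped display bpc rather than from the framebuffer-depth bpc alone, so a 6-bpc panel driven by a 24-bit framebuffer ends up with an 18-bit pipe and dithering enabled. A tiny stand-alone check of that arithmetic (values illustrative):

    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

    int main(void)
    {
            unsigned int display_bpc = 6;         /* e.g. a 6-bpc panel */
            unsigned int bpc = 8;                 /* from a 24-bit framebuffer */

            display_bpc = min_u(display_bpc, bpc);
            printf("pipe_bpp = %u\n", display_bpc * 3);   /* 18; != bpc, so dither */
            return 0;
    }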
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 0b2ee9d39980..fe1099d8817e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -337,9 +337,6 @@ extern void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, | |||
337 | struct drm_connector *connector, | 337 | struct drm_connector *connector, |
338 | struct intel_load_detect_pipe *old); | 338 | struct intel_load_detect_pipe *old); |
339 | 339 | ||
340 | extern struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB); | ||
341 | extern int intel_sdvo_supports_hotplug(struct drm_connector *connector); | ||
342 | extern void intel_sdvo_set_hotplug(struct drm_connector *connector, int enable); | ||
343 | extern void intelfb_restore(void); | 340 | extern void intelfb_restore(void); |
344 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | 341 | extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, |
345 | u16 blue, int regno); | 342 | u16 blue, int regno); |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 30fe554d8936..6348c499616f 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -92,6 +92,11 @@ struct intel_sdvo { | |||
92 | */ | 92 | */ |
93 | uint16_t attached_output; | 93 | uint16_t attached_output; |
94 | 94 | ||
95 | /* | ||
96 | * Hotplug activation bits for this device | ||
97 | */ | ||
98 | uint8_t hotplug_active[2]; | ||
99 | |||
95 | /** | 100 | /** |
96 | * This is used to select the color range of RGB outputs in HDMI mode. | 101 |
97 | * It is only valid when using TMDS encoding and 8 bit per color mode. | 102 | * It is only valid when using TMDS encoding and 8 bit per color mode. |
@@ -1208,74 +1213,20 @@ static bool intel_sdvo_get_capabilities(struct intel_sdvo *intel_sdvo, struct in | |||
1208 | return true; | 1213 | return true; |
1209 | } | 1214 | } |
1210 | 1215 | ||
1211 | /* No use! */ | 1216 | static int intel_sdvo_supports_hotplug(struct intel_sdvo *intel_sdvo) |
1212 | #if 0 | ||
1213 | struct drm_connector* intel_sdvo_find(struct drm_device *dev, int sdvoB) | ||
1214 | { | ||
1215 | struct drm_connector *connector = NULL; | ||
1216 | struct intel_sdvo *iout = NULL; | ||
1217 | struct intel_sdvo *sdvo; | ||
1218 | |||
1219 | /* find the sdvo connector */ | ||
1220 | list_for_each_entry(connector, &dev->mode_config.connector_list, head) { | ||
1221 | iout = to_intel_sdvo(connector); | ||
1222 | |||
1223 | if (iout->type != INTEL_OUTPUT_SDVO) | ||
1224 | continue; | ||
1225 | |||
1226 | sdvo = iout->dev_priv; | ||
1227 | |||
1228 | if (sdvo->sdvo_reg == SDVOB && sdvoB) | ||
1229 | return connector; | ||
1230 | |||
1231 | if (sdvo->sdvo_reg == SDVOC && !sdvoB) | ||
1232 | return connector; | ||
1233 | |||
1234 | } | ||
1235 | |||
1236 | return NULL; | ||
1237 | } | ||
1238 | |||
1239 | int intel_sdvo_supports_hotplug(struct drm_connector *connector) | ||
1240 | { | 1217 | { |
1241 | u8 response[2]; | 1218 | u8 response[2]; |
1242 | u8 status; | ||
1243 | struct intel_sdvo *intel_sdvo; | ||
1244 | DRM_DEBUG_KMS("\n"); | ||
1245 | |||
1246 | if (!connector) | ||
1247 | return 0; | ||
1248 | |||
1249 | intel_sdvo = to_intel_sdvo(connector); | ||
1250 | 1219 | ||
1251 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, | 1220 | return intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, |
1252 | &response, 2) && response[0]; | 1221 | &response, 2) && response[0]; |
1253 | } | 1222 | } |
1254 | 1223 | ||
1255 | void intel_sdvo_set_hotplug(struct drm_connector *connector, int on) | 1224 | static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) |
1256 | { | 1225 | { |
1257 | u8 response[2]; | 1226 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(&encoder->base); |
1258 | u8 status; | ||
1259 | struct intel_sdvo *intel_sdvo = to_intel_sdvo(connector); | ||
1260 | |||
1261 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | ||
1262 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1263 | |||
1264 | if (on) { | ||
1265 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_HOT_PLUG_SUPPORT, NULL, 0); | ||
1266 | status = intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1267 | |||
1268 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1269 | } else { | ||
1270 | response[0] = 0; | ||
1271 | response[1] = 0; | ||
1272 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &response, 2); | ||
1273 | } | ||
1274 | 1227 | ||
1275 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, NULL, 0); | 1228 | intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, &intel_sdvo->hotplug_active, 2); |
1276 | intel_sdvo_read_response(intel_sdvo, &response, 2); | ||
1277 | } | 1229 | } |
1278 | #endif | ||
1279 | 1230 | ||
1280 | static bool | 1231 | static bool |
1281 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
@@ -2045,6 +1996,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2045 | { | 1996 | { |
2046 | struct drm_encoder *encoder = &intel_sdvo->base.base; | 1997 | struct drm_encoder *encoder = &intel_sdvo->base.base; |
2047 | struct drm_connector *connector; | 1998 | struct drm_connector *connector; |
1999 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
2048 | struct intel_connector *intel_connector; | 2000 | struct intel_connector *intel_connector; |
2049 | struct intel_sdvo_connector *intel_sdvo_connector; | 2001 | struct intel_sdvo_connector *intel_sdvo_connector; |
2050 | 2002 | ||
@@ -2062,7 +2014,17 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2062 | 2014 | ||
2063 | intel_connector = &intel_sdvo_connector->base; | 2015 | intel_connector = &intel_sdvo_connector->base; |
2064 | connector = &intel_connector->base; | 2016 | connector = &intel_connector->base; |
2065 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | 2017 | if (intel_sdvo_supports_hotplug(intel_sdvo) & (1 << device)) { |
2018 | connector->polled = DRM_CONNECTOR_POLL_HPD; | ||
2019 | intel_sdvo->hotplug_active[0] |= 1 << device; | ||
2020 | /* Some SDVO devices have one-shot hotplug interrupts. | ||
2021 | * Ensure that they get re-enabled when an interrupt happens. | ||
2022 | */ | ||
2023 | intel_encoder->hot_plug = intel_sdvo_enable_hotplug; | ||
2024 | intel_sdvo_enable_hotplug(intel_encoder); | ||
2025 | } | ||
2026 | else | ||
2027 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT; | ||
2066 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; | 2028 | encoder->encoder_type = DRM_MODE_ENCODER_TMDS; |
2067 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; | 2029 | connector->connector_type = DRM_MODE_CONNECTOR_DVID; |
2068 | 2030 | ||
@@ -2569,6 +2531,14 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) | |||
2569 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) | 2531 | if (!intel_sdvo_get_capabilities(intel_sdvo, &intel_sdvo->caps)) |
2570 | goto err; | 2532 | goto err; |
2571 | 2533 | ||
2534 | /* Set up hotplug command - note paranoia about contents of reply. | ||
2535 | * We assume that the hardware is in a sane state, and only touch | ||
2536 | * the bits we think we understand. | ||
2537 | */ | ||
2538 | intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ACTIVE_HOT_PLUG, | ||
2539 | &intel_sdvo->hotplug_active, 2); | ||
2540 | intel_sdvo->hotplug_active[0] &= ~0x3; | ||
2541 | |||
2572 | if (intel_sdvo_output_setup(intel_sdvo, | 2542 | if (intel_sdvo_output_setup(intel_sdvo, |
2573 | intel_sdvo->caps.output_flags) != true) { | 2543 | intel_sdvo->caps.output_flags) != true) { |
2574 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", | 2544 | DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", |
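The DVI-init hunk keys everything off a per-device bit: when intel_sdvo_supports_hotplug() reports bit (1 << device), the connector uses hardware hotplug (DRM_CONNECTOR_POLL_HPD) and records the bit in hotplug_active so the one-shot interrupt can be re-armed via the encoder's hot_plug hook; otherwise it falls back to connect/disconnect polling. A small illustration of the mask bookkeeping (types and values simplified):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint8_t hotplug_support = 0x02;   /* say the encoder reports device 1 only */
            uint8_t hotplug_active = 0;

            for (int device = 0; device < 2; device++) {
                    if (hotplug_support & (1 << device)) {
                            hotplug_active |= 1 << device;   /* re-armed on each hotplug IRQ */
                            printf("device %d: HPD interrupts\n", device);
                    } else {
                            printf("device %d: polled\n", device);
                    }
            }
            printf("hotplug_active = %#x\n", hotplug_active);
            return 0;
    }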
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 7ad43c6b1db7..4da23889fea6 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c | |||
@@ -115,6 +115,7 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
115 | u8 msg[20]; | 115 | u8 msg[20]; |
116 | int msg_bytes = send_bytes + 4; | 116 | int msg_bytes = send_bytes + 4; |
117 | u8 ack; | 117 | u8 ack; |
118 | unsigned retry; | ||
118 | 119 | ||
119 | if (send_bytes > 16) | 120 | if (send_bytes > 16) |
120 | return -1; | 121 | return -1; |
@@ -125,20 +126,20 @@ static int radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, | |||
125 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); | 126 | msg[3] = (msg_bytes << 4) | (send_bytes - 1); |
126 | memcpy(&msg[4], send, send_bytes); | 127 | memcpy(&msg[4], send, send_bytes); |
127 | 128 | ||
128 | while (1) { | 129 | for (retry = 0; retry < 4; retry++) { |
129 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 130 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
130 | msg, msg_bytes, NULL, 0, delay, &ack); | 131 | msg, msg_bytes, NULL, 0, delay, &ack); |
131 | if (ret < 0) | 132 | if (ret < 0) |
132 | return ret; | 133 | return ret; |
133 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 134 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
134 | break; | 135 | return send_bytes; |
135 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 136 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
136 | udelay(400); | 137 | udelay(400); |
137 | else | 138 | else |
138 | return -EIO; | 139 | return -EIO; |
139 | } | 140 | } |
140 | 141 | ||
141 | return send_bytes; | 142 | return -EIO; |
142 | } | 143 | } |
143 | 144 | ||
144 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | 145 | static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, |
@@ -149,26 +150,29 @@ static int radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, | |||
149 | int msg_bytes = 4; | 150 | int msg_bytes = 4; |
150 | u8 ack; | 151 | u8 ack; |
151 | int ret; | 152 | int ret; |
153 | unsigned retry; | ||
152 | 154 | ||
153 | msg[0] = address; | 155 | msg[0] = address; |
154 | msg[1] = address >> 8; | 156 | msg[1] = address >> 8; |
155 | msg[2] = AUX_NATIVE_READ << 4; | 157 | msg[2] = AUX_NATIVE_READ << 4; |
156 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); | 158 | msg[3] = (msg_bytes << 4) | (recv_bytes - 1); |
157 | 159 | ||
158 | while (1) { | 160 | for (retry = 0; retry < 4; retry++) { |
159 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, | 161 | ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, |
160 | msg, msg_bytes, recv, recv_bytes, delay, &ack); | 162 | msg, msg_bytes, recv, recv_bytes, delay, &ack); |
161 | if (ret == 0) | ||
162 | return -EPROTO; | ||
163 | if (ret < 0) | 163 | if (ret < 0) |
164 | return ret; | 164 | return ret; |
165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) | 165 | if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) |
166 | return ret; | 166 | return ret; |
167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) | 167 | else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER) |
168 | udelay(400); | 168 | udelay(400); |
169 | else if (ret == 0) | ||
170 | return -EPROTO; | ||
169 | else | 171 | else |
170 | return -EIO; | 172 | return -EIO; |
171 | } | 173 | } |
174 | |||
175 | return -EIO; | ||
172 | } | 176 | } |
173 | 177 | ||
174 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, | 178 | static void radeon_write_dpcd_reg(struct radeon_connector *radeon_connector, |
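Both aux helpers replace a potentially unbounded while (1) with a four-attempt loop: an ACK returns success, a DEFER waits 400µs and retries, and anything else, or exhausting the retries, fails with -EIO. A generic bounded-retry sketch of the same shape (the transfer and delay helpers are placeholders, not the radeon code):

    #include <errno.h>

    enum reply { REPLY_ACK, REPLY_DEFER, REPLY_ERROR };

    /* Placeholder transfer: a real implementation would talk to the aux channel. */
    static enum reply do_transfer(void) { return REPLY_ACK; }
    static void wait_400us(void) { }

    int send_with_retries(void)
    {
            for (unsigned int retry = 0; retry < 4; retry++) {
                    switch (do_transfer()) {
                    case REPLY_ACK:
                            return 0;          /* sink accepted the transaction */
                    case REPLY_DEFER:
                            wait_400us();      /* sink busy: back off and retry */
                            break;
                    default:
                            return -EIO;       /* hard failure: give up at once */
                    }
            }
            return -EIO;                       /* still deferred after four tries */
    }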
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index e8a746712b5b..c4ffa14fb2f4 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c | |||
@@ -1590,48 +1590,6 @@ static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
1590 | return backend_map; | 1590 | return backend_map; |
1591 | } | 1591 | } |
1592 | 1592 | ||
1593 | static void evergreen_program_channel_remap(struct radeon_device *rdev) | ||
1594 | { | ||
1595 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
1596 | |||
1597 | tmp = RREG32(MC_SHARED_CHMAP); | ||
1598 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
1599 | case 0: | ||
1600 | case 1: | ||
1601 | case 2: | ||
1602 | case 3: | ||
1603 | default: | ||
1604 | /* default mapping */ | ||
1605 | mc_shared_chremap = 0x00fac688; | ||
1606 | break; | ||
1607 | } | ||
1608 | |||
1609 | switch (rdev->family) { | ||
1610 | case CHIP_HEMLOCK: | ||
1611 | case CHIP_CYPRESS: | ||
1612 | case CHIP_BARTS: | ||
1613 | tcp_chan_steer_lo = 0x54763210; | ||
1614 | tcp_chan_steer_hi = 0x0000ba98; | ||
1615 | break; | ||
1616 | case CHIP_JUNIPER: | ||
1617 | case CHIP_REDWOOD: | ||
1618 | case CHIP_CEDAR: | ||
1619 | case CHIP_PALM: | ||
1620 | case CHIP_SUMO: | ||
1621 | case CHIP_SUMO2: | ||
1622 | case CHIP_TURKS: | ||
1623 | case CHIP_CAICOS: | ||
1624 | default: | ||
1625 | tcp_chan_steer_lo = 0x76543210; | ||
1626 | tcp_chan_steer_hi = 0x0000ba98; | ||
1627 | break; | ||
1628 | } | ||
1629 | |||
1630 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
1631 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
1632 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
1633 | } | ||
1634 | |||
1635 | static void evergreen_gpu_init(struct radeon_device *rdev) | 1593 | static void evergreen_gpu_init(struct radeon_device *rdev) |
1636 | { | 1594 | { |
1637 | u32 cc_rb_backend_disable = 0; | 1595 | u32 cc_rb_backend_disable = 0; |
@@ -2078,8 +2036,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev) | |||
2078 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 2036 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
2079 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 2037 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
2080 | 2038 | ||
2081 | evergreen_program_channel_remap(rdev); | ||
2082 | |||
2083 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; | 2039 | num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; |
2084 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; | 2040 | grbm_gfx_index = INSTANCE_BROADCAST_WRITES; |
2085 | 2041 | ||
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 99fbd793c08c..8c79ca97753d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c | |||
@@ -569,36 +569,6 @@ static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
569 | return backend_map; | 569 | return backend_map; |
570 | } | 570 | } |
571 | 571 | ||
572 | static void cayman_program_channel_remap(struct radeon_device *rdev) | ||
573 | { | ||
574 | u32 tcp_chan_steer_lo, tcp_chan_steer_hi, mc_shared_chremap, tmp; | ||
575 | |||
576 | tmp = RREG32(MC_SHARED_CHMAP); | ||
577 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
578 | case 0: | ||
579 | case 1: | ||
580 | case 2: | ||
581 | case 3: | ||
582 | default: | ||
583 | /* default mapping */ | ||
584 | mc_shared_chremap = 0x00fac688; | ||
585 | break; | ||
586 | } | ||
587 | |||
588 | switch (rdev->family) { | ||
589 | case CHIP_CAYMAN: | ||
590 | default: | ||
591 | //tcp_chan_steer_lo = 0x54763210 | ||
592 | tcp_chan_steer_lo = 0x76543210; | ||
593 | tcp_chan_steer_hi = 0x0000ba98; | ||
594 | break; | ||
595 | } | ||
596 | |||
597 | WREG32(TCP_CHAN_STEER_LO, tcp_chan_steer_lo); | ||
598 | WREG32(TCP_CHAN_STEER_HI, tcp_chan_steer_hi); | ||
599 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
600 | } | ||
601 | |||
602 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, | 572 | static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev, |
603 | u32 disable_mask_per_se, | 573 | u32 disable_mask_per_se, |
604 | u32 max_disable_mask_per_se, | 574 | u32 max_disable_mask_per_se, |
@@ -842,8 +812,6 @@ static void cayman_gpu_init(struct radeon_device *rdev) | |||
842 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); | 812 | WREG32(DMIF_ADDR_CONFIG, gb_addr_config); |
843 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); | 813 | WREG32(HDP_ADDR_CONFIG, gb_addr_config); |
844 | 814 | ||
845 | cayman_program_channel_remap(rdev); | ||
846 | |||
847 | /* primary versions */ | 815 | /* primary versions */ |
848 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 816 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
849 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 817 | WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index 5b1837b4aacf..7fcdbbbf2979 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c | |||
@@ -773,8 +773,8 @@ int r100_copy_blit(struct radeon_device *rdev, | |||
773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 773 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
774 | radeon_ring_write(rdev, 0); | 774 | radeon_ring_write(rdev, 0); |
775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); | 775 | radeon_ring_write(rdev, (0x1fff) | (0x1fff << 16)); |
776 | radeon_ring_write(rdev, cur_pages); | 776 | radeon_ring_write(rdev, num_gpu_pages); |
777 | radeon_ring_write(rdev, cur_pages); | 777 | radeon_ring_write(rdev, num_gpu_pages); |
778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); | 778 | radeon_ring_write(rdev, cur_pages | (stride_pixels << 16)); |
779 | } | 779 | } |
780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); | 780 | radeon_ring_write(rdev, PACKET0(RADEON_DSTCACHE_CTLSTAT, 0)); |
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c index c4b8741dbf58..bce63fd329d4 100644 --- a/drivers/gpu/drm/radeon/radeon_connectors.c +++ b/drivers/gpu/drm/radeon/radeon_connectors.c | |||
@@ -68,11 +68,11 @@ void radeon_connector_hotplug(struct drm_connector *connector) | |||
68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { | 68 | if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { |
69 | int saved_dpms = connector->dpms; | 69 | int saved_dpms = connector->dpms; |
70 | 70 | ||
71 | if (radeon_hpd_sense(rdev, radeon_connector->hpd.hpd) && | 71 | /* Only turn off the display if it's physically disconnected */ |
72 | radeon_dp_needs_link_train(radeon_connector)) | 72 | if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) |
73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
74 | else | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); | 73 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF); |
74 | else if (radeon_dp_needs_link_train(radeon_connector)) | ||
75 | drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON); | ||
76 | connector->dpms = saved_dpms; | 76 | connector->dpms = saved_dpms; |
77 | } | 77 | } |
78 | } | 78 | } |
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c index 3189a7efb2e9..fde25c0d65a0 100644 --- a/drivers/gpu/drm/radeon/radeon_cursor.c +++ b/drivers/gpu/drm/radeon/radeon_cursor.c | |||
@@ -208,23 +208,25 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
208 | int xorigin = 0, yorigin = 0; | 208 | int xorigin = 0, yorigin = 0; |
209 | int w = radeon_crtc->cursor_width; | 209 | int w = radeon_crtc->cursor_width; |
210 | 210 | ||
211 | if (x < 0) | ||
212 | xorigin = -x + 1; | ||
213 | if (y < 0) | ||
214 | yorigin = -y + 1; | ||
215 | if (xorigin >= CURSOR_WIDTH) | ||
216 | xorigin = CURSOR_WIDTH - 1; | ||
217 | if (yorigin >= CURSOR_HEIGHT) | ||
218 | yorigin = CURSOR_HEIGHT - 1; | ||
219 | |||
220 | if (ASIC_IS_AVIVO(rdev)) { | 211 | if (ASIC_IS_AVIVO(rdev)) { |
221 | int i = 0; | ||
222 | struct drm_crtc *crtc_p; | ||
223 | |||
224 | /* avivo cursor are offset into the total surface */ | 212 | /* avivo cursor are offset into the total surface */ |
225 | x += crtc->x; | 213 | x += crtc->x; |
226 | y += crtc->y; | 214 | y += crtc->y; |
227 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | 215 | } |
216 | DRM_DEBUG("x %d y %d c->x %d c->y %d\n", x, y, crtc->x, crtc->y); | ||
217 | |||
218 | if (x < 0) { | ||
219 | xorigin = min(-x, CURSOR_WIDTH - 1); | ||
220 | x = 0; | ||
221 | } | ||
222 | if (y < 0) { | ||
223 | yorigin = min(-y, CURSOR_HEIGHT - 1); | ||
224 | y = 0; | ||
225 | } | ||
226 | |||
227 | if (ASIC_IS_AVIVO(rdev)) { | ||
228 | int i = 0; | ||
229 | struct drm_crtc *crtc_p; | ||
228 | 230 | ||
229 | /* avivo cursor image can't end on 128 pixel boundary or | 231 | /* avivo cursor image can't end on 128 pixel boundary or |
230 | * go past the end of the frame if both crtcs are enabled | 232 | * go past the end of the frame if both crtcs are enabled |
@@ -253,16 +255,12 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
253 | 255 | ||
254 | radeon_lock_cursor(crtc, true); | 256 | radeon_lock_cursor(crtc, true); |
255 | if (ASIC_IS_DCE4(rdev)) { | 257 | if (ASIC_IS_DCE4(rdev)) { |
256 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, | 258 | WREG32(EVERGREEN_CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
257 | ((xorigin ? 0 : x) << 16) | | ||
258 | (yorigin ? 0 : y)); | ||
259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 259 | WREG32(EVERGREEN_CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, | 260 | WREG32(EVERGREEN_CUR_SIZE + radeon_crtc->crtc_offset, |
261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 261 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
262 | } else if (ASIC_IS_AVIVO(rdev)) { | 262 | } else if (ASIC_IS_AVIVO(rdev)) { |
263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, | 263 | WREG32(AVIVO_D1CUR_POSITION + radeon_crtc->crtc_offset, (x << 16) | y); |
264 | ((xorigin ? 0 : x) << 16) | | ||
265 | (yorigin ? 0 : y)); | ||
266 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); | 264 | WREG32(AVIVO_D1CUR_HOT_SPOT + radeon_crtc->crtc_offset, (xorigin << 16) | yorigin); |
267 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, | 265 | WREG32(AVIVO_D1CUR_SIZE + radeon_crtc->crtc_offset, |
268 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); | 266 | ((w - 1) << 16) | (radeon_crtc->cursor_height - 1)); |
@@ -276,8 +274,8 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc, | |||
276 | | yorigin)); | 274 | | yorigin)); |
277 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, | 275 | WREG32(RADEON_CUR_HORZ_VERT_POSN + radeon_crtc->crtc_offset, |
278 | (RADEON_CUR_LOCK | 276 | (RADEON_CUR_LOCK |
279 | | ((xorigin ? 0 : x) << 16) | 277 | | (x << 16) |
280 | | (yorigin ? 0 : y))); | 278 | | y)); |
281 | /* offset is from DISP(2)_BASE_ADDRESS */ | 279 | /* offset is from DISP(2)_BASE_ADDRESS */ |
282 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + | 280 | WREG32(RADEON_CUR_OFFSET + radeon_crtc->crtc_offset, (radeon_crtc->legacy_cursor_offset + |
283 | (yorigin * 256))); | 281 | (yorigin * 256))); |
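The cursor fix turns a negative position into a hot-spot offset: the origin is clamped to 0, the first -x pixels (capped at CURSOR_WIDTH - 1) become the hot spot, and the position and hot-spot registers are programmed consistently instead of the old "-x + 1" computation with conditionally zeroed coordinates. A tiny worked example (constants illustrative):

    #include <stdio.h>

    #define CURSOR_WIDTH 64

    static int min_i(int a, int b) { return a < b ? a : b; }

    int main(void)
    {
            int x = -10;               /* cursor partially off the left edge */
            int xorigin = 0;

            if (x < 0) {
                    xorigin = min_i(-x, CURSOR_WIDTH - 1);  /* 10 */
                    x = 0;                                  /* position register gets 0 */
            }
            printf("position x = %d, hot spot x = %d\n", x, xorigin);
            return 0;
    }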
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c index 319d85d7e759..13690f3eb4a4 100644 --- a/drivers/gpu/drm/radeon/radeon_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_encoders.c | |||
@@ -1507,7 +1507,14 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode) | |||
1507 | switch (mode) { | 1507 | switch (mode) { |
1508 | case DRM_MODE_DPMS_ON: | 1508 | case DRM_MODE_DPMS_ON: |
1509 | args.ucAction = ATOM_ENABLE; | 1509 | args.ucAction = ATOM_ENABLE; |
1510 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1510 | /* workaround for DVOOutputControl on some RS690 systems */ |
1511 | if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_DDI) { | ||
1512 | u32 reg = RREG32(RADEON_BIOS_3_SCRATCH); | ||
1513 | WREG32(RADEON_BIOS_3_SCRATCH, reg & ~ATOM_S3_DFP2I_ACTIVE); | ||
1514 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1515 | WREG32(RADEON_BIOS_3_SCRATCH, reg); | ||
1516 | } else | ||
1517 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | ||
1511 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 1518 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
1512 | args.ucAction = ATOM_LCD_BLON; | 1519 | args.ucAction = ATOM_LCD_BLON; |
1513 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); | 1520 | atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); |
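The RS690 workaround brackets the AtomBIOS call by temporarily clearing the DFP2I-active bit in the BIOS scratch register and restoring the original value afterwards. A generic save/mask/restore sketch of that shape (the register accessors are placeholders, not radeon code):

    #include <stdint.h>

    static uint32_t example_reg;                       /* stands in for a scratch register */
    static uint32_t reg_read(void)        { return example_reg; }
    static void     reg_write(uint32_t v) { example_reg = v; }
    static void     do_work(void)         { /* e.g. execute the AtomBIOS table */ }

    void run_with_bit_masked(uint32_t mask)
    {
            uint32_t saved = reg_read();   /* remember the original contents */

            reg_write(saved & ~mask);      /* temporarily clear the troublesome bit */
            do_work();
            reg_write(saved);              /* restore the register unchanged */
    }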
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4720d000d440..b13c2eedc321 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c | |||
@@ -536,55 +536,6 @@ static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev, | |||
536 | return backend_map; | 536 | return backend_map; |
537 | } | 537 | } |
538 | 538 | ||
539 | static void rv770_program_channel_remap(struct radeon_device *rdev) | ||
540 | { | ||
541 | u32 tcp_chan_steer, mc_shared_chremap, tmp; | ||
542 | bool force_no_swizzle; | ||
543 | |||
544 | switch (rdev->family) { | ||
545 | case CHIP_RV770: | ||
546 | case CHIP_RV730: | ||
547 | force_no_swizzle = false; | ||
548 | break; | ||
549 | case CHIP_RV710: | ||
550 | case CHIP_RV740: | ||
551 | default: | ||
552 | force_no_swizzle = true; | ||
553 | break; | ||
554 | } | ||
555 | |||
556 | tmp = RREG32(MC_SHARED_CHMAP); | ||
557 | switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) { | ||
558 | case 0: | ||
559 | case 1: | ||
560 | default: | ||
561 | /* default mapping */ | ||
562 | mc_shared_chremap = 0x00fac688; | ||
563 | break; | ||
564 | case 2: | ||
565 | case 3: | ||
566 | if (force_no_swizzle) | ||
567 | mc_shared_chremap = 0x00fac688; | ||
568 | else | ||
569 | mc_shared_chremap = 0x00bbc298; | ||
570 | break; | ||
571 | } | ||
572 | |||
573 | if (rdev->family == CHIP_RV740) | ||
574 | tcp_chan_steer = 0x00ef2a60; | ||
575 | else | ||
576 | tcp_chan_steer = 0x00fac688; | ||
577 | |||
578 | /* RV770 CE has special chremap setup */ | ||
579 | if (rdev->pdev->device == 0x944e) { | ||
580 | tcp_chan_steer = 0x00b08b08; | ||
581 | mc_shared_chremap = 0x00b08b08; | ||
582 | } | ||
583 | |||
584 | WREG32(TCP_CHAN_STEER, tcp_chan_steer); | ||
585 | WREG32(MC_SHARED_CHREMAP, mc_shared_chremap); | ||
586 | } | ||
587 | |||
588 | static void rv770_gpu_init(struct radeon_device *rdev) | 539 | static void rv770_gpu_init(struct radeon_device *rdev) |
589 | { | 540 | { |
590 | int i, j, num_qd_pipes; | 541 | int i, j, num_qd_pipes; |
@@ -785,8 +736,6 @@ static void rv770_gpu_init(struct radeon_device *rdev) | |||
785 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 736 | WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
786 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); | 737 | WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); |
787 | 738 | ||
788 | rv770_program_channel_remap(rdev); | ||
789 | |||
790 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); | 739 | WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); |
791 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 740 | WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
792 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); | 741 | WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); |
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 411257676133..932383786642 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -36,17 +36,25 @@ | |||
36 | #include <linux/cpu.h> | 36 | #include <linux/cpu.h> |
37 | #include <linux/pci.h> | 37 | #include <linux/pci.h> |
38 | #include <linux/smp.h> | 38 | #include <linux/smp.h> |
39 | #include <linux/moduleparam.h> | ||
39 | #include <asm/msr.h> | 40 | #include <asm/msr.h> |
40 | #include <asm/processor.h> | 41 | #include <asm/processor.h> |
41 | 42 | ||
42 | #define DRVNAME "coretemp" | 43 | #define DRVNAME "coretemp" |
43 | 44 | ||
45 | /* | ||
46 | * force_tjmax only matters when TjMax can't be read from the CPU itself. | ||
47 | * When set, it replaces the driver's suboptimal heuristic. | ||
48 | */ | ||
49 | static int force_tjmax; | ||
50 | module_param_named(tjmax, force_tjmax, int, 0444); | ||
51 | MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); | ||
52 | |||
44 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ | 53 | #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ |
45 | #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ | 54 | #define NUM_REAL_CORES 16 /* Number of Real cores per cpu */ |
46 | #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ | 55 | #define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ |
47 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ | 56 | #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ |
48 | #define MAX_THRESH_ATTRS 3 /* Maximum no of Threshold attrs */ | 57 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) |
49 | #define TOTAL_ATTRS (MAX_CORE_ATTRS + MAX_THRESH_ATTRS) | ||
50 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) | 58 | #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) |
51 | 59 | ||
52 | #ifdef CONFIG_SMP | 60 | #ifdef CONFIG_SMP |
@@ -69,8 +77,6 @@ | |||
69 | * This value is passed as "id" field to rdmsr/wrmsr functions. | 77 | * This value is passed as "id" field to rdmsr/wrmsr functions. |
70 | * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, | 78 | * @status_reg: One of IA32_THERM_STATUS or IA32_PACKAGE_THERM_STATUS, |
71 | * from where the temperature values should be read. | 79 | * from where the temperature values should be read. |
72 | * @intrpt_reg: One of IA32_THERM_INTERRUPT or IA32_PACKAGE_THERM_INTERRUPT, | ||
73 | * from where the thresholds are read. | ||
74 | * @attr_size: Total number of pre-core attrs displayed in the sysfs. | 80 | * @attr_size: Total number of pre-core attrs displayed in the sysfs. |
75 | * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. | 81 | * @is_pkg_data: If this is 1, the temp_data holds pkgtemp data. |
76 | * Otherwise, temp_data holds coretemp data. | 82 | * Otherwise, temp_data holds coretemp data. |
@@ -79,13 +85,11 @@ | |||
79 | struct temp_data { | 85 | struct temp_data { |
80 | int temp; | 86 | int temp; |
81 | int ttarget; | 87 | int ttarget; |
82 | int tmin; | ||
83 | int tjmax; | 88 | int tjmax; |
84 | unsigned long last_updated; | 89 | unsigned long last_updated; |
85 | unsigned int cpu; | 90 | unsigned int cpu; |
86 | u32 cpu_core_id; | 91 | u32 cpu_core_id; |
87 | u32 status_reg; | 92 | u32 status_reg; |
88 | u32 intrpt_reg; | ||
89 | int attr_size; | 93 | int attr_size; |
90 | bool is_pkg_data; | 94 | bool is_pkg_data; |
91 | bool valid; | 95 | bool valid; |
@@ -143,19 +147,6 @@ static ssize_t show_crit_alarm(struct device *dev, | |||
143 | return sprintf(buf, "%d\n", (eax >> 5) & 1); | 147 | return sprintf(buf, "%d\n", (eax >> 5) & 1); |
144 | } | 148 | } |
145 | 149 | ||
146 | static ssize_t show_max_alarm(struct device *dev, | ||
147 | struct device_attribute *devattr, char *buf) | ||
148 | { | ||
149 | u32 eax, edx; | ||
150 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
151 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
152 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
153 | |||
154 | rdmsr_on_cpu(tdata->cpu, tdata->status_reg, &eax, &edx); | ||
155 | |||
156 | return sprintf(buf, "%d\n", !!(eax & THERM_STATUS_THRESHOLD1)); | ||
157 | } | ||
158 | |||
159 | static ssize_t show_tjmax(struct device *dev, | 150 | static ssize_t show_tjmax(struct device *dev, |
160 | struct device_attribute *devattr, char *buf) | 151 | struct device_attribute *devattr, char *buf) |
161 | { | 152 | { |
@@ -174,83 +165,6 @@ static ssize_t show_ttarget(struct device *dev, | |||
174 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); | 165 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->ttarget); |
175 | } | 166 | } |
176 | 167 | ||
177 | static ssize_t store_ttarget(struct device *dev, | ||
178 | struct device_attribute *devattr, | ||
179 | const char *buf, size_t count) | ||
180 | { | ||
181 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
182 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
183 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
184 | u32 eax, edx; | ||
185 | unsigned long val; | ||
186 | int diff; | ||
187 | |||
188 | if (strict_strtoul(buf, 10, &val)) | ||
189 | return -EINVAL; | ||
190 | |||
191 | /* | ||
192 | * THERM_MASK_THRESHOLD1 is 7 bits wide. Values are entered in terms | ||
193 | * of milli degree celsius. Hence don't accept val > (127 * 1000) | ||
194 | */ | ||
195 | if (val > tdata->tjmax || val > 127000) | ||
196 | return -EINVAL; | ||
197 | |||
198 | diff = (tdata->tjmax - val) / 1000; | ||
199 | |||
200 | mutex_lock(&tdata->update_lock); | ||
201 | rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); | ||
202 | eax = (eax & ~THERM_MASK_THRESHOLD1) | | ||
203 | (diff << THERM_SHIFT_THRESHOLD1); | ||
204 | wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); | ||
205 | tdata->ttarget = val; | ||
206 | mutex_unlock(&tdata->update_lock); | ||
207 | |||
208 | return count; | ||
209 | } | ||
210 | |||
211 | static ssize_t show_tmin(struct device *dev, | ||
212 | struct device_attribute *devattr, char *buf) | ||
213 | { | ||
214 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
215 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
216 | |||
217 | return sprintf(buf, "%d\n", pdata->core_data[attr->index]->tmin); | ||
218 | } | ||
219 | |||
220 | static ssize_t store_tmin(struct device *dev, | ||
221 | struct device_attribute *devattr, | ||
222 | const char *buf, size_t count) | ||
223 | { | ||
224 | struct platform_data *pdata = dev_get_drvdata(dev); | ||
225 | struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); | ||
226 | struct temp_data *tdata = pdata->core_data[attr->index]; | ||
227 | u32 eax, edx; | ||
228 | unsigned long val; | ||
229 | int diff; | ||
230 | |||
231 | if (strict_strtoul(buf, 10, &val)) | ||
232 | return -EINVAL; | ||
233 | |||
234 | /* | ||
235 | * THERM_MASK_THRESHOLD0 is 7 bits wide. Values are entered in terms | ||
236 | * of milli degree celsius. Hence don't accept val > (127 * 1000) | ||
237 | */ | ||
238 | if (val > tdata->tjmax || val > 127000) | ||
239 | return -EINVAL; | ||
240 | |||
241 | diff = (tdata->tjmax - val) / 1000; | ||
242 | |||
243 | mutex_lock(&tdata->update_lock); | ||
244 | rdmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, &eax, &edx); | ||
245 | eax = (eax & ~THERM_MASK_THRESHOLD0) | | ||
246 | (diff << THERM_SHIFT_THRESHOLD0); | ||
247 | wrmsr_on_cpu(tdata->cpu, tdata->intrpt_reg, eax, edx); | ||
248 | tdata->tmin = val; | ||
249 | mutex_unlock(&tdata->update_lock); | ||
250 | |||
251 | return count; | ||
252 | } | ||
253 | |||
254 | static ssize_t show_temp(struct device *dev, | 168 | static ssize_t show_temp(struct device *dev, |
255 | struct device_attribute *devattr, char *buf) | 169 | struct device_attribute *devattr, char *buf) |
256 | { | 170 | { |
@@ -374,7 +288,6 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
374 | 288 | ||
375 | static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | 289 | static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) |
376 | { | 290 | { |
377 | /* The 100C is default for both mobile and non mobile CPUs */ | ||
378 | int err; | 291 | int err; |
379 | u32 eax, edx; | 292 | u32 eax, edx; |
380 | u32 val; | 293 | u32 val; |
@@ -385,7 +298,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
385 | */ | 298 | */ |
386 | err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); | 299 | err = rdmsr_safe_on_cpu(id, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); |
387 | if (err) { | 300 | if (err) { |
388 | dev_warn(dev, "Unable to read TjMax from CPU.\n"); | 301 | if (c->x86_model > 0xe && c->x86_model != 0x1c) |
302 | dev_warn(dev, "Unable to read TjMax from CPU %u\n", id); | ||
389 | } else { | 303 | } else { |
390 | val = (eax >> 16) & 0xff; | 304 | val = (eax >> 16) & 0xff; |
391 | /* | 305 | /* |
@@ -393,11 +307,17 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev) | |||
393 | * will be used | 307 | * will be used |
394 | */ | 308 | */ |
395 | if (val) { | 309 | if (val) { |
396 | dev_info(dev, "TjMax is %d C.\n", val); | 310 | dev_dbg(dev, "TjMax is %d degrees C\n", val); |
397 | return val * 1000; | 311 | return val * 1000; |
398 | } | 312 | } |
399 | } | 313 | } |
400 | 314 | ||
315 | if (force_tjmax) { | ||
316 | dev_notice(dev, "TjMax forced to %d degrees C by user\n", | ||
317 | force_tjmax); | ||
318 | return force_tjmax * 1000; | ||
319 | } | ||
320 | |||
401 | /* | 321 | /* |
402 | * An assumption is made for early CPUs and unreadable MSR. | 322 | * An assumption is made for early CPUs and unreadable MSR. |
403 | * NOTE: the calculated value may not be correct. | 323 | * NOTE: the calculated value may not be correct. |
@@ -414,21 +334,6 @@ static void __devinit get_ucode_rev_on_cpu(void *edx) | |||
414 | rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); | 334 | rdmsr(MSR_IA32_UCODE_REV, eax, *(u32 *)edx); |
415 | } | 335 | } |
416 | 336 | ||
417 | static int get_pkg_tjmax(unsigned int cpu, struct device *dev) | ||
418 | { | ||
419 | int err; | ||
420 | u32 eax, edx, val; | ||
421 | |||
422 | err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx); | ||
423 | if (!err) { | ||
424 | val = (eax >> 16) & 0xff; | ||
425 | if (val) | ||
426 | return val * 1000; | ||
427 | } | ||
428 | dev_warn(dev, "Unable to read Pkg-TjMax from CPU:%u\n", cpu); | ||
429 | return 100000; /* Default TjMax: 100 degree celsius */ | ||
430 | } | ||
431 | |||
432 | static int create_name_attr(struct platform_data *pdata, struct device *dev) | 337 | static int create_name_attr(struct platform_data *pdata, struct device *dev) |
433 | { | 338 | { |
434 | sysfs_attr_init(&pdata->name_attr.attr); | 339 | sysfs_attr_init(&pdata->name_attr.attr); |
@@ -442,19 +347,14 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, | |||
442 | int attr_no) | 347 | int attr_no) |
443 | { | 348 | { |
444 | int err, i; | 349 | int err, i; |
445 | static ssize_t (*rd_ptr[TOTAL_ATTRS]) (struct device *dev, | 350 | static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev, |
446 | struct device_attribute *devattr, char *buf) = { | 351 | struct device_attribute *devattr, char *buf) = { |
447 | show_label, show_crit_alarm, show_temp, show_tjmax, | 352 | show_label, show_crit_alarm, show_temp, show_tjmax, |
448 | show_max_alarm, show_ttarget, show_tmin }; | 353 | show_ttarget }; |
449 | static ssize_t (*rw_ptr[TOTAL_ATTRS]) (struct device *dev, | 354 | static const char *const names[TOTAL_ATTRS] = { |
450 | struct device_attribute *devattr, const char *buf, | ||
451 | size_t count) = { NULL, NULL, NULL, NULL, NULL, | ||
452 | store_ttarget, store_tmin }; | ||
453 | static const char *names[TOTAL_ATTRS] = { | ||
454 | "temp%d_label", "temp%d_crit_alarm", | 355 | "temp%d_label", "temp%d_crit_alarm", |
455 | "temp%d_input", "temp%d_crit", | 356 | "temp%d_input", "temp%d_crit", |
456 | "temp%d_max_alarm", "temp%d_max", | 357 | "temp%d_max" }; |
457 | "temp%d_max_hyst" }; | ||
458 | 358 | ||
459 | for (i = 0; i < tdata->attr_size; i++) { | 359 | for (i = 0; i < tdata->attr_size; i++) { |
460 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], | 360 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], |
@@ -462,10 +362,6 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, | |||
462 | sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); | 362 | sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); |
463 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; | 363 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; |
464 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; | 364 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; |
465 | if (rw_ptr[i]) { | ||
466 | tdata->sd_attrs[i].dev_attr.attr.mode |= S_IWUSR; | ||
467 | tdata->sd_attrs[i].dev_attr.store = rw_ptr[i]; | ||
468 | } | ||
469 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; | 365 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; |
470 | tdata->sd_attrs[i].index = attr_no; | 366 | tdata->sd_attrs[i].index = attr_no; |
471 | err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); | 367 | err = device_create_file(dev, &tdata->sd_attrs[i].dev_attr); |
@@ -481,9 +377,9 @@ exit_free: | |||
481 | } | 377 | } |
482 | 378 | ||
483 | 379 | ||
484 | static int __devinit chk_ucode_version(struct platform_device *pdev) | 380 | static int __cpuinit chk_ucode_version(unsigned int cpu) |
485 | { | 381 | { |
486 | struct cpuinfo_x86 *c = &cpu_data(pdev->id); | 382 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
487 | int err; | 383 | int err; |
488 | u32 edx; | 384 | u32 edx; |
489 | 385 | ||
@@ -494,17 +390,15 @@ static int __devinit chk_ucode_version(struct platform_device *pdev) | |||
494 | */ | 390 | */ |
495 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { | 391 | if (c->x86_model == 0xe && c->x86_mask < 0xc) { |
496 | /* check for microcode update */ | 392 | /* check for microcode update */ |
497 | err = smp_call_function_single(pdev->id, get_ucode_rev_on_cpu, | 393 | err = smp_call_function_single(cpu, get_ucode_rev_on_cpu, |
498 | &edx, 1); | 394 | &edx, 1); |
499 | if (err) { | 395 | if (err) { |
500 | dev_err(&pdev->dev, | 396 | pr_err("Cannot determine microcode revision of " |
501 | "Cannot determine microcode revision of " | 397 | "CPU#%u (%d)!\n", cpu, err); |
502 | "CPU#%u (%d)!\n", pdev->id, err); | ||
503 | return -ENODEV; | 398 | return -ENODEV; |
504 | } else if (edx < 0x39) { | 399 | } else if (edx < 0x39) { |
505 | dev_err(&pdev->dev, | 400 | pr_err("Errata AE18 not fixed, update BIOS or " |
506 | "Errata AE18 not fixed, update BIOS or " | 401 | "microcode of the CPU!\n"); |
507 | "microcode of the CPU!\n"); | ||
508 | return -ENODEV; | 402 | return -ENODEV; |
509 | } | 403 | } |
510 | } | 404 | } |
@@ -538,8 +432,6 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) | |||
538 | 432 | ||
539 | tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : | 433 | tdata->status_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_STATUS : |
540 | MSR_IA32_THERM_STATUS; | 434 | MSR_IA32_THERM_STATUS; |
541 | tdata->intrpt_reg = pkg_flag ? MSR_IA32_PACKAGE_THERM_INTERRUPT : | ||
542 | MSR_IA32_THERM_INTERRUPT; | ||
543 | tdata->is_pkg_data = pkg_flag; | 435 | tdata->is_pkg_data = pkg_flag; |
544 | tdata->cpu = cpu; | 436 | tdata->cpu = cpu; |
545 | tdata->cpu_core_id = TO_CORE_ID(cpu); | 437 | tdata->cpu_core_id = TO_CORE_ID(cpu); |
@@ -548,11 +440,11 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) | |||
548 | return tdata; | 440 | return tdata; |
549 | } | 441 | } |
550 | 442 | ||
551 | static int create_core_data(struct platform_data *pdata, | 443 | static int create_core_data(struct platform_device *pdev, |
552 | struct platform_device *pdev, | ||
553 | unsigned int cpu, int pkg_flag) | 444 | unsigned int cpu, int pkg_flag) |
554 | { | 445 | { |
555 | struct temp_data *tdata; | 446 | struct temp_data *tdata; |
447 | struct platform_data *pdata = platform_get_drvdata(pdev); | ||
556 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 448 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
557 | u32 eax, edx; | 449 | u32 eax, edx; |
558 | int err, attr_no; | 450 | int err, attr_no; |
@@ -588,25 +480,21 @@ static int create_core_data(struct platform_data *pdata, | |||
588 | goto exit_free; | 480 | goto exit_free; |
589 | 481 | ||
590 | /* We can access status register. Get Critical Temperature */ | 482 | /* We can access status register. Get Critical Temperature */ |
591 | if (pkg_flag) | 483 | tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); |
592 | tdata->tjmax = get_pkg_tjmax(pdev->id, &pdev->dev); | ||
593 | else | ||
594 | tdata->tjmax = get_tjmax(c, cpu, &pdev->dev); | ||
595 | 484 | ||
596 | /* | 485 | /* |
597 | * Test if we can access the intrpt register. If so, increase the | 486 | * Read the still undocumented bits 8:15 of IA32_TEMPERATURE_TARGET. |
598 | * 'size' enough to have ttarget/tmin/max_alarm interfaces. | 487 | * The target temperature is available on older CPUs but not in this |
599 | * Initialize ttarget with bits 16:22 of MSR_IA32_THERM_INTERRUPT | 488 | * register. Atoms don't have the register at all. |
600 | */ | 489 | */ |
601 | err = rdmsr_safe_on_cpu(cpu, tdata->intrpt_reg, &eax, &edx); | 490 | if (c->x86_model > 0xe && c->x86_model != 0x1c) { |
602 | if (!err) { | 491 | err = rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, |
603 | tdata->attr_size += MAX_THRESH_ATTRS; | 492 | &eax, &edx); |
604 | tdata->tmin = tdata->tjmax - | 493 | if (!err) { |
605 | ((eax & THERM_MASK_THRESHOLD0) >> | 494 | tdata->ttarget |
606 | THERM_SHIFT_THRESHOLD0) * 1000; | 495 | = tdata->tjmax - ((eax >> 8) & 0xff) * 1000; |
607 | tdata->ttarget = tdata->tjmax - | 496 | tdata->attr_size++; |
608 | ((eax & THERM_MASK_THRESHOLD1) >> | 497 | } |
609 | THERM_SHIFT_THRESHOLD1) * 1000; | ||
610 | } | 498 | } |
611 | 499 | ||
612 | pdata->core_data[attr_no] = tdata; | 500 | pdata->core_data[attr_no] = tdata; |
@@ -618,22 +506,20 @@ static int create_core_data(struct platform_data *pdata, | |||
618 | 506 | ||
619 | return 0; | 507 | return 0; |
620 | exit_free: | 508 | exit_free: |
509 | pdata->core_data[attr_no] = NULL; | ||
621 | kfree(tdata); | 510 | kfree(tdata); |
622 | return err; | 511 | return err; |
623 | } | 512 | } |
624 | 513 | ||
625 | static void coretemp_add_core(unsigned int cpu, int pkg_flag) | 514 | static void coretemp_add_core(unsigned int cpu, int pkg_flag) |
626 | { | 515 | { |
627 | struct platform_data *pdata; | ||
628 | struct platform_device *pdev = coretemp_get_pdev(cpu); | 516 | struct platform_device *pdev = coretemp_get_pdev(cpu); |
629 | int err; | 517 | int err; |
630 | 518 | ||
631 | if (!pdev) | 519 | if (!pdev) |
632 | return; | 520 | return; |
633 | 521 | ||
634 | pdata = platform_get_drvdata(pdev); | 522 | err = create_core_data(pdev, cpu, pkg_flag); |
635 | |||
636 | err = create_core_data(pdata, pdev, cpu, pkg_flag); | ||
637 | if (err) | 523 | if (err) |
638 | dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); | 524 | dev_err(&pdev->dev, "Adding Core %u failed\n", cpu); |
639 | } | 525 | } |
@@ -657,11 +543,6 @@ static int __devinit coretemp_probe(struct platform_device *pdev) | |||
657 | struct platform_data *pdata; | 543 | struct platform_data *pdata; |
658 | int err; | 544 | int err; |
659 | 545 | ||
660 | /* Check the microcode version of the CPU */ | ||
661 | err = chk_ucode_version(pdev); | ||
662 | if (err) | ||
663 | return err; | ||
664 | |||
665 | /* Initialize the per-package data structures */ | 546 | /* Initialize the per-package data structures */ |
666 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); | 547 | pdata = kzalloc(sizeof(struct platform_data), GFP_KERNEL); |
667 | if (!pdata) | 548 | if (!pdata) |
@@ -671,7 +552,7 @@ static int __devinit coretemp_probe(struct platform_device *pdev) | |||
671 | if (err) | 552 | if (err) |
672 | goto exit_free; | 553 | goto exit_free; |
673 | 554 | ||
674 | pdata->phys_proc_id = TO_PHYS_ID(pdev->id); | 555 | pdata->phys_proc_id = pdev->id; |
675 | platform_set_drvdata(pdev, pdata); | 556 | platform_set_drvdata(pdev, pdata); |
676 | 557 | ||
677 | pdata->hwmon_dev = hwmon_device_register(&pdev->dev); | 558 | pdata->hwmon_dev = hwmon_device_register(&pdev->dev); |
@@ -723,7 +604,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
723 | 604 | ||
724 | mutex_lock(&pdev_list_mutex); | 605 | mutex_lock(&pdev_list_mutex); |
725 | 606 | ||
726 | pdev = platform_device_alloc(DRVNAME, cpu); | 607 | pdev = platform_device_alloc(DRVNAME, TO_PHYS_ID(cpu)); |
727 | if (!pdev) { | 608 | if (!pdev) { |
728 | err = -ENOMEM; | 609 | err = -ENOMEM; |
729 | pr_err("Device allocation failed\n"); | 610 | pr_err("Device allocation failed\n"); |
@@ -743,7 +624,7 @@ static int __cpuinit coretemp_device_add(unsigned int cpu) | |||
743 | } | 624 | } |
744 | 625 | ||
745 | pdev_entry->pdev = pdev; | 626 | pdev_entry->pdev = pdev; |
746 | pdev_entry->phys_proc_id = TO_PHYS_ID(cpu); | 627 | pdev_entry->phys_proc_id = pdev->id; |
747 | 628 | ||
748 | list_add_tail(&pdev_entry->list, &pdev_list); | 629 | list_add_tail(&pdev_entry->list, &pdev_list); |
749 | mutex_unlock(&pdev_list_mutex); | 630 | mutex_unlock(&pdev_list_mutex); |
@@ -804,6 +685,10 @@ static void __cpuinit get_core_online(unsigned int cpu) | |||
804 | return; | 685 | return; |
805 | 686 | ||
806 | if (!pdev) { | 687 | if (!pdev) { |
688 | /* Check the microcode version of the CPU */ | ||
689 | if (chk_ucode_version(cpu)) | ||
690 | return; | ||
691 | |||
807 | /* | 692 | /* |
808 | * Alright, we have DTS support. | 693 | * Alright, we have DTS support. |
809 | * We are bringing the _first_ core in this pkg | 694 | * We are bringing the _first_ core in this pkg |
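Taken together, the coretemp hunks above drop the writable threshold attributes, move the microcode check to CPU-online time, and change how TjMax is determined: a readable, non-zero MSR value wins, then the new tjmax module parameter, and only then the heuristic. A simplified sketch of that lookup order, assuming the surrounding driver context (force_tjmax and adjust_tjmax() are the names used in the hunks; warnings and the Atom/older-model special cases are omitted):

    /* Simplified sketch of the TjMax resolution order after these changes:
     * 1. bits 23:16 of MSR_IA32_TEMPERATURE_TARGET, when readable and non-zero
     * 2. the user-supplied "tjmax" module parameter (force_tjmax)
     * 3. the adjust_tjmax() heuristic as a last resort
     */
    static int tjmax_lookup_sketch(struct cpuinfo_x86 *c, u32 cpu,
                                   struct device *dev)
    {
            u32 eax, edx;

            if (!rdmsr_safe_on_cpu(cpu, MSR_IA32_TEMPERATURE_TARGET, &eax, &edx)) {
                    u32 val = (eax >> 16) & 0xff;

                    if (val)
                            return val * 1000;      /* millidegrees Celsius */
            }
            if (force_tjmax)
                    return force_tjmax * 1000;      /* user override */
            return adjust_tjmax(c, cpu, dev);       /* heuristic fallback */
    }

For example, loading the driver with "modprobe coretemp tjmax=95" pins TjMax to 95 degrees C on CPUs where the MSR read fails.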
diff --git a/drivers/hwmon/ds620.c b/drivers/hwmon/ds620.c index 257957c69d92..4f7c3fc40a89 100644 --- a/drivers/hwmon/ds620.c +++ b/drivers/hwmon/ds620.c | |||
@@ -72,7 +72,7 @@ struct ds620_data { | |||
72 | char valid; /* !=0 if following fields are valid */ | 72 | char valid; /* !=0 if following fields are valid */ |
73 | unsigned long last_updated; /* In jiffies */ | 73 | unsigned long last_updated; /* In jiffies */ |
74 | 74 | ||
75 | u16 temp[3]; /* Register values, word */ | 75 | s16 temp[3]; /* Register values, word */ |
76 | }; | 76 | }; |
77 | 77 | ||
78 | /* | 78 | /* |
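The u16 to s16 change matters because the chip reports temperatures as two's-complement register words: kept in an unsigned type, any reading below 0 degrees C turns into a large positive value once it is scaled for sysfs. A small standalone illustration of the difference (the raw value is made up for the example):

    #include <stdio.h>

    int main(void)
    {
            unsigned short raw = 0xFE80;    /* hypothetical sub-zero reading */

            printf("as u16: %d\n", (int)(unsigned short)raw);  /* 65152 */
            printf("as s16: %d\n", (int)(short)raw);           /* -384  */
            return 0;
    }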
diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c index 17cf1ab95521..8c2844e5691c 100644 --- a/drivers/hwmon/w83791d.c +++ b/drivers/hwmon/w83791d.c | |||
@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_client *client, | |||
329 | struct i2c_board_info *info); | 329 | struct i2c_board_info *info); |
330 | static int w83791d_remove(struct i2c_client *client); | 330 | static int w83791d_remove(struct i2c_client *client); |
331 | 331 | ||
332 | static int w83791d_read(struct i2c_client *client, u8 register); | 332 | static int w83791d_read(struct i2c_client *client, u8 reg); |
333 | static int w83791d_write(struct i2c_client *client, u8 register, u8 value); | 333 | static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); |
334 | static struct w83791d_data *w83791d_update_device(struct device *dev); | 334 | static struct w83791d_data *w83791d_update_device(struct device *dev); |
335 | 335 | ||
336 | #ifdef DEBUG | 336 | #ifdef DEBUG |
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 274798068a54..16f69be820c7 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
@@ -435,7 +435,12 @@ static int idedisk_prep_fn(struct request_queue *q, struct request *rq) | |||
435 | if (!(rq->cmd_flags & REQ_FLUSH)) | 435 | if (!(rq->cmd_flags & REQ_FLUSH)) |
436 | return BLKPREP_OK; | 436 | return BLKPREP_OK; |
437 | 437 | ||
438 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | 438 | if (rq->special) { |
439 | cmd = rq->special; | ||
440 | memset(cmd, 0, sizeof(*cmd)); | ||
441 | } else { | ||
442 | cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); | ||
443 | } | ||
439 | 444 | ||
440 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ | 445 | /* FIXME: map struct ide_taskfile on rq->cmd[] */ |
441 | BUG_ON(cmd == NULL); | 446 | BUG_ON(cmd == NULL); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 17bf9d95463c..6cd642aaa4de 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -287,7 +287,7 @@ void __free_ep(struct kref *kref) | |||
287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { | 287 | if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) { |
288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); | 288 | cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid); |
289 | dst_release(ep->dst); | 289 | dst_release(ep->dst); |
290 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 290 | l2t_release(ep->com.tdev, ep->l2t); |
291 | } | 291 | } |
292 | kfree(ep); | 292 | kfree(ep); |
293 | } | 293 | } |
@@ -1178,7 +1178,7 @@ static int act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); | 1178 | release_tid(ep->com.tdev, GET_TID(rpl), NULL); |
1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); | 1179 | cxgb3_free_atid(ep->com.tdev, ep->atid); |
1180 | dst_release(ep->dst); | 1180 | dst_release(ep->dst); |
1181 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 1181 | l2t_release(ep->com.tdev, ep->l2t); |
1182 | put_ep(&ep->com); | 1182 | put_ep(&ep->com); |
1183 | return CPL_RET_BUF_DONE; | 1183 | return CPL_RET_BUF_DONE; |
1184 | } | 1184 | } |
@@ -1377,7 +1377,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1377 | if (!child_ep) { | 1377 | if (!child_ep) { |
1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", | 1378 | printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", |
1379 | __func__); | 1379 | __func__); |
1380 | l2t_release(L2DATA(tdev), l2t); | 1380 | l2t_release(tdev, l2t); |
1381 | dst_release(dst); | 1381 | dst_release(dst); |
1382 | goto reject; | 1382 | goto reject; |
1383 | } | 1383 | } |
@@ -1956,7 +1956,7 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) | |||
1956 | if (!err) | 1956 | if (!err) |
1957 | goto out; | 1957 | goto out; |
1958 | 1958 | ||
1959 | l2t_release(L2DATA(h->rdev.t3cdev_p), ep->l2t); | 1959 | l2t_release(h->rdev.t3cdev_p, ep->l2t); |
1960 | fail4: | 1960 | fail4: |
1961 | dst_release(ep->dst); | 1961 | dst_release(ep->dst); |
1962 | fail3: | 1962 | fail3: |
@@ -2127,7 +2127,7 @@ int iwch_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, | |||
2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, | 2127 | PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, |
2128 | l2t); | 2128 | l2t); |
2129 | dst_hold(new); | 2129 | dst_hold(new); |
2130 | l2t_release(L2DATA(ep->com.tdev), ep->l2t); | 2130 | l2t_release(ep->com.tdev, ep->l2t); |
2131 | ep->l2t = l2t; | 2131 | ep->l2t = l2t; |
2132 | dst_release(old); | 2132 | dst_release(old); |
2133 | ep->dst = new; | 2133 | ep->dst = new; |
diff --git a/drivers/media/video/omap/omap_vout.c b/drivers/media/video/omap/omap_vout.c index b5ef36222440..b3a5ecdb33ac 100644 --- a/drivers/media/video/omap/omap_vout.c +++ b/drivers/media/video/omap/omap_vout.c | |||
@@ -2194,19 +2194,6 @@ static int __init omap_vout_probe(struct platform_device *pdev) | |||
2194 | "'%s' Display already enabled\n", | 2194 | "'%s' Display already enabled\n", |
2195 | def_display->name); | 2195 | def_display->name); |
2196 | } | 2196 | } |
2197 | /* set the update mode */ | ||
2198 | if (def_display->caps & | ||
2199 | OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE) { | ||
2200 | if (dssdrv->enable_te) | ||
2201 | dssdrv->enable_te(def_display, 0); | ||
2202 | if (dssdrv->set_update_mode) | ||
2203 | dssdrv->set_update_mode(def_display, | ||
2204 | OMAP_DSS_UPDATE_MANUAL); | ||
2205 | } else { | ||
2206 | if (dssdrv->set_update_mode) | ||
2207 | dssdrv->set_update_mode(def_display, | ||
2208 | OMAP_DSS_UPDATE_AUTO); | ||
2209 | } | ||
2210 | } | 2197 | } |
2211 | } | 2198 | } |
2212 | 2199 | ||
diff --git a/drivers/media/video/omap3isp/ispccdc.c b/drivers/media/video/omap3isp/ispccdc.c index 9d3459de04b2..80796eb0c53e 100644 --- a/drivers/media/video/omap3isp/ispccdc.c +++ b/drivers/media/video/omap3isp/ispccdc.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/mm.h> | 32 | #include <linux/mm.h> |
33 | #include <linux/sched.h> | 33 | #include <linux/sched.h> |
34 | #include <linux/slab.h> | ||
34 | #include <media/v4l2-event.h> | 35 | #include <media/v4l2-event.h> |
35 | 36 | ||
36 | #include "isp.h" | 37 | #include "isp.h" |
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index d29f9c2d0854..e4100b1f68df 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -1961,7 +1961,7 @@ static int __uvc_resume(struct usb_interface *intf, int reset) | |||
1961 | 1961 | ||
1962 | list_for_each_entry(stream, &dev->streams, list) { | 1962 | list_for_each_entry(stream, &dev->streams, list) { |
1963 | if (stream->intf == intf) | 1963 | if (stream->intf == intf) |
1964 | return uvc_video_resume(stream); | 1964 | return uvc_video_resume(stream, reset); |
1965 | } | 1965 | } |
1966 | 1966 | ||
1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " | 1967 | uvc_trace(UVC_TRACE_SUSPEND, "Resume: video streaming USB interface " |
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c index 48fea373c25a..29e239911d0e 100644 --- a/drivers/media/video/uvc/uvc_entity.c +++ b/drivers/media/video/uvc/uvc_entity.c | |||
@@ -49,7 +49,7 @@ static int uvc_mc_register_entity(struct uvc_video_chain *chain, | |||
49 | if (remote == NULL) | 49 | if (remote == NULL) |
50 | return -EINVAL; | 50 | return -EINVAL; |
51 | 51 | ||
52 | source = (UVC_ENTITY_TYPE(remote) != UVC_TT_STREAMING) | 52 | source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) |
53 | ? (remote->vdev ? &remote->vdev->entity : NULL) | 53 | ? (remote->vdev ? &remote->vdev->entity : NULL) |
54 | : &remote->subdev.entity; | 54 | : &remote->subdev.entity; |
55 | if (source == NULL) | 55 | if (source == NULL) |
diff --git a/drivers/media/video/uvc/uvc_video.c b/drivers/media/video/uvc/uvc_video.c index 8244167c8915..ffd1158628b6 100644 --- a/drivers/media/video/uvc/uvc_video.c +++ b/drivers/media/video/uvc/uvc_video.c | |||
@@ -1104,10 +1104,18 @@ int uvc_video_suspend(struct uvc_streaming *stream) | |||
1104 | * buffers, making sure userspace applications are notified of the problem | 1104 | * buffers, making sure userspace applications are notified of the problem |
1105 | * instead of waiting forever. | 1105 | * instead of waiting forever. |
1106 | */ | 1106 | */ |
1107 | int uvc_video_resume(struct uvc_streaming *stream) | 1107 | int uvc_video_resume(struct uvc_streaming *stream, int reset) |
1108 | { | 1108 | { |
1109 | int ret; | 1109 | int ret; |
1110 | 1110 | ||
1111 | /* If the bus has been reset on resume, set the alternate setting to 0. | ||
1112 | * This should be the default value, but some devices crash or otherwise | ||
1113 | * misbehave if they don't receive a SET_INTERFACE request before any | ||
1114 | * other video control request. | ||
1115 | */ | ||
1116 | if (reset) | ||
1117 | usb_set_interface(stream->dev->udev, stream->intfnum, 0); | ||
1118 | |||
1111 | stream->frozen = 0; | 1119 | stream->frozen = 0; |
1112 | 1120 | ||
1113 | ret = uvc_commit_video(stream, &stream->ctrl); | 1121 | ret = uvc_commit_video(stream, &stream->ctrl); |
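The comment added above explains the ordering requirement: after a bus reset, some cameras must see a SET_INTERFACE(alt 0) request before any other video control traffic, so uvc_video_resume() now issues it when the new reset flag is set. __uvc_resume() (shown in the uvc_driver.c hunk) carries that flag; a minimal sketch of how the two USB resume entry points could forward it (these two wrapper names are illustrative and do not appear in this diff):

    /* Sketch: a plain resume passes reset = 0, a reset_resume passes
     * reset = 1, and uvc_video_resume() then performs the
     * usb_set_interface(..., 0) shown above before re-committing video. */
    static int uvc_resume_sketch(struct usb_interface *intf)
    {
            return __uvc_resume(intf, 0);
    }

    static int uvc_reset_resume_sketch(struct usb_interface *intf)
    {
            return __uvc_resume(intf, 1);
    }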
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index df32a43ca86a..cbdd49bf8b67 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
@@ -638,7 +638,7 @@ extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); | |||
638 | /* Video */ | 638 | /* Video */ |
639 | extern int uvc_video_init(struct uvc_streaming *stream); | 639 | extern int uvc_video_init(struct uvc_streaming *stream); |
640 | extern int uvc_video_suspend(struct uvc_streaming *stream); | 640 | extern int uvc_video_suspend(struct uvc_streaming *stream); |
641 | extern int uvc_video_resume(struct uvc_streaming *stream); | 641 | extern int uvc_video_resume(struct uvc_streaming *stream, int reset); |
642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); | 642 | extern int uvc_video_enable(struct uvc_streaming *stream, int enable); |
643 | extern int uvc_probe_video(struct uvc_streaming *stream, | 643 | extern int uvc_probe_video(struct uvc_streaming *stream, |
644 | struct uvc_streaming_control *probe); | 644 | struct uvc_streaming_control *probe); |
diff --git a/drivers/media/video/v4l2-dev.c b/drivers/media/video/v4l2-dev.c index 06f14008b346..d72156517726 100644 --- a/drivers/media/video/v4l2-dev.c +++ b/drivers/media/video/v4l2-dev.c | |||
@@ -173,6 +173,17 @@ static void v4l2_device_release(struct device *cd) | |||
173 | media_device_unregister_entity(&vdev->entity); | 173 | media_device_unregister_entity(&vdev->entity); |
174 | #endif | 174 | #endif |
175 | 175 | ||
176 | /* Do not call v4l2_device_put if there is no release callback set. | ||
177 | * Drivers that have no v4l2_device release callback might free the | ||
178 | * v4l2_dev instance in the video_device release callback below, so we | ||
179 | * must perform this check here. | ||
180 | * | ||
181 | * TODO: In the long run all drivers that use v4l2_device should use the | ||
182 | * v4l2_device release callback. This check will then be unnecessary. | ||
183 | */ | ||
184 | if (v4l2_dev->release == NULL) | ||
185 | v4l2_dev = NULL; | ||
186 | |||
176 | /* Release video_device and perform other | 187 | /* Release video_device and perform other |
177 | cleanups as needed. */ | 188 | cleanups as needed. */ |
178 | vdev->release(vdev); | 189 | vdev->release(vdev); |
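The comment block above documents a lifetime trap: when a driver registers no v4l2_device release callback, the vdev->release() call may drop the last reference and free the v4l2_device itself, so the function clears its local pointer first and must not call v4l2_device_put() on it afterwards. A condensed sketch of the ordering (the trailing v4l2_device_put() is not part of the hunk; it is inferred from the comment):

    static void v4l2_release_order_sketch(struct video_device *vdev,
                                          struct v4l2_device *v4l2_dev)
    {
            /* Driver has no v4l2_device release callback: vdev->release()
             * below may free v4l2_dev, so forget the pointer now. */
            if (v4l2_dev && v4l2_dev->release == NULL)
                    v4l2_dev = NULL;

            vdev->release(vdev);            /* may free the v4l2_device */

            if (v4l2_dev)
                    v4l2_device_put(v4l2_dev);  /* safe only with a release cb */
    }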
diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c index c72856c41434..e6a2c3b302d4 100644 --- a/drivers/media/video/v4l2-device.c +++ b/drivers/media/video/v4l2-device.c | |||
@@ -38,6 +38,7 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev) | |||
38 | mutex_init(&v4l2_dev->ioctl_lock); | 38 | mutex_init(&v4l2_dev->ioctl_lock); |
39 | v4l2_prio_init(&v4l2_dev->prio); | 39 | v4l2_prio_init(&v4l2_dev->prio); |
40 | kref_init(&v4l2_dev->ref); | 40 | kref_init(&v4l2_dev->ref); |
41 | get_device(dev); | ||
41 | v4l2_dev->dev = dev; | 42 | v4l2_dev->dev = dev; |
42 | if (dev == NULL) { | 43 | if (dev == NULL) { |
43 | /* If dev == NULL, then name must be filled in by the caller */ | 44 | /* If dev == NULL, then name must be filled in by the caller */ |
@@ -93,6 +94,7 @@ void v4l2_device_disconnect(struct v4l2_device *v4l2_dev) | |||
93 | 94 | ||
94 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) | 95 | if (dev_get_drvdata(v4l2_dev->dev) == v4l2_dev) |
95 | dev_set_drvdata(v4l2_dev->dev, NULL); | 96 | dev_set_drvdata(v4l2_dev->dev, NULL); |
97 | put_device(v4l2_dev->dev); | ||
96 | v4l2_dev->dev = NULL; | 98 | v4l2_dev->dev = NULL; |
97 | } | 99 | } |
98 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); | 100 | EXPORT_SYMBOL_GPL(v4l2_device_disconnect); |
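The get_device()/put_device() pair added above pins the parent struct device for as long as the v4l2_device points at it, so the parent cannot be freed while v4l2_dev->dev is still in use. A minimal sketch of the balance (get_device(NULL) and put_device(NULL) are harmless no-ops, which matches the dev == NULL path):

    /* Registration takes the reference ... */
    static void v4l2_register_ref_sketch(struct v4l2_device *v4l2_dev,
                                         struct device *dev)
    {
            get_device(dev);        /* pin the parent */
            v4l2_dev->dev = dev;
    }

    /* ... and disconnect drops it again. */
    static void v4l2_disconnect_ref_sketch(struct v4l2_device *v4l2_dev)
    {
            put_device(v4l2_dev->dev);
            v4l2_dev->dev = NULL;
    }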
diff --git a/drivers/mfd/jz4740-adc.c b/drivers/mfd/jz4740-adc.c index 21131c7b0f1e..563654c9b19e 100644 --- a/drivers/mfd/jz4740-adc.c +++ b/drivers/mfd/jz4740-adc.c | |||
@@ -273,7 +273,7 @@ static int __devinit jz4740_adc_probe(struct platform_device *pdev) | |||
273 | ct->regs.ack = JZ_REG_ADC_STATUS; | 273 | ct->regs.ack = JZ_REG_ADC_STATUS; |
274 | ct->chip.irq_mask = irq_gc_mask_set_bit; | 274 | ct->chip.irq_mask = irq_gc_mask_set_bit; |
275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; | 275 | ct->chip.irq_unmask = irq_gc_mask_clr_bit; |
276 | ct->chip.irq_ack = irq_gc_ack; | 276 | ct->chip.irq_ack = irq_gc_ack_set_bit; |
277 | 277 | ||
278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); | 278 | irq_setup_generic_chip(gc, IRQ_MSK(5), 0, 0, IRQ_NOPROBE | IRQ_LEVEL); |
279 | 279 | ||
diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c index b928bc14e97b..8b51cd62d067 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d.c +++ b/drivers/misc/lis3lv02d/lis3lv02d.c | |||
@@ -375,12 +375,14 @@ void lis3lv02d_poweron(struct lis3lv02d *lis3) | |||
375 | * both have been read. So the value read will always be correct. | 375 | * both have been read. So the value read will always be correct. |
376 | * Set BOOT bit to refresh factory tuning values. | 376 | * Set BOOT bit to refresh factory tuning values. |
377 | */ | 377 | */ |
378 | lis3->read(lis3, CTRL_REG2, ®); | 378 | if (lis3->pdata) { |
379 | if (lis3->whoami == WAI_12B) | 379 | lis3->read(lis3, CTRL_REG2, ®); |
380 | reg |= CTRL2_BDU | CTRL2_BOOT; | 380 | if (lis3->whoami == WAI_12B) |
381 | else | 381 | reg |= CTRL2_BDU | CTRL2_BOOT; |
382 | reg |= CTRL2_BOOT_8B; | 382 | else |
383 | lis3->write(lis3, CTRL_REG2, reg); | 383 | reg |= CTRL2_BOOT_8B; |
384 | lis3->write(lis3, CTRL_REG2, reg); | ||
385 | } | ||
384 | 386 | ||
385 | /* LIS3 power on delay is quite long */ | 387 | /* LIS3 power on delay is quite long */ |
386 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); | 388 | msleep(lis3->pwron_delay / lis3lv02d_get_odr()); |
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c index a1e004a82f7a..0b4acf67e0c6 100644 --- a/drivers/net/bnx2x/bnx2x_dcb.c +++ b/drivers/net/bnx2x/bnx2x_dcb.c | |||
@@ -2120,6 +2120,7 @@ static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap) | |||
2120 | break; | 2120 | break; |
2121 | case DCB_CAP_ATTR_DCBX: | 2121 | case DCB_CAP_ATTR_DCBX: |
2122 | *cap = BNX2X_DCBX_CAPS; | 2122 | *cap = BNX2X_DCBX_CAPS; |
2123 | break; | ||
2123 | default: | 2124 | default: |
2124 | rval = -EINVAL; | 2125 | rval = -EINVAL; |
2125 | break; | 2126 | break; |
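The one-line fix above adds the break that was missing from the DCB_CAP_ATTR_DCBX case: without it, control fell through into default and rval was overwritten with -EINVAL even though *cap had just been filled in, so the caller saw a failure. A standalone illustration of that fall-through pattern:

    #include <stdio.h>

    /* Same shape as the bug: the successful case falls into default. */
    static int get_cap(int capid, unsigned char *cap)
    {
            int rval = 0;

            switch (capid) {
            case 1:
                    *cap = 0x5;     /* value produced ... */
                    /* missing break: falls through */
            default:
                    rval = -22;     /* ... but -EINVAL is returned anyway */
                    break;
            }
            return rval;
    }

    int main(void)
    {
            unsigned char cap = 0;
            int rval = get_cap(1, &cap);

            printf("rval=%d cap=0x%x\n", rval, cap);    /* rval=-22 cap=0x5 */
            return 0;
    }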
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c index c027e9341a1a..15f800085bb2 100644 --- a/drivers/net/bnx2x/bnx2x_main.c +++ b/drivers/net/bnx2x/bnx2x_main.c | |||
@@ -4943,7 +4943,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4943 | int igu_seg_id; | 4943 | int igu_seg_id; |
4944 | int port = BP_PORT(bp); | 4944 | int port = BP_PORT(bp); |
4945 | int func = BP_FUNC(bp); | 4945 | int func = BP_FUNC(bp); |
4946 | int reg_offset; | 4946 | int reg_offset, reg_offset_en5; |
4947 | u64 section; | 4947 | u64 section; |
4948 | int index; | 4948 | int index; |
4949 | struct hc_sp_status_block_data sp_sb_data; | 4949 | struct hc_sp_status_block_data sp_sb_data; |
@@ -4966,6 +4966,8 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4966 | 4966 | ||
4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : | 4967 | reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 : |
4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); | 4968 | MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0); |
4969 | reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 : | ||
4970 | MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0); | ||
4969 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { | 4971 | for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) { |
4970 | int sindex; | 4972 | int sindex; |
4971 | /* take care of sig[0]..sig[4] */ | 4973 | /* take care of sig[0]..sig[4] */ |
@@ -4980,7 +4982,7 @@ static void bnx2x_init_def_sb(struct bnx2x *bp) | |||
4980 | * and not 16 between the different groups | 4982 | * and not 16 between the different groups |
4981 | */ | 4983 | */ |
4982 | bp->attn_group[index].sig[4] = REG_RD(bp, | 4984 | bp->attn_group[index].sig[4] = REG_RD(bp, |
4983 | reg_offset + 0x10 + 0x4*index); | 4985 | reg_offset_en5 + 0x4*index); |
4984 | else | 4986 | else |
4985 | bp->attn_group[index].sig[4] = 0; | 4987 | bp->attn_group[index].sig[4] = 0; |
4986 | } | 4988 | } |
@@ -7625,8 +7627,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7625 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; | 7627 | u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; |
7626 | u8 *mac_addr = bp->dev->dev_addr; | 7628 | u8 *mac_addr = bp->dev->dev_addr; |
7627 | u32 val; | 7629 | u32 val; |
7630 | u16 pmc; | ||
7631 | |||
7628 | /* The mac address is written to entries 1-4 to | 7632 | /* The mac address is written to entries 1-4 to |
7629 | preserve entry 0 which is used by the PMF */ | 7633 | * preserve entry 0 which is used by the PMF |
7634 | */ | ||
7630 | u8 entry = (BP_VN(bp) + 1)*8; | 7635 | u8 entry = (BP_VN(bp) + 1)*8; |
7631 | 7636 | ||
7632 | val = (mac_addr[0] << 8) | mac_addr[1]; | 7637 | val = (mac_addr[0] << 8) | mac_addr[1]; |
@@ -7636,6 +7641,11 @@ u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode) | |||
7636 | (mac_addr[4] << 8) | mac_addr[5]; | 7641 | (mac_addr[4] << 8) | mac_addr[5]; |
7637 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); | 7642 | EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); |
7638 | 7643 | ||
7644 | /* Enable the PME and clear the status */ | ||
7645 | pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc); | ||
7646 | pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS; | ||
7647 | pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc); | ||
7648 | |||
7639 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; | 7649 | reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; |
7640 | 7650 | ||
7641 | } else | 7651 | } else |
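Besides switching the attention-group sig[4] reads to the new AEU_ENABLE5 offsets, the bnx2x hunk above arms PME in the PCI power-management control register on a WoL-enabled unload. PME_STATUS is a write-one-to-clear bit, so OR-ing it in clears any stale wake event at the same time as PME_ENABLE is set. A sketch of just that step, using the fields visible in the hunk:

    static void bnx2x_arm_pme_sketch(struct bnx2x *bp)
    {
            u16 pmc;

            pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
            pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
            pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
    }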
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h index 750e8445dac4..fc7bd0f23c0b 100644 --- a/drivers/net/bnx2x/bnx2x_reg.h +++ b/drivers/net/bnx2x/bnx2x_reg.h | |||
@@ -1384,6 +1384,18 @@ | |||
1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ | 1384 | Latched ump_tx_parity; [31] MCP Latched scpad_parity; */ |
1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 | 1385 | #define MISC_REG_AEU_ENABLE4_PXP_0 0xa108 |
1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 | 1386 | #define MISC_REG_AEU_ENABLE4_PXP_1 0xa1a8 |
1387 | /* [RW 32] fifth 32b for enabling the output for function 0 output0. Mapped | ||
1388 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1389 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1390 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1391 | * parity; [31-10] Reserved; */ | ||
1392 | #define MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0 0xa688 | ||
1393 | /* [RW 32] Fifth 32b for enabling the output for function 1 output0. Mapped | ||
1394 | * as follows: [0] PGLUE config_space; [1] PGLUE misc_flr; [2] PGLUE B RBC | ||
1395 | * attention [3] PGLUE B RBC parity; [4] ATC attention; [5] ATC parity; [6] | ||
1396 | * mstat0 attention; [7] mstat0 parity; [8] mstat1 attention; [9] mstat1 | ||
1397 | * parity; [31-10] Reserved; */ | ||
1398 | #define MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 0xa6b0 | ||
1387 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu | 1399 | /* [RW 1] set/clr general attention 0; this will set/clr bit 94 in the aeu |
1388 | 128 bit vector */ | 1400 | 128 bit vector */ |
1389 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 | 1401 | #define MISC_REG_AEU_GENERAL_ATTN_0 0xa000 |
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c index a047eb973e3b..47b928ed08f8 100644 --- a/drivers/net/bonding/bond_3ad.c +++ b/drivers/net/bonding/bond_3ad.c | |||
@@ -2168,7 +2168,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work) | |||
2168 | } | 2168 | } |
2169 | 2169 | ||
2170 | re_arm: | 2170 | re_arm: |
2171 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | 2171 | if (!bond->kill_timers) |
2172 | queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks); | ||
2172 | out: | 2173 | out: |
2173 | read_unlock(&bond->lock); | 2174 | read_unlock(&bond->lock); |
2174 | } | 2175 | } |
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 7f8b20a34ee3..d4fbd2e62616 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c | |||
@@ -1440,7 +1440,8 @@ void bond_alb_monitor(struct work_struct *work) | |||
1440 | } | 1440 | } |
1441 | 1441 | ||
1442 | re_arm: | 1442 | re_arm: |
1443 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | 1443 | if (!bond->kill_timers) |
1444 | queue_delayed_work(bond->wq, &bond->alb_work, alb_delta_in_ticks); | ||
1444 | out: | 1445 | out: |
1445 | read_unlock(&bond->lock); | 1446 | read_unlock(&bond->lock); |
1446 | } | 1447 | } |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 43f2ea541088..6d79b78cfc75 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -777,6 +777,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
777 | 777 | ||
778 | read_lock(&bond->lock); | 778 | read_lock(&bond->lock); |
779 | 779 | ||
780 | if (bond->kill_timers) | ||
781 | goto out; | ||
782 | |||
780 | /* rejoin all groups on bond device */ | 783 | /* rejoin all groups on bond device */ |
781 | __bond_resend_igmp_join_requests(bond->dev); | 784 | __bond_resend_igmp_join_requests(bond->dev); |
782 | 785 | ||
@@ -790,9 +793,9 @@ static void bond_resend_igmp_join_requests(struct bonding *bond) | |||
790 | __bond_resend_igmp_join_requests(vlan_dev); | 793 | __bond_resend_igmp_join_requests(vlan_dev); |
791 | } | 794 | } |
792 | 795 | ||
793 | if (--bond->igmp_retrans > 0) | 796 | if ((--bond->igmp_retrans > 0) && !bond->kill_timers) |
794 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); | 797 | queue_delayed_work(bond->wq, &bond->mcast_work, HZ/5); |
795 | 798 | out: | |
796 | read_unlock(&bond->lock); | 799 | read_unlock(&bond->lock); |
797 | } | 800 | } |
798 | 801 | ||
@@ -2538,7 +2541,7 @@ void bond_mii_monitor(struct work_struct *work) | |||
2538 | } | 2541 | } |
2539 | 2542 | ||
2540 | re_arm: | 2543 | re_arm: |
2541 | if (bond->params.miimon) | 2544 | if (bond->params.miimon && !bond->kill_timers) |
2542 | queue_delayed_work(bond->wq, &bond->mii_work, | 2545 | queue_delayed_work(bond->wq, &bond->mii_work, |
2543 | msecs_to_jiffies(bond->params.miimon)); | 2546 | msecs_to_jiffies(bond->params.miimon)); |
2544 | out: | 2547 | out: |
@@ -2886,7 +2889,7 @@ void bond_loadbalance_arp_mon(struct work_struct *work) | |||
2886 | } | 2889 | } |
2887 | 2890 | ||
2888 | re_arm: | 2891 | re_arm: |
2889 | if (bond->params.arp_interval) | 2892 | if (bond->params.arp_interval && !bond->kill_timers) |
2890 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 2893 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
2891 | out: | 2894 | out: |
2892 | read_unlock(&bond->lock); | 2895 | read_unlock(&bond->lock); |
@@ -3154,7 +3157,7 @@ void bond_activebackup_arp_mon(struct work_struct *work) | |||
3154 | bond_ab_arp_probe(bond); | 3157 | bond_ab_arp_probe(bond); |
3155 | 3158 | ||
3156 | re_arm: | 3159 | re_arm: |
3157 | if (bond->params.arp_interval) | 3160 | if (bond->params.arp_interval && !bond->kill_timers) |
3158 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); | 3161 | queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks); |
3159 | out: | 3162 | out: |
3160 | read_unlock(&bond->lock); | 3163 | read_unlock(&bond->lock); |
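All of the bonding hunks above apply the same guard: each monitor is a self-rearming delayed work item, and re-queueing is now skipped once kill_timers has been set on shutdown, so a work item already in flight when the bond is torn down does not re-arm itself afterwards. A minimal sketch of the guarded pattern, using the field and work-queue names from the hunks:

    static void bond_monitor_sketch(struct work_struct *work)
    {
            struct bonding *bond = container_of(work, struct bonding,
                                                mii_work.work);

            read_lock(&bond->lock);
            if (bond->kill_timers)
                    goto out;               /* shutdown already started */

            /* ... periodic link monitoring work goes here ... */

            if (bond->params.miimon && !bond->kill_timers)
                    queue_delayed_work(bond->wq, &bond->mii_work,
                                       msecs_to_jiffies(bond->params.miimon));
    out:
            read_unlock(&bond->lock);
    }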
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c index 805076c54f1b..da5a5d9b8aff 100644 --- a/drivers/net/cxgb3/cxgb3_offload.c +++ b/drivers/net/cxgb3/cxgb3_offload.c | |||
@@ -1146,12 +1146,14 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) | |||
1146 | if (te && te->ctx && te->client && te->client->redirect) { | 1146 | if (te && te->ctx && te->client && te->client->redirect) { |
1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); | 1147 | update_tcb = te->client->redirect(te->ctx, old, new, e); |
1148 | if (update_tcb) { | 1148 | if (update_tcb) { |
1149 | rcu_read_lock(); | ||
1149 | l2t_hold(L2DATA(tdev), e); | 1150 | l2t_hold(L2DATA(tdev), e); |
1151 | rcu_read_unlock(); | ||
1150 | set_l2t_ix(tdev, tid, e); | 1152 | set_l2t_ix(tdev, tid, e); |
1151 | } | 1153 | } |
1152 | } | 1154 | } |
1153 | } | 1155 | } |
1154 | l2t_release(L2DATA(tdev), e); | 1156 | l2t_release(tdev, e); |
1155 | } | 1157 | } |
1156 | 1158 | ||
1157 | /* | 1159 | /* |
@@ -1264,7 +1266,7 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1264 | goto out_free; | 1266 | goto out_free; |
1265 | 1267 | ||
1266 | err = -ENOMEM; | 1268 | err = -ENOMEM; |
1267 | L2DATA(dev) = t3_init_l2t(l2t_capacity); | 1269 | RCU_INIT_POINTER(dev->l2opt, t3_init_l2t(l2t_capacity)); |
1268 | if (!L2DATA(dev)) | 1270 | if (!L2DATA(dev)) |
1269 | goto out_free; | 1271 | goto out_free; |
1270 | 1272 | ||
@@ -1298,16 +1300,24 @@ int cxgb3_offload_activate(struct adapter *adapter) | |||
1298 | 1300 | ||
1299 | out_free_l2t: | 1301 | out_free_l2t: |
1300 | t3_free_l2t(L2DATA(dev)); | 1302 | t3_free_l2t(L2DATA(dev)); |
1301 | L2DATA(dev) = NULL; | 1303 | rcu_assign_pointer(dev->l2opt, NULL); |
1302 | out_free: | 1304 | out_free: |
1303 | kfree(t); | 1305 | kfree(t); |
1304 | return err; | 1306 | return err; |
1305 | } | 1307 | } |
1306 | 1308 | ||
1309 | static void clean_l2_data(struct rcu_head *head) | ||
1310 | { | ||
1311 | struct l2t_data *d = container_of(head, struct l2t_data, rcu_head); | ||
1312 | t3_free_l2t(d); | ||
1313 | } | ||
1314 | |||
1315 | |||
1307 | void cxgb3_offload_deactivate(struct adapter *adapter) | 1316 | void cxgb3_offload_deactivate(struct adapter *adapter) |
1308 | { | 1317 | { |
1309 | struct t3cdev *tdev = &adapter->tdev; | 1318 | struct t3cdev *tdev = &adapter->tdev; |
1310 | struct t3c_data *t = T3C_DATA(tdev); | 1319 | struct t3c_data *t = T3C_DATA(tdev); |
1320 | struct l2t_data *d; | ||
1311 | 1321 | ||
1312 | remove_adapter(adapter); | 1322 | remove_adapter(adapter); |
1313 | if (list_empty(&adapter_list)) | 1323 | if (list_empty(&adapter_list)) |
@@ -1315,8 +1325,11 @@ void cxgb3_offload_deactivate(struct adapter *adapter) | |||
1315 | 1325 | ||
1316 | free_tid_maps(&t->tid_maps); | 1326 | free_tid_maps(&t->tid_maps); |
1317 | T3C_DATA(tdev) = NULL; | 1327 | T3C_DATA(tdev) = NULL; |
1318 | t3_free_l2t(L2DATA(tdev)); | 1328 | rcu_read_lock(); |
1319 | L2DATA(tdev) = NULL; | 1329 | d = L2DATA(tdev); |
1330 | rcu_read_unlock(); | ||
1331 | rcu_assign_pointer(tdev->l2opt, NULL); | ||
1332 | call_rcu(&d->rcu_head, clean_l2_data); | ||
1320 | if (t->nofail_skb) | 1333 | if (t->nofail_skb) |
1321 | kfree_skb(t->nofail_skb); | 1334 | kfree_skb(t->nofail_skb); |
1322 | kfree(t); | 1335 | kfree(t); |
diff --git a/drivers/net/cxgb3/l2t.c b/drivers/net/cxgb3/l2t.c index f452c4003253..41540978a173 100644 --- a/drivers/net/cxgb3/l2t.c +++ b/drivers/net/cxgb3/l2t.c | |||
@@ -300,14 +300,21 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) | |||
300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | 300 | struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, |
301 | struct net_device *dev) | 301 | struct net_device *dev) |
302 | { | 302 | { |
303 | struct l2t_entry *e; | 303 | struct l2t_entry *e = NULL; |
304 | struct l2t_data *d = L2DATA(cdev); | 304 | struct l2t_data *d; |
305 | int hash; | ||
305 | u32 addr = *(u32 *) neigh->primary_key; | 306 | u32 addr = *(u32 *) neigh->primary_key; |
306 | int ifidx = neigh->dev->ifindex; | 307 | int ifidx = neigh->dev->ifindex; |
307 | int hash = arp_hash(addr, ifidx, d); | ||
308 | struct port_info *p = netdev_priv(dev); | 308 | struct port_info *p = netdev_priv(dev); |
309 | int smt_idx = p->port_id; | 309 | int smt_idx = p->port_id; |
310 | 310 | ||
311 | rcu_read_lock(); | ||
312 | d = L2DATA(cdev); | ||
313 | if (!d) | ||
314 | goto done_rcu; | ||
315 | |||
316 | hash = arp_hash(addr, ifidx, d); | ||
317 | |||
311 | write_lock_bh(&d->lock); | 318 | write_lock_bh(&d->lock); |
312 | for (e = d->l2tab[hash].first; e; e = e->next) | 319 | for (e = d->l2tab[hash].first; e; e = e->next) |
313 | if (e->addr == addr && e->ifindex == ifidx && | 320 | if (e->addr == addr && e->ifindex == ifidx && |
@@ -338,6 +345,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct neighbour *neigh, | |||
338 | } | 345 | } |
339 | done: | 346 | done: |
340 | write_unlock_bh(&d->lock); | 347 | write_unlock_bh(&d->lock); |
348 | done_rcu: | ||
349 | rcu_read_unlock(); | ||
341 | return e; | 350 | return e; |
342 | } | 351 | } |
343 | 352 | ||
diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h index 7a12d52ed4fc..c5f54796e2cb 100644 --- a/drivers/net/cxgb3/l2t.h +++ b/drivers/net/cxgb3/l2t.h | |||
@@ -76,6 +76,7 @@ struct l2t_data { | |||
76 | atomic_t nfree; /* number of free entries */ | 76 | atomic_t nfree; /* number of free entries */ |
77 | rwlock_t lock; | 77 | rwlock_t lock; |
78 | struct l2t_entry l2tab[0]; | 78 | struct l2t_entry l2tab[0]; |
79 | struct rcu_head rcu_head; /* to handle rcu cleanup */ | ||
79 | }; | 80 | }; |
80 | 81 | ||
81 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, | 82 | typedef void (*arp_failure_handler_func)(struct t3cdev * dev, |
@@ -99,7 +100,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, | |||
99 | /* | 100 | /* |
100 | * Getting to the L2 data from an offload device. | 101 | * Getting to the L2 data from an offload device. |
101 | */ | 102 | */ |
102 | #define L2DATA(dev) ((dev)->l2opt) | 103 | #define L2DATA(cdev) (rcu_dereference((cdev)->l2opt)) |
103 | 104 | ||
104 | #define W_TCB_L2T_IX 0 | 105 | #define W_TCB_L2T_IX 0 |
105 | #define S_TCB_L2T_IX 7 | 106 | #define S_TCB_L2T_IX 7 |
@@ -126,15 +127,22 @@ static inline int l2t_send(struct t3cdev *dev, struct sk_buff *skb, | |||
126 | return t3_l2t_send_slow(dev, skb, e); | 127 | return t3_l2t_send_slow(dev, skb, e); |
127 | } | 128 | } |
128 | 129 | ||
129 | static inline void l2t_release(struct l2t_data *d, struct l2t_entry *e) | 130 | static inline void l2t_release(struct t3cdev *t, struct l2t_entry *e) |
130 | { | 131 | { |
131 | if (atomic_dec_and_test(&e->refcnt)) | 132 | struct l2t_data *d; |
133 | |||
134 | rcu_read_lock(); | ||
135 | d = L2DATA(t); | ||
136 | |||
137 | if (atomic_dec_and_test(&e->refcnt) && d) | ||
132 | t3_l2e_free(d, e); | 138 | t3_l2e_free(d, e); |
139 | |||
140 | rcu_read_unlock(); | ||
133 | } | 141 | } |
134 | 142 | ||
135 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) | 143 | static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e) |
136 | { | 144 | { |
137 | if (atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ | 145 | if (d && atomic_add_return(1, &e->refcnt) == 1) /* 0 -> 1 transition */ |
138 | atomic_dec(&d->nfree); | 146 | atomic_dec(&d->nfree); |
139 | } | 147 | } |
140 | 148 | ||
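The cxgb3 hunks above convert the per-device L2 table pointer to RCU: L2DATA() is now an rcu_dereference() of cdev->l2opt, readers such as t3_l2t_get() and l2t_release() bracket the access with rcu_read_lock()/rcu_read_unlock() and tolerate a NULL table, and the teardown path unpublishes the pointer with rcu_assign_pointer() before handing the old table to call_rcu(), so in-flight readers finish before it is freed. A condensed sketch of the two sides, using the names introduced in these hunks:

    /* Reader side: only dereference the table inside an RCU read section. */
    static void l2t_reader_sketch(struct t3cdev *cdev)
    {
            struct l2t_data *d;

            rcu_read_lock();
            d = L2DATA(cdev);               /* rcu_dereference(cdev->l2opt) */
            if (d)
                    pr_debug("free l2t entries: %d\n", atomic_read(&d->nfree));
            rcu_read_unlock();
    }

    /* Updater side: unpublish first, free after a grace period. */
    static void l2t_teardown_sketch(struct t3cdev *tdev)
    {
            struct l2t_data *d;

            rcu_read_lock();
            d = L2DATA(tdev);
            rcu_read_unlock();

            rcu_assign_pointer(tdev->l2opt, NULL);  /* new readers see NULL */
            if (d)
                    call_rcu(&d->rcu_head, clean_l2_data); /* defer the free */
    }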
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c index c9957b7f17b5..b4efa292fd6f 100644 --- a/drivers/net/cxgb4/cxgb4_main.c +++ b/drivers/net/cxgb4/cxgb4_main.c | |||
@@ -3712,6 +3712,9 @@ static int __devinit init_one(struct pci_dev *pdev, | |||
3712 | setup_debugfs(adapter); | 3712 | setup_debugfs(adapter); |
3713 | } | 3713 | } |
3714 | 3714 | ||
3715 | /* PCIe EEH recovery on powerpc platforms needs fundamental reset */ | ||
3716 | pdev->needs_freset = 1; | ||
3717 | |||
3715 | if (is_offload(adapter)) | 3718 | if (is_offload(adapter)) |
3716 | attach_ulds(adapter); | 3719 | attach_ulds(adapter); |
3717 | 3720 | ||
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c index 8dd5fccef725..d393f1e764ed 100644 --- a/drivers/net/ibmveth.c +++ b/drivers/net/ibmveth.c | |||
@@ -636,8 +636,8 @@ static int ibmveth_open(struct net_device *netdev) | |||
636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", | 636 | netdev_err(netdev, "unable to request irq 0x%x, rc %d\n", |
637 | netdev->irq, rc); | 637 | netdev->irq, rc); |
638 | do { | 638 | do { |
639 | rc = h_free_logical_lan(adapter->vdev->unit_address); | 639 | lpar_rc = h_free_logical_lan(adapter->vdev->unit_address); |
640 | } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY)); | 640 | } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY)); |
641 | 641 | ||
642 | goto err_out; | 642 | goto err_out; |
643 | } | 643 | } |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 567ff10889be..b8b4ba27b0e7 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -1199,6 +1199,8 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), | 1199 | iowrite32((int_en & ~PCH_GBE_INT_RX_FIFO_ERR), |
1200 | &hw->reg->INT_EN); | 1200 | &hw->reg->INT_EN); |
1201 | pch_gbe_stop_receive(adapter); | 1201 | pch_gbe_stop_receive(adapter); |
1202 | int_st |= ioread32(&hw->reg->INT_ST); | ||
1203 | int_st = int_st & ioread32(&hw->reg->INT_EN); | ||
1202 | } | 1204 | } |
1203 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) | 1205 | if (int_st & PCH_GBE_INT_RX_DMA_ERR) |
1204 | adapter->stats.intr_rx_dma_err_count++; | 1206 | adapter->stats.intr_rx_dma_err_count++; |
@@ -1218,14 +1220,11 @@ static irqreturn_t pch_gbe_intr(int irq, void *data) | |||
1218 | /* Set Pause packet */ | 1220 | /* Set Pause packet */ |
1219 | pch_gbe_mac_set_pause_packet(hw); | 1221 | pch_gbe_mac_set_pause_packet(hw); |
1220 | } | 1222 | } |
1221 | if ((int_en & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) | ||
1222 | == 0) { | ||
1223 | return IRQ_HANDLED; | ||
1224 | } | ||
1225 | } | 1223 | } |
1226 | 1224 | ||
1227 | /* When request status is Receive interruption */ | 1225 | /* When request status is Receive interruption */ |
1228 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT))) { | 1226 | if ((int_st & (PCH_GBE_INT_RX_DMA_CMPLT | PCH_GBE_INT_TX_CMPLT)) || |
1227 | (adapter->rx_stop_flag == true)) { | ||
1229 | if (likely(napi_schedule_prep(&adapter->napi))) { | 1228 | if (likely(napi_schedule_prep(&adapter->napi))) { |
1230 | /* Enable only Rx Descriptor empty */ | 1229 | /* Enable only Rx Descriptor empty */ |
1231 | atomic_inc(&adapter->irq_sem); | 1230 | atomic_inc(&adapter->irq_sem); |
@@ -1385,7 +1384,7 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1385 | struct sk_buff *skb; | 1384 | struct sk_buff *skb; |
1386 | unsigned int i; | 1385 | unsigned int i; |
1387 | unsigned int cleaned_count = 0; | 1386 | unsigned int cleaned_count = 0; |
1388 | bool cleaned = false; | 1387 | bool cleaned = true; |
1389 | 1388 | ||
1390 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); | 1389 | pr_debug("next_to_clean : %d\n", tx_ring->next_to_clean); |
1391 | 1390 | ||
@@ -1396,7 +1395,6 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1396 | 1395 | ||
1397 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { | 1396 | while ((tx_desc->gbec_status & DSC_INIT16) == 0x0000) { |
1398 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); | 1397 | pr_debug("gbec_status:0x%04x\n", tx_desc->gbec_status); |
1399 | cleaned = true; | ||
1400 | buffer_info = &tx_ring->buffer_info[i]; | 1398 | buffer_info = &tx_ring->buffer_info[i]; |
1401 | skb = buffer_info->skb; | 1399 | skb = buffer_info->skb; |
1402 | 1400 | ||
@@ -1439,8 +1437,10 @@ pch_gbe_clean_tx(struct pch_gbe_adapter *adapter, | |||
1439 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); | 1437 | tx_desc = PCH_GBE_TX_DESC(*tx_ring, i); |
1440 | 1438 | ||
1441 | /* weight of a sort for tx, to avoid endless transmit cleanup */ | 1439 | /* weight of a sort for tx, to avoid endless transmit cleanup */ |
1442 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) | 1440 | if (cleaned_count++ == PCH_GBE_TX_WEIGHT) { |
1441 | cleaned = false; | ||
1443 | break; | 1442 | break; |
1443 | } | ||
1444 | } | 1444 | } |
1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", | 1445 | pr_debug("called pch_gbe_unmap_and_free_tx_resource() %d count\n", |
1446 | cleaned_count); | 1446 | cleaned_count); |
@@ -2168,7 +2168,6 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2168 | { | 2168 | { |
2169 | struct pch_gbe_adapter *adapter = | 2169 | struct pch_gbe_adapter *adapter = |
2170 | container_of(napi, struct pch_gbe_adapter, napi); | 2170 | container_of(napi, struct pch_gbe_adapter, napi); |
2171 | struct net_device *netdev = adapter->netdev; | ||
2172 | int work_done = 0; | 2171 | int work_done = 0; |
2173 | bool poll_end_flag = false; | 2172 | bool poll_end_flag = false; |
2174 | bool cleaned = false; | 2173 | bool cleaned = false; |
@@ -2176,33 +2175,32 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget) | |||
2176 | 2175 | ||
2177 | pr_debug("budget : %d\n", budget); | 2176 | pr_debug("budget : %d\n", budget); |
2178 | 2177 | ||
2179 | /* Keep link state information with original netdev */ | 2178 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); |
2180 | if (!netif_carrier_ok(netdev)) { | 2179 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); |
2180 | |||
2181 | if (!cleaned) | ||
2182 | work_done = budget; | ||
2183 | /* If no Tx and not enough Rx work done, | ||
2184 | * exit the polling mode | ||
2185 | */ | ||
2186 | if (work_done < budget) | ||
2181 | poll_end_flag = true; | 2187 | poll_end_flag = true; |
2182 | } else { | 2188 | |
2183 | pch_gbe_clean_rx(adapter, adapter->rx_ring, &work_done, budget); | 2189 | if (poll_end_flag) { |
2190 | napi_complete(napi); | ||
2191 | if (adapter->rx_stop_flag) { | ||
2192 | adapter->rx_stop_flag = false; | ||
2193 | pch_gbe_start_receive(&adapter->hw); | ||
2194 | } | ||
2195 | pch_gbe_irq_enable(adapter); | ||
2196 | } else | ||
2184 | if (adapter->rx_stop_flag) { | 2197 | if (adapter->rx_stop_flag) { |
2185 | adapter->rx_stop_flag = false; | 2198 | adapter->rx_stop_flag = false; |
2186 | pch_gbe_start_receive(&adapter->hw); | 2199 | pch_gbe_start_receive(&adapter->hw); |
2187 | int_en = ioread32(&adapter->hw.reg->INT_EN); | 2200 | int_en = ioread32(&adapter->hw.reg->INT_EN); |
2188 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), | 2201 | iowrite32((int_en | PCH_GBE_INT_RX_FIFO_ERR), |
2189 | &adapter->hw.reg->INT_EN); | 2202 | &adapter->hw.reg->INT_EN); |
2190 | } | 2203 | } |
2191 | cleaned = pch_gbe_clean_tx(adapter, adapter->tx_ring); | ||
2192 | |||
2193 | if (cleaned) | ||
2194 | work_done = budget; | ||
2195 | /* If no Tx and not enough Rx work done, | ||
2196 | * exit the polling mode | ||
2197 | */ | ||
2198 | if ((work_done < budget) || !netif_running(netdev)) | ||
2199 | poll_end_flag = true; | ||
2200 | } | ||
2201 | |||
2202 | if (poll_end_flag) { | ||
2203 | napi_complete(napi); | ||
2204 | pch_gbe_irq_enable(adapter); | ||
2205 | } | ||
2206 | 2204 | ||
2207 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", | 2205 | pr_debug("poll_end_flag : %d work_done : %d budget : %d\n", |
2208 | poll_end_flag, work_done, budget); | 2206 | poll_end_flag, work_done, budget); |
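
The rewritten poll routine follows the standard NAPI contract: clean RX and TX unconditionally, treat a TX cleanup that hit its weight limit as "budget fully consumed" so the core keeps polling, and only call napi_complete() and re-enable interrupts when work_done stayed below budget. A generic sketch of that contract; my_clean_rx(), my_clean_tx() and my_irq_enable() are hypothetical stand-ins for the driver's helpers:

    #include <linux/netdevice.h>

    struct my_adapter {
            struct napi_struct napi;
    };

    /* Trivial stand-ins for the driver's real cleanup helpers. */
    static int my_clean_rx(struct my_adapter *ad, int budget) { return 0; }
    static bool my_clean_tx(struct my_adapter *ad) { return true; }
    static void my_irq_enable(struct my_adapter *ad) { }

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
            struct my_adapter *ad = container_of(napi, struct my_adapter, napi);
            int work_done = my_clean_rx(ad, budget);

            /* TX cleanup cut short by its weight limit: ask to be polled again. */
            if (!my_clean_tx(ad))
                    work_done = budget;

            if (work_done < budget) {
                    /* All work done: leave polled mode, then unmask interrupts. */
                    napi_complete(napi);
                    my_irq_enable(ad);
            }

            return work_done;
    }
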
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index cb6e0b486b1e..edd7304773eb 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c | |||
@@ -589,7 +589,7 @@ static void decode_rxts(struct dp83640_private *dp83640, | |||
589 | prune_rx_ts(dp83640); | 589 | prune_rx_ts(dp83640); |
590 | 590 | ||
591 | if (list_empty(&dp83640->rxpool)) { | 591 | if (list_empty(&dp83640->rxpool)) { |
592 | pr_warning("dp83640: rx timestamp pool is empty\n"); | 592 | pr_debug("dp83640: rx timestamp pool is empty\n"); |
593 | goto out; | 593 | goto out; |
594 | } | 594 | } |
595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); | 595 | rxts = list_first_entry(&dp83640->rxpool, struct rxts, list); |
@@ -612,7 +612,7 @@ static void decode_txts(struct dp83640_private *dp83640, | |||
612 | skb = skb_dequeue(&dp83640->tx_queue); | 612 | skb = skb_dequeue(&dp83640->tx_queue); |
613 | 613 | ||
614 | if (!skb) { | 614 | if (!skb) { |
615 | pr_warning("dp83640: have timestamp but tx_queue empty\n"); | 615 | pr_debug("dp83640: have timestamp but tx_queue empty\n"); |
616 | return; | 616 | return; |
617 | } | 617 | } |
618 | ns = phy2txts(phy_txts); | 618 | ns = phy2txts(phy_txts); |
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h index 2339728a7306..3e69c631ebb4 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h +++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h | |||
@@ -1514,7 +1514,7 @@ static const u32 ar9300_2p2_mac_core[][2] = { | |||
1514 | {0x00008258, 0x00000000}, | 1514 | {0x00008258, 0x00000000}, |
1515 | {0x0000825c, 0x40000000}, | 1515 | {0x0000825c, 0x40000000}, |
1516 | {0x00008260, 0x00080922}, | 1516 | {0x00008260, 0x00080922}, |
1517 | {0x00008264, 0x9bc00010}, | 1517 | {0x00008264, 0x9d400010}, |
1518 | {0x00008268, 0xffffffff}, | 1518 | {0x00008268, 0xffffffff}, |
1519 | {0x0000826c, 0x0000ffff}, | 1519 | {0x0000826c, 0x0000ffff}, |
1520 | {0x00008270, 0x00000000}, | 1520 | {0x00008270, 0x00000000}, |
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c index 9a4850154fb2..4c21f8cbdeb5 100644 --- a/drivers/net/wireless/ath/ath9k/recv.c +++ b/drivers/net/wireless/ath/ath9k/recv.c | |||
@@ -205,14 +205,22 @@ static void ath_rx_remove_buffer(struct ath_softc *sc, | |||
205 | 205 | ||
206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) | 206 | static void ath_rx_edma_cleanup(struct ath_softc *sc) |
207 | { | 207 | { |
208 | struct ath_hw *ah = sc->sc_ah; | ||
209 | struct ath_common *common = ath9k_hw_common(ah); | ||
208 | struct ath_buf *bf; | 210 | struct ath_buf *bf; |
209 | 211 | ||
210 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); | 212 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_LP); |
211 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); | 213 | ath_rx_remove_buffer(sc, ATH9K_RX_QUEUE_HP); |
212 | 214 | ||
213 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { | 215 | list_for_each_entry(bf, &sc->rx.rxbuf, list) { |
214 | if (bf->bf_mpdu) | 216 | if (bf->bf_mpdu) { |
217 | dma_unmap_single(sc->dev, bf->bf_buf_addr, | ||
218 | common->rx_bufsize, | ||
219 | DMA_BIDIRECTIONAL); | ||
215 | dev_kfree_skb_any(bf->bf_mpdu); | 220 | dev_kfree_skb_any(bf->bf_mpdu); |
221 | bf->bf_buf_addr = 0; | ||
222 | bf->bf_mpdu = NULL; | ||
223 | } | ||
216 | } | 224 | } |
217 | 225 | ||
218 | INIT_LIST_HEAD(&sc->rx.rxbuf); | 226 | INIT_LIST_HEAD(&sc->rx.rxbuf); |
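
The rx cleanup fix pairs every dev_kfree_skb_any() with the dma_unmap_single() that matches the mapping made when the buffer was set up, and clears bf_buf_addr/bf_mpdu so the entry cannot be unmapped or freed a second time. The same teardown shape in isolation, with invented field names:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct my_rx_buf {
            struct sk_buff *skb;
            dma_addr_t dma;
    };

    /* Undo exactly what the RX setup path did: unmap first, then free the
     * skb, then clear the bookkeeping so the slot is safe to revisit. */
    static void my_rx_buf_cleanup(struct device *dev, struct my_rx_buf *buf,
                                  size_t bufsize)
    {
            if (!buf->skb)
                    return;

            dma_unmap_single(dev, buf->dma, bufsize, DMA_BIDIRECTIONAL);
            dev_kfree_skb_any(buf->skb);
            buf->skb = NULL;
            buf->dma = 0;
    }
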
diff --git a/drivers/net/wireless/iwlegacy/iwl-core.c b/drivers/net/wireless/iwlegacy/iwl-core.c index 35cd2537e7fd..e5971fe9d169 100644 --- a/drivers/net/wireless/iwlegacy/iwl-core.c +++ b/drivers/net/wireless/iwlegacy/iwl-core.c | |||
@@ -937,7 +937,7 @@ void iwl_legacy_irq_handle_error(struct iwl_priv *priv) | |||
937 | &priv->contexts[IWL_RXON_CTX_BSS]); | 937 | &priv->contexts[IWL_RXON_CTX_BSS]); |
938 | #endif | 938 | #endif |
939 | 939 | ||
940 | wake_up_interruptible(&priv->wait_command_queue); | 940 | wake_up(&priv->wait_command_queue); |
941 | 941 | ||
942 | /* Keep the restart process from trying to send host | 942 | /* Keep the restart process from trying to send host |
943 | * commands by clearing the INIT status bit */ | 943 | * commands by clearing the INIT status bit */ |
@@ -1746,7 +1746,7 @@ int iwl_legacy_force_reset(struct iwl_priv *priv, bool external) | |||
1746 | 1746 | ||
1747 | /* Set the FW error flag -- cleared on iwl_down */ | 1747 | /* Set the FW error flag -- cleared on iwl_down */ |
1748 | set_bit(STATUS_FW_ERROR, &priv->status); | 1748 | set_bit(STATUS_FW_ERROR, &priv->status); |
1749 | wake_up_interruptible(&priv->wait_command_queue); | 1749 | wake_up(&priv->wait_command_queue); |
1750 | /* | 1750 | /* |
1751 | * Keep the restart process from trying to send host | 1751 | * Keep the restart process from trying to send host |
1752 | * commands by clearing the INIT status bit | 1752 | * commands by clearing the INIT status bit |
diff --git a/drivers/net/wireless/iwlegacy/iwl-hcmd.c b/drivers/net/wireless/iwlegacy/iwl-hcmd.c index 62b4b09122cb..ce1fc9feb61f 100644 --- a/drivers/net/wireless/iwlegacy/iwl-hcmd.c +++ b/drivers/net/wireless/iwlegacy/iwl-hcmd.c | |||
@@ -167,7 +167,7 @@ int iwl_legacy_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd) | |||
167 | goto out; | 167 | goto out; |
168 | } | 168 | } |
169 | 169 | ||
170 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 170 | ret = wait_event_timeout(priv->wait_command_queue, |
171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), | 171 | !test_bit(STATUS_HCMD_ACTIVE, &priv->status), |
172 | HOST_COMPLETE_TIMEOUT); | 172 | HOST_COMPLETE_TIMEOUT); |
173 | if (!ret) { | 173 | if (!ret) { |
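
This waiter change is what drives the wake_up()/wake_up_all() conversions in the surrounding iwlegacy files: wait_event_timeout() sleeps in TASK_UNINTERRUPTIBLE, and wake_up_interruptible() does not wake uninterruptible sleepers, so the wake-up side has to switch to the plain variants as well. A minimal pairing, assuming a hypothetical completion flag:

    #include <linux/wait.h>
    #include <linux/jiffies.h>
    #include <linux/errno.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_waitq);
    static bool my_done;

    /* Waiter: uninterruptible, bounded; returns 0 on success. */
    static int my_wait_for_done(void)
    {
            long ret = wait_event_timeout(my_waitq, my_done,
                                          msecs_to_jiffies(2000));
            return ret ? 0 : -ETIMEDOUT;
    }

    /* Completer: must use wake_up(), not wake_up_interruptible(), or the
     * TASK_UNINTERRUPTIBLE waiter above is never woken. */
    static void my_signal_done(void)
    {
            my_done = true;
            wake_up(&my_waitq);
    }
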
diff --git a/drivers/net/wireless/iwlegacy/iwl-tx.c b/drivers/net/wireless/iwlegacy/iwl-tx.c index 4fff995c6f3e..ef9e268bf8a0 100644 --- a/drivers/net/wireless/iwlegacy/iwl-tx.c +++ b/drivers/net/wireless/iwlegacy/iwl-tx.c | |||
@@ -625,6 +625,8 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
625 | cmd = txq->cmd[cmd_index]; | 625 | cmd = txq->cmd[cmd_index]; |
626 | meta = &txq->meta[cmd_index]; | 626 | meta = &txq->meta[cmd_index]; |
627 | 627 | ||
628 | txq->time_stamp = jiffies; | ||
629 | |||
628 | pci_unmap_single(priv->pci_dev, | 630 | pci_unmap_single(priv->pci_dev, |
629 | dma_unmap_addr(meta, mapping), | 631 | dma_unmap_addr(meta, mapping), |
630 | dma_unmap_len(meta, len), | 632 | dma_unmap_len(meta, len), |
@@ -645,7 +647,7 @@ iwl_legacy_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) | |||
645 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); | 647 | clear_bit(STATUS_HCMD_ACTIVE, &priv->status); |
646 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", | 648 | IWL_DEBUG_INFO(priv, "Clearing HCMD_ACTIVE for command %s\n", |
647 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); | 649 | iwl_legacy_get_cmd_string(cmd->hdr.cmd)); |
648 | wake_up_interruptible(&priv->wait_command_queue); | 650 | wake_up(&priv->wait_command_queue); |
649 | } | 651 | } |
650 | 652 | ||
651 | /* Mark as unmapped */ | 653 | /* Mark as unmapped */ |
diff --git a/drivers/net/wireless/iwlegacy/iwl3945-base.c b/drivers/net/wireless/iwlegacy/iwl3945-base.c index 795826a014ed..66ee15629a76 100644 --- a/drivers/net/wireless/iwlegacy/iwl3945-base.c +++ b/drivers/net/wireless/iwlegacy/iwl3945-base.c | |||
@@ -841,7 +841,7 @@ static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, | |||
841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 841 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 842 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
843 | else | 843 | else |
844 | wake_up_interruptible(&priv->wait_command_queue); | 844 | wake_up(&priv->wait_command_queue); |
845 | } | 845 | } |
846 | 846 | ||
847 | /** | 847 | /** |
@@ -2269,7 +2269,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv) | |||
2269 | iwl3945_reg_txpower_periodic(priv); | 2269 | iwl3945_reg_txpower_periodic(priv); |
2270 | 2270 | ||
2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 2271 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
2272 | wake_up_interruptible(&priv->wait_command_queue); | 2272 | wake_up(&priv->wait_command_queue); |
2273 | 2273 | ||
2274 | return; | 2274 | return; |
2275 | 2275 | ||
@@ -2300,7 +2300,7 @@ static void __iwl3945_down(struct iwl_priv *priv) | |||
2300 | iwl_legacy_clear_driver_stations(priv); | 2300 | iwl_legacy_clear_driver_stations(priv); |
2301 | 2301 | ||
2302 | /* Unblock any waiting calls */ | 2302 | /* Unblock any waiting calls */ |
2303 | wake_up_interruptible_all(&priv->wait_command_queue); | 2303 | wake_up_all(&priv->wait_command_queue); |
2304 | 2304 | ||
2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 2305 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
2306 | * exiting the module */ | 2306 | * exiting the module */ |
@@ -2853,7 +2853,7 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw) | |||
2853 | 2853 | ||
2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from | 2854 | /* Wait for START_ALIVE from ucode. Otherwise callbacks from |
2855 | * mac80211 will not be run successfully. */ | 2855 | * mac80211 will not be run successfully. */ |
2856 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2856 | ret = wait_event_timeout(priv->wait_command_queue, |
2857 | test_bit(STATUS_READY, &priv->status), | 2857 | test_bit(STATUS_READY, &priv->status), |
2858 | UCODE_READY_TIMEOUT); | 2858 | UCODE_READY_TIMEOUT); |
2859 | if (!ret) { | 2859 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlegacy/iwl4965-base.c b/drivers/net/wireless/iwlegacy/iwl4965-base.c index 14334668034e..aa0c2539761e 100644 --- a/drivers/net/wireless/iwlegacy/iwl4965-base.c +++ b/drivers/net/wireless/iwlegacy/iwl4965-base.c | |||
@@ -576,7 +576,7 @@ static void iwl4965_rx_card_state_notif(struct iwl_priv *priv, | |||
576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, | 576 | wiphy_rfkill_set_hw_state(priv->hw->wiphy, |
577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); | 577 | test_bit(STATUS_RF_KILL_HW, &priv->status)); |
578 | else | 578 | else |
579 | wake_up_interruptible(&priv->wait_command_queue); | 579 | wake_up(&priv->wait_command_queue); |
580 | } | 580 | } |
581 | 581 | ||
582 | /** | 582 | /** |
@@ -926,7 +926,7 @@ static void iwl4965_irq_tasklet(struct iwl_priv *priv) | |||
926 | handled |= CSR_INT_BIT_FH_TX; | 926 | handled |= CSR_INT_BIT_FH_TX; |
927 | /* Wake up uCode load routine, now that load is complete */ | 927 | /* Wake up uCode load routine, now that load is complete */ |
928 | priv->ucode_write_complete = 1; | 928 | priv->ucode_write_complete = 1; |
929 | wake_up_interruptible(&priv->wait_command_queue); | 929 | wake_up(&priv->wait_command_queue); |
930 | } | 930 | } |
931 | 931 | ||
932 | if (inta & ~handled) { | 932 | if (inta & ~handled) { |
@@ -1795,7 +1795,7 @@ static void iwl4965_alive_start(struct iwl_priv *priv) | |||
1795 | iwl4965_rf_kill_ct_config(priv); | 1795 | iwl4965_rf_kill_ct_config(priv); |
1796 | 1796 | ||
1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); | 1797 | IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); |
1798 | wake_up_interruptible(&priv->wait_command_queue); | 1798 | wake_up(&priv->wait_command_queue); |
1799 | 1799 | ||
1800 | iwl_legacy_power_update_mode(priv, true); | 1800 | iwl_legacy_power_update_mode(priv, true); |
1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); | 1801 | IWL_DEBUG_INFO(priv, "Updated power mode\n"); |
@@ -1828,7 +1828,7 @@ static void __iwl4965_down(struct iwl_priv *priv) | |||
1828 | iwl_legacy_clear_driver_stations(priv); | 1828 | iwl_legacy_clear_driver_stations(priv); |
1829 | 1829 | ||
1830 | /* Unblock any waiting calls */ | 1830 | /* Unblock any waiting calls */ |
1831 | wake_up_interruptible_all(&priv->wait_command_queue); | 1831 | wake_up_all(&priv->wait_command_queue); |
1832 | 1832 | ||
1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually | 1833 | /* Wipe out the EXIT_PENDING status bit if we are not actually |
1834 | * exiting the module */ | 1834 | * exiting the module */ |
@@ -2266,7 +2266,7 @@ int iwl4965_mac_start(struct ieee80211_hw *hw) | |||
2266 | 2266 | ||
2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from | 2267 | /* Wait for START_ALIVE from Run Time ucode. Otherwise callbacks from |
2268 | * mac80211 will not be run successfully. */ | 2268 | * mac80211 will not be run successfully. */ |
2269 | ret = wait_event_interruptible_timeout(priv->wait_command_queue, | 2269 | ret = wait_event_timeout(priv->wait_command_queue, |
2270 | test_bit(STATUS_READY, &priv->status), | 2270 | test_bit(STATUS_READY, &priv->status), |
2271 | UCODE_READY_TIMEOUT); | 2271 | UCODE_READY_TIMEOUT); |
2272 | if (!ret) { | 2272 | if (!ret) { |
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c index dd6937e97055..77e528f5db88 100644 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c | |||
@@ -405,31 +405,33 @@ int iwl_mac_hw_scan(struct ieee80211_hw *hw, | |||
405 | 405 | ||
406 | mutex_lock(&priv->mutex); | 406 | mutex_lock(&priv->mutex); |
407 | 407 | ||
408 | if (test_bit(STATUS_SCANNING, &priv->status) && | ||
409 | priv->scan_type != IWL_SCAN_NORMAL) { | ||
410 | IWL_DEBUG_SCAN(priv, "Scan already in progress.\n"); | ||
411 | ret = -EAGAIN; | ||
412 | goto out_unlock; | ||
413 | } | ||
414 | |||
415 | /* mac80211 will only ask for one band at a time */ | ||
416 | priv->scan_request = req; | ||
417 | priv->scan_vif = vif; | ||
418 | |||
419 | /* | 408 | /* |
420 | * If an internal scan is in progress, just set | 409 | * If an internal scan is in progress, just set |
421 | * up the scan_request as per above. | 410 | * up the scan_request as per above. |
422 | */ | 411 | */ |
423 | if (priv->scan_type != IWL_SCAN_NORMAL) { | 412 | if (priv->scan_type != IWL_SCAN_NORMAL) { |
424 | IWL_DEBUG_SCAN(priv, "SCAN request during internal scan\n"); | 413 | IWL_DEBUG_SCAN(priv, |
414 | "SCAN request during internal scan - defer\n"); | ||
415 | priv->scan_request = req; | ||
416 | priv->scan_vif = vif; | ||
425 | ret = 0; | 417 | ret = 0; |
426 | } else | 418 | } else { |
419 | priv->scan_request = req; | ||
420 | priv->scan_vif = vif; | ||
421 | /* | ||
422 | * mac80211 will only ask for one band at a time | ||
423 | * so using channels[0] here is ok | ||
424 | */ | ||
427 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, | 425 | ret = iwl_scan_initiate(priv, vif, IWL_SCAN_NORMAL, |
428 | req->channels[0]->band); | 426 | req->channels[0]->band); |
427 | if (ret) { | ||
428 | priv->scan_request = NULL; | ||
429 | priv->scan_vif = NULL; | ||
430 | } | ||
431 | } | ||
429 | 432 | ||
430 | IWL_DEBUG_MAC80211(priv, "leave\n"); | 433 | IWL_DEBUG_MAC80211(priv, "leave\n"); |
431 | 434 | ||
432 | out_unlock: | ||
433 | mutex_unlock(&priv->mutex); | 435 | mutex_unlock(&priv->mutex); |
434 | 436 | ||
435 | return ret; | 437 | return ret; |
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c index 8b1cef0ffde6..4bf3cf457ef0 100644 --- a/drivers/net/wireless/rtlwifi/usb.c +++ b/drivers/net/wireless/rtlwifi/usb.c | |||
@@ -863,6 +863,7 @@ static void _rtl_usb_tx_preprocess(struct ieee80211_hw *hw, struct sk_buff *skb, | |||
863 | u8 tid = 0; | 863 | u8 tid = 0; |
864 | u16 seq_number = 0; | 864 | u16 seq_number = 0; |
865 | 865 | ||
866 | memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc)); | ||
866 | if (ieee80211_is_auth(fc)) { | 867 | if (ieee80211_is_auth(fc)) { |
867 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); | 868 | RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n")); |
868 | rtl_ips_nic_on(hw); | 869 | rtl_ips_nic_on(hw); |
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 0ca86f9ec4ed..182562952c79 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c | |||
@@ -327,12 +327,12 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref, | |||
327 | xenvif_get(vif); | 327 | xenvif_get(vif); |
328 | 328 | ||
329 | rtnl_lock(); | 329 | rtnl_lock(); |
330 | if (netif_running(vif->dev)) | ||
331 | xenvif_up(vif); | ||
332 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) | 330 | if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN) |
333 | dev_set_mtu(vif->dev, ETH_DATA_LEN); | 331 | dev_set_mtu(vif->dev, ETH_DATA_LEN); |
334 | netdev_update_features(vif->dev); | 332 | netdev_update_features(vif->dev); |
335 | netif_carrier_on(vif->dev); | 333 | netif_carrier_on(vif->dev); |
334 | if (netif_running(vif->dev)) | ||
335 | xenvif_up(vif); | ||
336 | rtnl_unlock(); | 336 | rtnl_unlock(); |
337 | 337 | ||
338 | return 0; | 338 | return 0; |
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 4e84fd4a4312..e9651f0a8817 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c | |||
@@ -77,7 +77,7 @@ unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE; | |||
77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; | 77 | unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE; |
78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; | 78 | unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE; |
79 | 79 | ||
80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE; | 80 | enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * The default CLS is used if arch didn't set CLS explicitly and not | 83 | * The default CLS is used if arch didn't set CLS explicitly and not |
@@ -3568,10 +3568,14 @@ static int __init pci_setup(char *str) | |||
3568 | pci_hotplug_io_size = memparse(str + 9, &str); | 3568 | pci_hotplug_io_size = memparse(str + 9, &str); |
3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { | 3569 | } else if (!strncmp(str, "hpmemsize=", 10)) { |
3570 | pci_hotplug_mem_size = memparse(str + 10, &str); | 3570 | pci_hotplug_mem_size = memparse(str + 10, &str); |
3571 | } else if (!strncmp(str, "pcie_bus_tune_off", 17)) { | ||
3572 | pcie_bus_config = PCIE_BUS_TUNE_OFF; | ||
3571 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { | 3573 | } else if (!strncmp(str, "pcie_bus_safe", 13)) { |
3572 | pcie_bus_config = PCIE_BUS_SAFE; | 3574 | pcie_bus_config = PCIE_BUS_SAFE; |
3573 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { | 3575 | } else if (!strncmp(str, "pcie_bus_perf", 13)) { |
3574 | pcie_bus_config = PCIE_BUS_PERFORMANCE; | 3576 | pcie_bus_config = PCIE_BUS_PERFORMANCE; |
3577 | } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) { | ||
3578 | pcie_bus_config = PCIE_BUS_PEER2PEER; | ||
3575 | } else { | 3579 | } else { |
3576 | printk(KERN_ERR "PCI: Unknown option `%s'\n", | 3580 | printk(KERN_ERR "PCI: Unknown option `%s'\n", |
3577 | str); | 3581 | str); |
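
Together with the changed default at the top of this file (PCIE_BUS_SAFE to PCIE_BUS_TUNE_OFF), four pci= keywords now select the Max Payload Size policy. The summary below is paraphrased from this hunk and the probe.c change that follows, not from formal documentation, and the enum is reproduced only schematically (the authoritative definition lives in include/linux/pci.h):

    /* Kernel command line usage, e.g.:  pci=pcie_bus_safe
     *
     *   pcie_bus_tune_off   - new default: leave MPS as firmware configured it
     *   pcie_bus_safe       - use the smallest MPS any device on the bus supports
     *   pcie_bus_perf       - tune MPS for performance
     *   pcie_bus_peer2peer  - force the minimum 128-byte MPS everywhere, so
     *                         peer-to-peer DMA never exceeds an endpoint's limit
     */
    enum pcie_bus_config_types {
            PCIE_BUS_TUNE_OFF,
            PCIE_BUS_SAFE,
            PCIE_BUS_PERFORMANCE,
            PCIE_BUS_PEER2PEER,
    };
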
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c index f3f94a5c068f..6ab6bd3df4b2 100644 --- a/drivers/pci/probe.c +++ b/drivers/pci/probe.c | |||
@@ -1458,12 +1458,24 @@ static int pcie_bus_configure_set(struct pci_dev *dev, void *data) | |||
1458 | */ | 1458 | */ |
1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) | 1459 | void pcie_bus_configure_settings(struct pci_bus *bus, u8 mpss) |
1460 | { | 1460 | { |
1461 | u8 smpss = mpss; | 1461 | u8 smpss; |
1462 | 1462 | ||
1463 | if (!pci_is_pcie(bus->self)) | 1463 | if (!pci_is_pcie(bus->self)) |
1464 | return; | 1464 | return; |
1465 | 1465 | ||
1466 | if (pcie_bus_config == PCIE_BUS_TUNE_OFF) | ||
1467 | return; | ||
1468 | |||
1469 | /* FIXME - Peer to peer DMA is possible, though the endpoint would need | ||
1470 | * to be aware of the MPS of the destination. To work around this, | ||
1471 | * simply force the MPS of the entire system to the smallest possible. | ||
1472 | */ | ||
1473 | if (pcie_bus_config == PCIE_BUS_PEER2PEER) | ||
1474 | smpss = 0; | ||
1475 | |||
1466 | if (pcie_bus_config == PCIE_BUS_SAFE) { | 1476 | if (pcie_bus_config == PCIE_BUS_SAFE) { |
1477 | smpss = mpss; | ||
1478 | |||
1467 | pcie_find_smpss(bus->self, &smpss); | 1479 | pcie_find_smpss(bus->self, &smpss); |
1468 | pci_walk_bus(bus, pcie_find_smpss, &smpss); | 1480 | pci_walk_bus(bus, pcie_find_smpss, &smpss); |
1469 | } | 1481 | } |
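
Control flow after this change: PCIE_BUS_TUNE_OFF returns before anything is touched, PCIE_BUS_PEER2PEER forces the smallest MPS encoding (0, i.e. 128 bytes) so peer-to-peer DMA cannot exceed any endpoint's capability, and only PCIE_BUS_SAFE scans the bus for the minimum supported value. Roughly, with pick_smpss() and bus_min invented for the sketch (they are not the real pcie_find_smpss()/pci_walk_bus() interface):

    enum cfg { TUNE_OFF, SAFE, PERFORMANCE, PEER2PEER };

    /* Returns 0 and sets *out when this helper decides the ceiling,
     * -1 when the setting is left alone or handled elsewhere.
     * '*out' is the PCIe MPS field encoding: 0 = 128B, 1 = 256B, ... */
    static int pick_smpss(enum cfg cfg, unsigned char bus_min, unsigned char *out)
    {
            switch (cfg) {
            case TUNE_OFF:
                    return -1;              /* keep firmware settings */
            case PEER2PEER:
                    *out = 0;               /* force 128 bytes system-wide */
                    return 0;
            case SAFE:
                    *out = bus_min;         /* smallest value seen on the bus */
                    return 0;
            default:
                    return -1;              /* PERFORMANCE handled elsewhere */
            }
    }
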
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index cbde448f9947..eb3140ee821e 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -654,8 +654,8 @@ static struct io_subchannel_private console_priv; | |||
654 | static int console_subchannel_in_use; | 654 | static int console_subchannel_in_use; |
655 | 655 | ||
656 | /* | 656 | /* |
657 | * Use tpi to get a pending interrupt, call the interrupt handler and | 657 | * Use cio_tpi to get a pending interrupt and call the interrupt handler. |
658 | * return a pointer to the subchannel structure. | 658 | * Return non-zero if an interrupt was processed, zero otherwise. |
659 | */ | 659 | */ |
660 | static int cio_tpi(void) | 660 | static int cio_tpi(void) |
661 | { | 661 | { |
@@ -667,6 +667,10 @@ static int cio_tpi(void) | |||
667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; | 667 | tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id; |
668 | if (tpi(NULL) != 1) | 668 | if (tpi(NULL) != 1) |
669 | return 0; | 669 | return 0; |
670 | if (tpi_info->adapter_IO) { | ||
671 | do_adapter_IO(tpi_info->isc); | ||
672 | return 1; | ||
673 | } | ||
670 | irb = (struct irb *)&S390_lowcore.irb; | 674 | irb = (struct irb *)&S390_lowcore.irb; |
671 | /* Store interrupt response block to lowcore. */ | 675 | /* Store interrupt response block to lowcore. */ |
672 | if (tsch(tpi_info->schid, irb) != 0) | 676 | if (tsch(tpi_info->schid, irb) != 0) |
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c index b7bd5b0cc7aa..3868ab2397c6 100644 --- a/drivers/scsi/3w-9xxx.c +++ b/drivers/scsi/3w-9xxx.c | |||
@@ -1800,10 +1800,12 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_ | |||
1800 | switch (retval) { | 1800 | switch (retval) { |
1801 | case SCSI_MLQUEUE_HOST_BUSY: | 1801 | case SCSI_MLQUEUE_HOST_BUSY: |
1802 | twa_free_request_id(tw_dev, request_id); | 1802 | twa_free_request_id(tw_dev, request_id); |
1803 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1803 | break; | 1804 | break; |
1804 | case 1: | 1805 | case 1: |
1805 | tw_dev->state[request_id] = TW_S_COMPLETED; | 1806 | tw_dev->state[request_id] = TW_S_COMPLETED; |
1806 | twa_free_request_id(tw_dev, request_id); | 1807 | twa_free_request_id(tw_dev, request_id); |
1808 | twa_unmap_scsi_data(tw_dev, request_id); | ||
1807 | SCpnt->result = (DID_ERROR << 16); | 1809 | SCpnt->result = (DID_ERROR << 16); |
1808 | done(SCpnt); | 1810 | done(SCpnt); |
1809 | retval = 0; | 1811 | retval = 0; |
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 8d9dae89f065..3878b7395081 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig | |||
@@ -837,6 +837,7 @@ config SCSI_ISCI | |||
837 | # (temporary): known alpha quality driver | 837 | # (temporary): known alpha quality driver |
838 | depends on EXPERIMENTAL | 838 | depends on EXPERIMENTAL |
839 | select SCSI_SAS_LIBSAS | 839 | select SCSI_SAS_LIBSAS |
840 | select SCSI_SAS_HOST_SMP | ||
840 | ---help--- | 841 | ---help--- |
841 | This driver supports the 6Gb/s SAS capabilities of the storage | 842 | This driver supports the 6Gb/s SAS capabilities of the storage |
842 | control unit found in the Intel(R) C600 series chipset. | 843 | control unit found in the Intel(R) C600 series chipset. |
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 3c08f5352b2d..6153a66a8a31 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile | |||
@@ -88,7 +88,7 @@ obj-$(CONFIG_SCSI_QLOGIC_FAS) += qlogicfas408.o qlogicfas.o | |||
88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o | 88 | obj-$(CONFIG_PCMCIA_QLOGIC) += qlogicfas408.o |
89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o | 89 | obj-$(CONFIG_SCSI_QLOGIC_1280) += qla1280.o |
90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ | 90 | obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx/ |
91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += qla4xxx/ | 91 | obj-$(CONFIG_SCSI_QLA_ISCSI) += libiscsi.o qla4xxx/ |
92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ | 92 | obj-$(CONFIG_SCSI_LPFC) += lpfc/ |
93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ | 93 | obj-$(CONFIG_SCSI_BFA_FC) += bfa/ |
94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o | 94 | obj-$(CONFIG_SCSI_PAS16) += pas16.o |
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c index e7d0d47b9185..e5f2d7d9002e 100644 --- a/drivers/scsi/aacraid/commsup.c +++ b/drivers/scsi/aacraid/commsup.c | |||
@@ -1283,6 +1283,8 @@ static int _aac_reset_adapter(struct aac_dev *aac, int forced) | |||
1283 | kfree(aac->queues); | 1283 | kfree(aac->queues); |
1284 | aac->queues = NULL; | 1284 | aac->queues = NULL; |
1285 | free_irq(aac->pdev->irq, aac); | 1285 | free_irq(aac->pdev->irq, aac); |
1286 | if (aac->msi) | ||
1287 | pci_disable_msi(aac->pdev); | ||
1286 | kfree(aac->fsa_dev); | 1288 | kfree(aac->fsa_dev); |
1287 | aac->fsa_dev = NULL; | 1289 | aac->fsa_dev = NULL; |
1288 | quirks = aac_get_driver_ident(index)->quirks; | 1290 | quirks = aac_get_driver_ident(index)->quirks; |
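
The added pci_disable_msi() makes the adapter reset path undo its interrupt setup completely: once free_irq() has released the handler, the device must also be taken out of MSI mode if it was put there, otherwise the subsequent re-initialisation can fail or leak the MSI state. A generic teardown sketch mirroring that ordering, with an invented flag:

    #include <linux/pci.h>
    #include <linux/interrupt.h>

    struct my_ctrl {
            struct pci_dev *pdev;
            bool msi_enabled;               /* set when pci_enable_msi() succeeded */
            void *irq_cookie;
    };

    static void my_teardown_irq(struct my_ctrl *c)
    {
            /* Release the handler first, then drop back to legacy INTx. */
            free_irq(c->pdev->irq, c->irq_cookie);
            if (c->msi_enabled) {
                    pci_disable_msi(c->pdev);
                    c->msi_enabled = false;
            }
    }
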
diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index bd22041e2789..f58644850333 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c | |||
@@ -913,7 +913,7 @@ static void l2t_put(struct cxgbi_sock *csk) | |||
913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; | 913 | struct t3cdev *t3dev = (struct t3cdev *)csk->cdev->lldev; |
914 | 914 | ||
915 | if (csk->l2t) { | 915 | if (csk->l2t) { |
916 | l2t_release(L2DATA(t3dev), csk->l2t); | 916 | l2t_release(t3dev, csk->l2t); |
917 | csk->l2t = NULL; | 917 | csk->l2t = NULL; |
918 | cxgbi_sock_put(csk); | 918 | cxgbi_sock_put(csk); |
919 | } | 919 | } |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index f84084bba2f0..c9e3dc024bc3 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -1721,7 +1721,7 @@ static int sas_find_bcast_dev(struct domain_device *dev, | |||
1721 | list_for_each_entry(ch, &ex->children, siblings) { | 1721 | list_for_each_entry(ch, &ex->children, siblings) { |
1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { | 1722 | if (ch->dev_type == EDGE_DEV || ch->dev_type == FANOUT_DEV) { |
1723 | res = sas_find_bcast_dev(ch, src_dev); | 1723 | res = sas_find_bcast_dev(ch, src_dev); |
1724 | if (src_dev) | 1724 | if (*src_dev) |
1725 | return res; | 1725 | return res; |
1726 | } | 1726 | } |
1727 | } | 1727 | } |
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 646fc5263d50..8a7591f035e6 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c | |||
@@ -1507,8 +1507,8 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) | |||
1507 | 1507 | ||
1508 | if (k != blocks_done) { | 1508 | if (k != blocks_done) { |
1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, | 1509 | qla_printk(KERN_WARNING, sp->fcport->vha->hw, |
1510 | "unexpected tag values tag:lba=%x:%lx)\n", | 1510 | "unexpected tag values tag:lba=%x:%llx)\n", |
1511 | e_ref_tag, lba_s); | 1511 | e_ref_tag, (unsigned long long)lba_s); |
1512 | return 1; | 1512 | return 1; |
1513 | } | 1513 | } |
1514 | 1514 | ||
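
The format fix is the usual recipe for printing a value whose width differs between configurations (lba_s here): cast to unsigned long long and use %llx/%llu, so 32-bit builds neither truncate the value nor trip format warnings. For example:

    #include <linux/kernel.h>
    #include <linux/types.h>

    static void my_report_lba(u64 lba)
    {
            /* LBA/sector-sized values may be 32 or 64 bit depending on config;
             * the cast plus %llx is correct for both. */
            printk(KERN_WARNING "unexpected LBA 0x%llx\n",
                   (unsigned long long)lba);
    }
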
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index d2407558773f..24cacff57786 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c | |||
@@ -825,6 +825,9 @@ static void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) | |||
825 | { | 825 | { |
826 | struct device *dev = mspi->dev; | 826 | struct device *dev = mspi->dev; |
827 | 827 | ||
828 | if (!(mspi->flags & SPI_CPM_MODE)) | ||
829 | return; | ||
830 | |||
828 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); | 831 | dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE); |
829 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); | 832 | dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE); |
830 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); | 833 | cpm_muram_free(cpm_muram_offset(mspi->tx_bd)); |
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 8ac6542aedcd..fa594d604aca 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c | |||
@@ -786,9 +786,11 @@ static int __devinit spi_imx_probe(struct platform_device *pdev) | |||
786 | int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); | 786 | int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); |
787 | if (cs_gpio < 0) | 787 | if (cs_gpio < 0) |
788 | cs_gpio = mxc_platform_info->chipselect[i]; | 788 | cs_gpio = mxc_platform_info->chipselect[i]; |
789 | |||
790 | spi_imx->chipselect[i] = cs_gpio; | ||
789 | if (cs_gpio < 0) | 791 | if (cs_gpio < 0) |
790 | continue; | 792 | continue; |
791 | spi_imx->chipselect[i] = cs_gpio; | 793 | |
792 | ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); | 794 | ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME); |
793 | if (ret) { | 795 | if (ret) { |
794 | while (i > 0) { | 796 | while (i > 0) { |
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 1d23f3831866..6a80749391db 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c | |||
@@ -50,6 +50,8 @@ | |||
50 | #define PCH_RX_THOLD 7 | 50 | #define PCH_RX_THOLD 7 |
51 | #define PCH_RX_THOLD_MAX 15 | 51 | #define PCH_RX_THOLD_MAX 15 |
52 | 52 | ||
53 | #define PCH_TX_THOLD 2 | ||
54 | |||
53 | #define PCH_MAX_BAUDRATE 5000000 | 55 | #define PCH_MAX_BAUDRATE 5000000 |
54 | #define PCH_MAX_FIFO_DEPTH 16 | 56 | #define PCH_MAX_FIFO_DEPTH 16 |
55 | 57 | ||
@@ -58,6 +60,7 @@ | |||
58 | #define PCH_SLEEP_TIME 10 | 60 | #define PCH_SLEEP_TIME 10 |
59 | 61 | ||
60 | #define SSN_LOW 0x02U | 62 | #define SSN_LOW 0x02U |
63 | #define SSN_HIGH 0x03U | ||
61 | #define SSN_NO_CONTROL 0x00U | 64 | #define SSN_NO_CONTROL 0x00U |
62 | #define PCH_MAX_CS 0xFF | 65 | #define PCH_MAX_CS 0xFF |
63 | #define PCI_DEVICE_ID_GE_SPI 0x8816 | 66 | #define PCI_DEVICE_ID_GE_SPI 0x8816 |
@@ -316,16 +319,19 @@ static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val, | |||
316 | 319 | ||
317 | /* if transfer complete interrupt */ | 320 | /* if transfer complete interrupt */ |
318 | if (reg_spsr_val & SPSR_FI_BIT) { | 321 | if (reg_spsr_val & SPSR_FI_BIT) { |
319 | if (tx_index < bpw_len) | 322 | if ((tx_index == bpw_len) && (rx_index == tx_index)) { |
323 | /* disable interrupts */ | ||
324 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
325 | |||
326 | /* transfer is completed; | ||
327 | inform pch_spi_process_messages */ | ||
328 | data->transfer_complete = true; | ||
329 | data->transfer_active = false; | ||
330 | wake_up(&data->wait); | ||
331 | } else { | ||
320 | dev_err(&data->master->dev, | 332 | dev_err(&data->master->dev, |
321 | "%s : Transfer is not completed", __func__); | 333 | "%s : Transfer is not completed", __func__); |
322 | /* disable interrupts */ | 334 | } |
323 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL); | ||
324 | |||
325 | /* transfer is completed;inform pch_spi_process_messages */ | ||
326 | data->transfer_complete = true; | ||
327 | data->transfer_active = false; | ||
328 | wake_up(&data->wait); | ||
329 | } | 335 | } |
330 | } | 336 | } |
331 | 337 | ||
@@ -348,16 +354,26 @@ static irqreturn_t pch_spi_handler(int irq, void *dev_id) | |||
348 | "%s returning due to suspend\n", __func__); | 354 | "%s returning due to suspend\n", __func__); |
349 | return IRQ_NONE; | 355 | return IRQ_NONE; |
350 | } | 356 | } |
351 | if (data->use_dma) | ||
352 | return IRQ_NONE; | ||
353 | 357 | ||
354 | io_remap_addr = data->io_remap_addr; | 358 | io_remap_addr = data->io_remap_addr; |
355 | spsr = io_remap_addr + PCH_SPSR; | 359 | spsr = io_remap_addr + PCH_SPSR; |
356 | 360 | ||
357 | reg_spsr_val = ioread32(spsr); | 361 | reg_spsr_val = ioread32(spsr); |
358 | 362 | ||
359 | if (reg_spsr_val & SPSR_ORF_BIT) | 363 | if (reg_spsr_val & SPSR_ORF_BIT) { |
360 | dev_err(&board_dat->pdev->dev, "%s Over run error", __func__); | 364 | dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__); |
365 | if (data->current_msg->complete != 0) { | ||
366 | data->transfer_complete = true; | ||
367 | data->current_msg->status = -EIO; | ||
368 | data->current_msg->complete(data->current_msg->context); | ||
369 | data->bcurrent_msg_processing = false; | ||
370 | data->current_msg = NULL; | ||
371 | data->cur_trans = NULL; | ||
372 | } | ||
373 | } | ||
374 | |||
375 | if (data->use_dma) | ||
376 | return IRQ_NONE; | ||
361 | 377 | ||
362 | /* Check if the interrupt is for SPI device */ | 378 | /* Check if the interrupt is for SPI device */ |
363 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { | 379 | if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) { |
@@ -756,10 +772,6 @@ static void pch_spi_set_ir(struct pch_spi_data *data) | |||
756 | 772 | ||
757 | wait_event_interruptible(data->wait, data->transfer_complete); | 773 | wait_event_interruptible(data->wait, data->transfer_complete); |
758 | 774 | ||
759 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
760 | dev_dbg(&data->master->dev, | ||
761 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
762 | |||
763 | /* clear all interrupts */ | 775 | /* clear all interrupts */ |
764 | pch_spi_writereg(data->master, PCH_SPSR, | 776 | pch_spi_writereg(data->master, PCH_SPSR, |
765 | pch_spi_readreg(data->master, PCH_SPSR)); | 777 | pch_spi_readreg(data->master, PCH_SPSR)); |
@@ -815,10 +827,11 @@ static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw) | |||
815 | } | 827 | } |
816 | } | 828 | } |
817 | 829 | ||
818 | static void pch_spi_start_transfer(struct pch_spi_data *data) | 830 | static int pch_spi_start_transfer(struct pch_spi_data *data) |
819 | { | 831 | { |
820 | struct pch_spi_dma_ctrl *dma; | 832 | struct pch_spi_dma_ctrl *dma; |
821 | unsigned long flags; | 833 | unsigned long flags; |
834 | int rtn; | ||
822 | 835 | ||
823 | dma = &data->dma; | 836 | dma = &data->dma; |
824 | 837 | ||
@@ -833,19 +846,23 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
833 | initiating the transfer. */ | 846 | initiating the transfer. */ |
834 | dev_dbg(&data->master->dev, | 847 | dev_dbg(&data->master->dev, |
835 | "%s:waiting for transfer to get over\n", __func__); | 848 | "%s:waiting for transfer to get over\n", __func__); |
836 | wait_event_interruptible(data->wait, data->transfer_complete); | 849 | rtn = wait_event_interruptible_timeout(data->wait, |
850 | data->transfer_complete, | ||
851 | msecs_to_jiffies(2 * HZ)); | ||
837 | 852 | ||
838 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, | 853 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, |
839 | DMA_FROM_DEVICE); | 854 | DMA_FROM_DEVICE); |
855 | |||
856 | dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, | ||
857 | DMA_FROM_DEVICE); | ||
858 | memset(data->dma.tx_buf_virt, 0, PAGE_SIZE); | ||
859 | |||
840 | async_tx_ack(dma->desc_rx); | 860 | async_tx_ack(dma->desc_rx); |
841 | async_tx_ack(dma->desc_tx); | 861 | async_tx_ack(dma->desc_tx); |
842 | kfree(dma->sg_tx_p); | 862 | kfree(dma->sg_tx_p); |
843 | kfree(dma->sg_rx_p); | 863 | kfree(dma->sg_rx_p); |
844 | 864 | ||
845 | spin_lock_irqsave(&data->lock, flags); | 865 | spin_lock_irqsave(&data->lock, flags); |
846 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
847 | dev_dbg(&data->master->dev, | ||
848 | "%s:no more control over SSN-writing 0 to SSNXCR.", __func__); | ||
849 | 866 | ||
850 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ | 867 | /* clear fifo threshold, disable interrupts, disable SPI transfer */ |
851 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, | 868 | pch_spi_setclr_reg(data->master, PCH_SPCR, 0, |
@@ -858,6 +875,8 @@ static void pch_spi_start_transfer(struct pch_spi_data *data) | |||
858 | pch_spi_clear_fifo(data->master); | 875 | pch_spi_clear_fifo(data->master); |
859 | 876 | ||
860 | spin_unlock_irqrestore(&data->lock, flags); | 877 | spin_unlock_irqrestore(&data->lock, flags); |
878 | |||
879 | return rtn; | ||
861 | } | 880 | } |
862 | 881 | ||
863 | static void pch_dma_rx_complete(void *arg) | 882 | static void pch_dma_rx_complete(void *arg) |
@@ -1023,8 +1042,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1023 | /* set receive fifo threshold and transmit fifo threshold */ | 1042 | /* set receive fifo threshold and transmit fifo threshold */ |
1024 | pch_spi_setclr_reg(data->master, PCH_SPCR, | 1043 | pch_spi_setclr_reg(data->master, PCH_SPCR, |
1025 | ((size - 1) << SPCR_RFIC_FIELD) | | 1044 | ((size - 1) << SPCR_RFIC_FIELD) | |
1026 | ((PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE) << | 1045 | (PCH_TX_THOLD << SPCR_TFIC_FIELD), |
1027 | SPCR_TFIC_FIELD), | ||
1028 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); | 1046 | MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS); |
1029 | 1047 | ||
1030 | spin_unlock_irqrestore(&data->lock, flags); | 1048 | spin_unlock_irqrestore(&data->lock, flags); |
@@ -1035,13 +1053,20 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1035 | /* offset, length setting */ | 1053 | /* offset, length setting */ |
1036 | sg = dma->sg_rx_p; | 1054 | sg = dma->sg_rx_p; |
1037 | for (i = 0; i < num; i++, sg++) { | 1055 | for (i = 0; i < num; i++, sg++) { |
1038 | if (i == 0) { | 1056 | if (i == (num - 2)) { |
1039 | sg->offset = 0; | 1057 | sg->offset = size * i; |
1058 | sg->offset = sg->offset * (*bpw / 8); | ||
1040 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, | 1059 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem, |
1041 | sg->offset); | 1060 | sg->offset); |
1042 | sg_dma_len(sg) = rem; | 1061 | sg_dma_len(sg) = rem; |
1062 | } else if (i == (num - 1)) { | ||
1063 | sg->offset = size * (i - 1) + rem; | ||
1064 | sg->offset = sg->offset * (*bpw / 8); | ||
1065 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | ||
1066 | sg->offset); | ||
1067 | sg_dma_len(sg) = size; | ||
1043 | } else { | 1068 | } else { |
1044 | sg->offset = rem + size * (i - 1); | 1069 | sg->offset = size * i; |
1045 | sg->offset = sg->offset * (*bpw / 8); | 1070 | sg->offset = sg->offset * (*bpw / 8); |
1046 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, | 1071 | sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size, |
1047 | sg->offset); | 1072 | sg->offset); |
@@ -1065,6 +1090,16 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw) | |||
1065 | dma->desc_rx = desc_rx; | 1090 | dma->desc_rx = desc_rx; |
1066 | 1091 | ||
1067 | /* TX */ | 1092 | /* TX */ |
1093 | if (data->bpw_len > PCH_DMA_TRANS_SIZE) { | ||
1094 | num = data->bpw_len / PCH_DMA_TRANS_SIZE; | ||
1095 | size = PCH_DMA_TRANS_SIZE; | ||
1096 | rem = 16; | ||
1097 | } else { | ||
1098 | num = 1; | ||
1099 | size = data->bpw_len; | ||
1100 | rem = data->bpw_len; | ||
1101 | } | ||
1102 | |||
1068 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); | 1103 | dma->sg_tx_p = kzalloc(sizeof(struct scatterlist)*num, GFP_ATOMIC); |
1069 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ | 1104 | sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */ |
1070 | /* offset, length setting */ | 1105 | /* offset, length setting */ |
@@ -1162,6 +1197,7 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1162 | if (data->use_dma) | 1197 | if (data->use_dma) |
1163 | pch_spi_request_dma(data, | 1198 | pch_spi_request_dma(data, |
1164 | data->current_msg->spi->bits_per_word); | 1199 | data->current_msg->spi->bits_per_word); |
1200 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL); | ||
1165 | do { | 1201 | do { |
1166 | /* If we are already processing a message get the next | 1202 | /* If we are already processing a message get the next |
1167 | transfer structure from the message otherwise retrieve | 1203 | transfer structure from the message otherwise retrieve |
@@ -1184,7 +1220,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1184 | 1220 | ||
1185 | if (data->use_dma) { | 1221 | if (data->use_dma) { |
1186 | pch_spi_handle_dma(data, &bpw); | 1222 | pch_spi_handle_dma(data, &bpw); |
1187 | pch_spi_start_transfer(data); | 1223 | if (!pch_spi_start_transfer(data)) |
1224 | goto out; | ||
1188 | pch_spi_copy_rx_data_for_dma(data, bpw); | 1225 | pch_spi_copy_rx_data_for_dma(data, bpw); |
1189 | } else { | 1226 | } else { |
1190 | pch_spi_set_tx(data, &bpw); | 1227 | pch_spi_set_tx(data, &bpw); |
@@ -1222,6 +1259,8 @@ static void pch_spi_process_messages(struct work_struct *pwork) | |||
1222 | 1259 | ||
1223 | } while (data->cur_trans != NULL); | 1260 | } while (data->cur_trans != NULL); |
1224 | 1261 | ||
1262 | out: | ||
1263 | pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH); | ||
1225 | if (data->use_dma) | 1264 | if (data->use_dma) |
1226 | pch_spi_release_dma(data); | 1265 | pch_spi_release_dma(data); |
1227 | } | 1266 | } |
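
Two themes run through this diff: chip select is now driven once per message (SSN_NO_CONTROL before the transfer loop, SSN_HIGH at the out: label) instead of once per transfer, and the DMA completion wait is bounded so a lost interrupt can no longer hang the worker; pch_spi_start_transfer() reports whether the wait succeeded and the caller skips the RX copy-back otherwise. A sketch of that bounded-wait-and-bail pattern, with invented names:

    #include <linux/wait.h>
    #include <linux/jiffies.h>

    static DECLARE_WAIT_QUEUE_HEAD(xfer_wait);
    static bool xfer_done;

    /* >0: transfer finished (remaining jiffies), 0: timed out,
     * <0: interrupted by a signal. */
    static long my_start_transfer_and_wait(void)
    {
            return wait_event_interruptible_timeout(xfer_wait, xfer_done,
                                                    msecs_to_jiffies(2000));
    }

    static void my_process_message(void)
    {
            if (my_start_transfer_and_wait() <= 0)
                    goto out;               /* timeout or signal: skip copy-back */

            /* ... copy received data back to the caller's buffers here ... */
    out:
            /* chip select is always driven back high on the way out */
            return;
    }
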
diff --git a/drivers/watchdog/hpwdt.c b/drivers/watchdog/hpwdt.c index 809cbda03d7a..7e7feac05221 100644 --- a/drivers/watchdog/hpwdt.c +++ b/drivers/watchdog/hpwdt.c | |||
@@ -477,15 +477,11 @@ static int hpwdt_time_left(void) | |||
477 | /* | 477 | /* |
478 | * NMI Handler | 478 | * NMI Handler |
479 | */ | 479 | */ |
480 | static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | 480 | static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs) |
481 | void *data) | ||
482 | { | 481 | { |
483 | unsigned long rom_pl; | 482 | unsigned long rom_pl; |
484 | static int die_nmi_called; | 483 | static int die_nmi_called; |
485 | 484 | ||
486 | if (ulReason != DIE_NMIUNKNOWN) | ||
487 | goto out; | ||
488 | |||
489 | if (!hpwdt_nmi_decoding) | 485 | if (!hpwdt_nmi_decoding) |
490 | goto out; | 486 | goto out; |
491 | 487 | ||
@@ -508,7 +504,7 @@ static int hpwdt_pretimeout(struct notifier_block *nb, unsigned long ulReason, | |||
508 | "Management Log for details.\n"); | 504 | "Management Log for details.\n"); |
509 | 505 | ||
510 | out: | 506 | out: |
511 | return NOTIFY_OK; | 507 | return NMI_DONE; |
512 | } | 508 | } |
513 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | 509 | #endif /* CONFIG_HPWDT_NMI_DECODING */ |
514 | 510 | ||
@@ -648,13 +644,6 @@ static struct miscdevice hpwdt_miscdev = { | |||
648 | .fops = &hpwdt_fops, | 644 | .fops = &hpwdt_fops, |
649 | }; | 645 | }; |
650 | 646 | ||
651 | #ifdef CONFIG_HPWDT_NMI_DECODING | ||
652 | static struct notifier_block die_notifier = { | ||
653 | .notifier_call = hpwdt_pretimeout, | ||
654 | .priority = 0, | ||
655 | }; | ||
656 | #endif /* CONFIG_HPWDT_NMI_DECODING */ | ||
657 | |||
658 | /* | 647 | /* |
659 | * Init & Exit | 648 | * Init & Exit |
660 | */ | 649 | */ |
@@ -740,10 +729,9 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
740 | * die notify list to handle a critical NMI. The default is to | 729 | * die notify list to handle a critical NMI. The default is to |
741 | * be last so other users of the NMI signal can function. | 730 | * be last so other users of the NMI signal can function. |
742 | */ | 731 | */ |
743 | if (priority) | 732 | retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout, |
744 | die_notifier.priority = 0x7FFFFFFF; | 733 | (priority) ? NMI_FLAG_FIRST : 0, |
745 | 734 | "hpwdt"); | |
746 | retval = register_die_notifier(&die_notifier); | ||
747 | if (retval != 0) { | 735 | if (retval != 0) { |
748 | dev_warn(&dev->dev, | 736 | dev_warn(&dev->dev, |
749 | "Unable to register a die notifier (err=%d).\n", | 737 | "Unable to register a die notifier (err=%d).\n", |
@@ -763,7 +751,7 @@ static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev) | |||
763 | 751 | ||
764 | static void hpwdt_exit_nmi_decoding(void) | 752 | static void hpwdt_exit_nmi_decoding(void) |
765 | { | 753 | { |
766 | unregister_die_notifier(&die_notifier); | 754 | unregister_nmi_handler(NMI_UNKNOWN, "hpwdt"); |
767 | if (cru_rom_addr) | 755 | if (cru_rom_addr) |
768 | iounmap(cru_rom_addr); | 756 | iounmap(cru_rom_addr); |
769 | } | 757 | } |
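
The watchdog moves from the die-notifier chain to the dedicated NMI handler API used in this hunk: the callback now takes (unsigned int reason, struct pt_regs *) and returns NMI_HANDLED or NMI_DONE, and registration is keyed by type and name through register_nmi_handler()/unregister_nmi_handler(). A stripped-down sketch of the same shape; everything prefixed my_ is invented, and the hardware decoding is reduced to a placeholder:

    #include <linux/module.h>
    #include <asm/nmi.h>

    static bool my_event_pending(void)
    {
            return false;                   /* placeholder for real hardware decode */
    }

    static int my_nmi_handler(unsigned int reason, struct pt_regs *regs)
    {
            if (!my_event_pending())
                    return NMI_DONE;        /* not ours, let other handlers look */

            /* ... decode and log the event here ... */
            return NMI_HANDLED;
    }

    static int __init my_init(void)
    {
            /* NMI_FLAG_FIRST puts the handler at the head of the chain. */
            return register_nmi_handler(NMI_UNKNOWN, my_nmi_handler,
                                        NMI_FLAG_FIRST, "my_nmi");
    }

    static void __exit my_exit(void)
    {
            unregister_nmi_handler(NMI_UNKNOWN, "my_nmi");
    }

    module_init(my_init);
    module_exit(my_exit);
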
diff --git a/drivers/zorro/zorro.c b/drivers/zorro/zorro.c index e0c2807b0970..181fa8158a8b 100644 --- a/drivers/zorro/zorro.c +++ b/drivers/zorro/zorro.c | |||
@@ -148,10 +148,10 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) | |||
148 | } | 148 | } |
149 | platform_set_drvdata(pdev, bus); | 149 | platform_set_drvdata(pdev, bus); |
150 | 150 | ||
151 | /* Register all devices */ | ||
152 | pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", | 151 | pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", |
153 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); | 152 | zorro_num_autocon, zorro_num_autocon == 1 ? "" : "s"); |
154 | 153 | ||
154 | /* First identify all devices ... */ | ||
155 | for (i = 0; i < zorro_num_autocon; i++) { | 155 | for (i = 0; i < zorro_num_autocon; i++) { |
156 | z = &zorro_autocon[i]; | 156 | z = &zorro_autocon[i]; |
157 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); | 157 | z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); |
@@ -172,6 +172,11 @@ static int __init amiga_zorro_probe(struct platform_device *pdev) | |||
172 | dev_set_name(&z->dev, "%02x", i); | 172 | dev_set_name(&z->dev, "%02x", i); |
173 | z->dev.parent = &bus->dev; | 173 | z->dev.parent = &bus->dev; |
174 | z->dev.bus = &zorro_bus_type; | 174 | z->dev.bus = &zorro_bus_type; |
175 | } | ||
176 | |||
177 | /* ... then register them */ | ||
178 | for (i = 0; i < zorro_num_autocon; i++) { | ||
179 | z = &zorro_autocon[i]; | ||
175 | error = device_register(&z->dev); | 180 | error = device_register(&z->dev); |
176 | if (error) { | 181 | if (error) { |
177 | dev_err(&bus->dev, "Error registering device %s\n", | 182 | dev_err(&bus->dev, "Error registering device %s\n", |
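
The zorro probe is split into two passes: every zorro_dev is fully initialised (id, resource, name, parent, bus) before any of them is passed to device_register(), so a freshly registered device never observes half-initialised siblings during bus matching. The same two-pass shape in miniature, with invented types and error unwinding omitted:

    #include <linux/device.h>

    struct my_slot {
            struct device dev;
            unsigned int id;
    };

    static int my_register_all(struct my_slot *slots, unsigned int n,
                               struct device *parent, struct bus_type *bus)
    {
            unsigned int i;
            int err;

            /* Pass 1: finish all bookkeeping before anything becomes visible. */
            for (i = 0; i < n; i++) {
                    dev_set_name(&slots[i].dev, "%02x", i);
                    slots[i].dev.parent = parent;
                    slots[i].dev.bus = bus;
            }

            /* Pass 2: publish the devices. */
            for (i = 0; i < n; i++) {
                    err = device_register(&slots[i].dev);
                    if (err)
                            return err;     /* real code would unwind here */
            }

            return 0;
    }
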
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index a381cd22f518..e4e57d59edb7 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -1036,11 +1036,13 @@ out: | |||
1036 | * on error we return an unlocked page and the error value | 1036 | * on error we return an unlocked page and the error value |
1037 | * on success we return a locked page and 0 | 1037 | * on success we return a locked page and 0 |
1038 | */ | 1038 | */ |
1039 | static int prepare_uptodate_page(struct page *page, u64 pos) | 1039 | static int prepare_uptodate_page(struct page *page, u64 pos, |
1040 | bool force_uptodate) | ||
1040 | { | 1041 | { |
1041 | int ret = 0; | 1042 | int ret = 0; |
1042 | 1043 | ||
1043 | if ((pos & (PAGE_CACHE_SIZE - 1)) && !PageUptodate(page)) { | 1044 | if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) && |
1045 | !PageUptodate(page)) { | ||
1044 | ret = btrfs_readpage(NULL, page); | 1046 | ret = btrfs_readpage(NULL, page); |
1045 | if (ret) | 1047 | if (ret) |
1046 | return ret; | 1048 | return ret; |
@@ -1061,7 +1063,7 @@ static int prepare_uptodate_page(struct page *page, u64 pos) | |||
1061 | static noinline int prepare_pages(struct btrfs_root *root, struct file *file, | 1063 | static noinline int prepare_pages(struct btrfs_root *root, struct file *file, |
1062 | struct page **pages, size_t num_pages, | 1064 | struct page **pages, size_t num_pages, |
1063 | loff_t pos, unsigned long first_index, | 1065 | loff_t pos, unsigned long first_index, |
1064 | size_t write_bytes) | 1066 | size_t write_bytes, bool force_uptodate) |
1065 | { | 1067 | { |
1066 | struct extent_state *cached_state = NULL; | 1068 | struct extent_state *cached_state = NULL; |
1067 | int i; | 1069 | int i; |
@@ -1086,10 +1088,11 @@ again: | |||
1086 | } | 1088 | } |
1087 | 1089 | ||
1088 | if (i == 0) | 1090 | if (i == 0) |
1089 | err = prepare_uptodate_page(pages[i], pos); | 1091 | err = prepare_uptodate_page(pages[i], pos, |
1092 | force_uptodate); | ||
1090 | if (i == num_pages - 1) | 1093 | if (i == num_pages - 1) |
1091 | err = prepare_uptodate_page(pages[i], | 1094 | err = prepare_uptodate_page(pages[i], |
1092 | pos + write_bytes); | 1095 | pos + write_bytes, false); |
1093 | if (err) { | 1096 | if (err) { |
1094 | page_cache_release(pages[i]); | 1097 | page_cache_release(pages[i]); |
1095 | faili = i - 1; | 1098 | faili = i - 1; |
@@ -1158,6 +1161,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1158 | size_t num_written = 0; | 1161 | size_t num_written = 0; |
1159 | int nrptrs; | 1162 | int nrptrs; |
1160 | int ret = 0; | 1163 | int ret = 0; |
1164 | bool force_page_uptodate = false; | ||
1161 | 1165 | ||
1162 | nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / | 1166 | nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) / |
1163 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / | 1167 | PAGE_CACHE_SIZE, PAGE_CACHE_SIZE / |
@@ -1200,7 +1204,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1200 | * contents of pages from loop to loop | 1204 | * contents of pages from loop to loop |
1201 | */ | 1205 | */ |
1202 | ret = prepare_pages(root, file, pages, num_pages, | 1206 | ret = prepare_pages(root, file, pages, num_pages, |
1203 | pos, first_index, write_bytes); | 1207 | pos, first_index, write_bytes, |
1208 | force_page_uptodate); | ||
1204 | if (ret) { | 1209 | if (ret) { |
1205 | btrfs_delalloc_release_space(inode, | 1210 | btrfs_delalloc_release_space(inode, |
1206 | num_pages << PAGE_CACHE_SHIFT); | 1211 | num_pages << PAGE_CACHE_SHIFT); |
@@ -1217,12 +1222,15 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, | |||
1217 | if (copied < write_bytes) | 1222 | if (copied < write_bytes) |
1218 | nrptrs = 1; | 1223 | nrptrs = 1; |
1219 | 1224 | ||
1220 | if (copied == 0) | 1225 | if (copied == 0) { |
1226 | force_page_uptodate = true; | ||
1221 | dirty_pages = 0; | 1227 | dirty_pages = 0; |
1222 | else | 1228 | } else { |
1229 | force_page_uptodate = false; | ||
1223 | dirty_pages = (copied + offset + | 1230 | dirty_pages = (copied + offset + |
1224 | PAGE_CACHE_SIZE - 1) >> | 1231 | PAGE_CACHE_SIZE - 1) >> |
1225 | PAGE_CACHE_SHIFT; | 1232 | PAGE_CACHE_SHIFT; |
1233 | } | ||
1226 | 1234 | ||
1227 | /* | 1235 | /* |
1228 | * If we had a short copy we need to release the excess delaloc | 1236 | * If we had a short copy we need to release the excess delaloc |
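The btrfs hunks above wire a force_uptodate flag through the write path: when iov_iter copy-in returns 0 bytes, __btrfs_buffered_write sets force_page_uptodate before retrying, and prepare_uptodate_page then reads the first page completely even for a page-aligned write, so a failed copy never leaves a partially filled page to be dirtied. A minimal sketch of the new predicate (names taken from the hunk, kernel helpers stubbed out):

/* Illustrative only: mirrors the test added to prepare_uptodate_page(). */
static int need_readpage(unsigned long pos, int page_uptodate,
                         int force_uptodate, unsigned long page_size)
{
        /* Read the page when the write is not page-aligned, or when the
         * caller forces it after a zero-byte copy, and it is not already
         * up to date. */
        return ((pos & (page_size - 1)) || force_uptodate) && !page_uptodate;
}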
diff --git a/fs/namei.c b/fs/namei.c index f4788365ea22..0b3138de2a3b 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -721,12 +721,6 @@ static int follow_automount(struct path *path, unsigned flags, | |||
721 | if (!path->dentry->d_op || !path->dentry->d_op->d_automount) | 721 | if (!path->dentry->d_op || !path->dentry->d_op->d_automount) |
722 | return -EREMOTE; | 722 | return -EREMOTE; |
723 | 723 | ||
724 | /* We don't want to mount if someone supplied AT_NO_AUTOMOUNT | ||
725 | * and this is the terminal part of the path. | ||
726 | */ | ||
727 | if ((flags & LOOKUP_NO_AUTOMOUNT) && !(flags & LOOKUP_PARENT)) | ||
728 | return -EISDIR; /* we actually want to stop here */ | ||
729 | |||
730 | /* We don't want to mount if someone's just doing a stat - | 724 | /* We don't want to mount if someone's just doing a stat - |
731 | * unless they're stat'ing a directory and appended a '/' to | 725 | * unless they're stat'ing a directory and appended a '/' to |
732 | * the name. | 726 | * the name. |
@@ -739,7 +733,7 @@ static int follow_automount(struct path *path, unsigned flags, | |||
739 | * of the daemon to instantiate them before they can be used. | 733 | * of the daemon to instantiate them before they can be used. |
740 | */ | 734 | */ |
741 | if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | | 735 | if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY | |
742 | LOOKUP_OPEN | LOOKUP_CREATE)) && | 736 | LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) && |
743 | path->dentry->d_inode) | 737 | path->dentry->d_inode) |
744 | return -EISDIR; | 738 | return -EISDIR; |
745 | 739 | ||
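Together with the LOOKUP_NO_AUTOMOUNT removals later in this series, this inverts the automount policy: stat-like lookups skip automount points by default, and callers that really want traversal (do_loopback, the NFS referral path, quotactl below) pass LOOKUP_AUTOMOUNT explicitly. A condensed sketch of the resulting gate, with illustrative values for the intent bits not shown in these hunks:

#define LOOKUP_DIRECTORY  0x0002
#define LOOKUP_AUTOMOUNT  0x0004
#define LOOKUP_PARENT     0x0010
#define LOOKUP_OPEN       0x0100   /* assumed values, for illustration only */
#define LOOKUP_CREATE     0x0200

static int should_automount(unsigned flags, int have_inode)
{
        /* Plain stat of an existing object: do not trigger the automount. */
        if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
                       LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
            have_inode)
                return 0;
        return 1;
}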
diff --git a/fs/namespace.c b/fs/namespace.c index 22bfe8273c68..b4febb29d3bb 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1757,7 +1757,7 @@ static int do_loopback(struct path *path, char *old_name, | |||
1757 | return err; | 1757 | return err; |
1758 | if (!old_name || !*old_name) | 1758 | if (!old_name || !*old_name) |
1759 | return -EINVAL; | 1759 | return -EINVAL; |
1760 | err = kern_path(old_name, LOOKUP_FOLLOW, &old_path); | 1760 | err = kern_path(old_name, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &old_path); |
1761 | if (err) | 1761 | if (err) |
1762 | return err; | 1762 | return err; |
1763 | 1763 | ||
diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 9b7dd7013b15..5b19b6aabe18 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c | |||
@@ -2798,7 +2798,7 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt, | |||
2798 | goto out_put_mnt_ns; | 2798 | goto out_put_mnt_ns; |
2799 | 2799 | ||
2800 | ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, | 2800 | ret = vfs_path_lookup(root_mnt->mnt_root, root_mnt, |
2801 | export_path, LOOKUP_FOLLOW, &path); | 2801 | export_path, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); |
2802 | 2802 | ||
2803 | nfs_referral_loop_unprotect(); | 2803 | nfs_referral_loop_unprotect(); |
2804 | put_mnt_ns(ns_private); | 2804 | put_mnt_ns(ns_private); |
diff --git a/fs/quota/quota.c b/fs/quota/quota.c index b34bdb25490c..10b6be3ca280 100644 --- a/fs/quota/quota.c +++ b/fs/quota/quota.c | |||
@@ -355,7 +355,7 @@ SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special, | |||
355 | * resolution (think about autofs) and thus deadlocks could arise. | 355 | * resolution (think about autofs) and thus deadlocks could arise. |
356 | */ | 356 | */ |
357 | if (cmds == Q_QUOTAON) { | 357 | if (cmds == Q_QUOTAON) { |
358 | ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW, &path); | 358 | ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path); |
359 | if (ret) | 359 | if (ret) |
360 | pathp = ERR_PTR(ret); | 360 | pathp = ERR_PTR(ret); |
361 | else | 361 | else |
@@ -81,8 +81,6 @@ int vfs_fstatat(int dfd, const char __user *filename, struct kstat *stat, | |||
81 | 81 | ||
82 | if (!(flag & AT_SYMLINK_NOFOLLOW)) | 82 | if (!(flag & AT_SYMLINK_NOFOLLOW)) |
83 | lookup_flags |= LOOKUP_FOLLOW; | 83 | lookup_flags |= LOOKUP_FOLLOW; |
84 | if (flag & AT_NO_AUTOMOUNT) | ||
85 | lookup_flags |= LOOKUP_NO_AUTOMOUNT; | ||
86 | if (flag & AT_EMPTY_PATH) | 84 | if (flag & AT_EMPTY_PATH) |
87 | lookup_flags |= LOOKUP_EMPTY; | 85 | lookup_flags |= LOOKUP_EMPTY; |
88 | 86 | ||
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index e807ad687a07..3ad553e8eae2 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
@@ -80,6 +80,7 @@ extern void irq_domain_del(struct irq_domain *domain); | |||
80 | #endif /* CONFIG_IRQ_DOMAIN */ | 80 | #endif /* CONFIG_IRQ_DOMAIN */ |
81 | 81 | ||
82 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) | 82 | #if defined(CONFIG_IRQ_DOMAIN) && defined(CONFIG_OF_IRQ) |
83 | extern struct irq_domain_ops irq_domain_simple_ops; | ||
83 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); | 84 | extern void irq_domain_add_simple(struct device_node *controller, int irq_base); |
84 | extern void irq_domain_generate_simple(const struct of_device_id *match, | 85 | extern void irq_domain_generate_simple(const struct of_device_id *match, |
85 | u64 phys_base, unsigned int irq_start); | 86 | u64 phys_base, unsigned int irq_start); |
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 2c366b52f505..aace6b8691a2 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -553,6 +553,7 @@ struct kvm_ppc_pvinfo { | |||
553 | #define KVM_CAP_SPAPR_TCE 63 | 553 | #define KVM_CAP_SPAPR_TCE 63 |
554 | #define KVM_CAP_PPC_SMT 64 | 554 | #define KVM_CAP_PPC_SMT 64 |
555 | #define KVM_CAP_PPC_RMA 65 | 555 | #define KVM_CAP_PPC_RMA 65 |
556 | #define KVM_CAP_S390_GMAP 71 | ||
556 | 557 | ||
557 | #ifdef KVM_CAP_IRQ_ROUTING | 558 | #ifdef KVM_CAP_IRQ_ROUTING |
558 | 559 | ||
diff --git a/include/linux/namei.h b/include/linux/namei.h index 76fe2c62ae71..409328d1cbbb 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
@@ -48,11 +48,12 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; | |||
48 | */ | 48 | */ |
49 | #define LOOKUP_FOLLOW 0x0001 | 49 | #define LOOKUP_FOLLOW 0x0001 |
50 | #define LOOKUP_DIRECTORY 0x0002 | 50 | #define LOOKUP_DIRECTORY 0x0002 |
51 | #define LOOKUP_AUTOMOUNT 0x0004 | ||
51 | 52 | ||
52 | #define LOOKUP_PARENT 0x0010 | 53 | #define LOOKUP_PARENT 0x0010 |
53 | #define LOOKUP_REVAL 0x0020 | 54 | #define LOOKUP_REVAL 0x0020 |
54 | #define LOOKUP_RCU 0x0040 | 55 | #define LOOKUP_RCU 0x0040 |
55 | #define LOOKUP_NO_AUTOMOUNT 0x0080 | 56 | |
56 | /* | 57 | /* |
57 | * Intent data | 58 | * Intent data |
58 | */ | 59 | */ |
diff --git a/include/linux/pci.h b/include/linux/pci.h index 8c230cbcbb48..9fc01226055b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -621,8 +621,9 @@ struct pci_driver { | |||
621 | extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); | 621 | extern void pcie_bus_configure_settings(struct pci_bus *bus, u8 smpss); |
622 | 622 | ||
623 | enum pcie_bus_config_types { | 623 | enum pcie_bus_config_types { |
624 | PCIE_BUS_PERFORMANCE, | 624 | PCIE_BUS_TUNE_OFF, |
625 | PCIE_BUS_SAFE, | 625 | PCIE_BUS_SAFE, |
626 | PCIE_BUS_PERFORMANCE, | ||
626 | PCIE_BUS_PEER2PEER, | 627 | PCIE_BUS_PEER2PEER, |
627 | }; | 628 | }; |
628 | 629 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index c816075c01ce..1e9ebe5e0091 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -220,7 +220,10 @@ struct perf_event_attr { | |||
220 | mmap_data : 1, /* non-exec mmap data */ | 220 | mmap_data : 1, /* non-exec mmap data */ |
221 | sample_id_all : 1, /* sample_type all events */ | 221 | sample_id_all : 1, /* sample_type all events */ |
222 | 222 | ||
223 | __reserved_1 : 45; | 223 | exclude_host : 1, /* don't count in host */ |
224 | exclude_guest : 1, /* don't count in guest */ | ||
225 | |||
226 | __reserved_1 : 43; | ||
224 | 227 | ||
225 | union { | 228 | union { |
226 | __u32 wakeup_events; /* wakeup every n events */ | 229 | __u32 wakeup_events; /* wakeup every n events */ |
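The two new attribute bits let a counter be restricted to host-only or guest-only execution under virtualization; the reserved field shrinks from 45 to 43 bits, so the 64-bit bitfield layout and the ABI size are unchanged. A hedged userspace sketch of requesting a host-only cycle counter, assuming headers new enough to carry these bits (glibc provides no perf_event_open wrapper, so one is defined here):

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                           int cpu, int group_fd, unsigned long flags)
{
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int open_host_only_cycles(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_CPU_CYCLES;
        attr.exclude_guest = 1;   /* new bit: do not count while in a guest */

        return perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
}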
diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index e07e2742a865..1dc420ba213a 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h | |||
@@ -51,6 +51,7 @@ | |||
51 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) | 51 | #define PTP_CLASS_V2_VLAN (PTP_CLASS_V2 | PTP_CLASS_VLAN) |
52 | 52 | ||
53 | #define PTP_EV_PORT 319 | 53 | #define PTP_EV_PORT 319 |
54 | #define PTP_GEN_BIT 0x08 /* indicates general message, if set in message type */ | ||
54 | 55 | ||
55 | #define OFF_ETYPE 12 | 56 | #define OFF_ETYPE 12 |
56 | #define OFF_IHL 14 | 57 | #define OFF_IHL 14 |
@@ -116,14 +117,20 @@ static inline int ptp_filter_init(struct sock_filter *f, int len) | |||
116 | {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ | 117 | {OP_OR, 0, 0, PTP_CLASS_IPV6 }, /* */ \ |
117 | {OP_RETA, 0, 0, 0 }, /* */ \ | 118 | {OP_RETA, 0, 0, 0 }, /* */ \ |
118 | /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ | 119 | /*L3x*/ {OP_RETK, 0, 0, PTP_CLASS_NONE }, /* */ \ |
119 | /*L40*/ {OP_JEQ, 0, 6, ETH_P_8021Q }, /* f goto L50 */ \ | 120 | /*L40*/ {OP_JEQ, 0, 9, ETH_P_8021Q }, /* f goto L50 */ \ |
120 | {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ | 121 | {OP_LDH, 0, 0, OFF_ETYPE + 4 }, /* */ \ |
121 | {OP_JEQ, 0, 9, ETH_P_1588 }, /* f goto L60 */ \ | 122 | {OP_JEQ, 0, 15, ETH_P_1588 }, /* f goto L60 */ \ |
123 | {OP_LDB, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ | ||
124 | {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ | ||
125 | {OP_JEQ, 0, 12, 0 }, /* f goto L6x */ \ | ||
122 | {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ | 126 | {OP_LDH, 0, 0, ETH_HLEN + VLAN_HLEN }, /* */ \ |
123 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ | 127 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ |
124 | {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ | 128 | {OP_OR, 0, 0, PTP_CLASS_VLAN }, /* */ \ |
125 | {OP_RETA, 0, 0, 0 }, /* */ \ | 129 | {OP_RETA, 0, 0, 0 }, /* */ \ |
126 | /*L50*/ {OP_JEQ, 0, 4, ETH_P_1588 }, /* f goto L61 */ \ | 130 | /*L50*/ {OP_JEQ, 0, 7, ETH_P_1588 }, /* f goto L61 */ \ |
131 | {OP_LDB, 0, 0, ETH_HLEN }, /* */ \ | ||
132 | {OP_AND, 0, 0, PTP_GEN_BIT }, /* */ \ | ||
133 | {OP_JEQ, 0, 4, 0 }, /* f goto L6x */ \ | ||
127 | {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ | 134 | {OP_LDH, 0, 0, ETH_HLEN }, /* */ \ |
128 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ | 135 | {OP_AND, 0, 0, PTP_CLASS_VMASK }, /* */ \ |
129 | {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ | 136 | {OP_OR, 0, 0, PTP_CLASS_L2 }, /* */ \ |
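The inserted OP_LDB/OP_AND/OP_JEQ triples make the classifier load the first byte of the layer-2 PTP header and test PTP_GEN_BIT, so "general" messages no longer classify as timestampable events; only frames with the bit clear fall through to the existing VMASK logic. In plain C the added test amounts to roughly the following (a sketch, not the BPF program itself):

#define PTP_GEN_BIT 0x08  /* set in the message-type byte of general messages */

static int is_ptp_event_message(unsigned char first_hdr_byte)
{
        return (first_hdr_byte & PTP_GEN_BIT) == 0;   /* event if bit clear */
}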
diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ac2c0578e0f..41d0237fd449 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1956,7 +1956,6 @@ static inline void disable_sched_clock_irqtime(void) {} | |||
1956 | 1956 | ||
1957 | extern unsigned long long | 1957 | extern unsigned long long |
1958 | task_sched_runtime(struct task_struct *task); | 1958 | task_sched_runtime(struct task_struct *task); |
1959 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | ||
1960 | 1959 | ||
1961 | /* sched_exec is called by processes performing an exec */ | 1960 | /* sched_exec is called by processes performing an exec */ |
1962 | #ifdef CONFIG_SMP | 1961 | #ifdef CONFIG_SMP |
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 6bca4cc0063c..5f172703eb4f 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h | |||
@@ -298,7 +298,7 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, | |||
298 | __array(char, name, 32) | 298 | __array(char, name, 32) |
299 | __field(unsigned long, ino) | 299 | __field(unsigned long, ino) |
300 | __field(unsigned long, state) | 300 | __field(unsigned long, state) |
301 | __field(unsigned long, age) | 301 | __field(unsigned long, dirtied_when) |
302 | __field(unsigned long, writeback_index) | 302 | __field(unsigned long, writeback_index) |
303 | __field(long, nr_to_write) | 303 | __field(long, nr_to_write) |
304 | __field(unsigned long, wrote) | 304 | __field(unsigned long, wrote) |
@@ -309,19 +309,19 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template, | |||
309 | dev_name(inode->i_mapping->backing_dev_info->dev), 32); | 309 | dev_name(inode->i_mapping->backing_dev_info->dev), 32); |
310 | __entry->ino = inode->i_ino; | 310 | __entry->ino = inode->i_ino; |
311 | __entry->state = inode->i_state; | 311 | __entry->state = inode->i_state; |
312 | __entry->age = (jiffies - inode->dirtied_when) * | 312 | __entry->dirtied_when = inode->dirtied_when; |
313 | 1000 / HZ; | ||
314 | __entry->writeback_index = inode->i_mapping->writeback_index; | 313 | __entry->writeback_index = inode->i_mapping->writeback_index; |
315 | __entry->nr_to_write = nr_to_write; | 314 | __entry->nr_to_write = nr_to_write; |
316 | __entry->wrote = nr_to_write - wbc->nr_to_write; | 315 | __entry->wrote = nr_to_write - wbc->nr_to_write; |
317 | ), | 316 | ), |
318 | 317 | ||
319 | TP_printk("bdi %s: ino=%lu state=%s age=%lu " | 318 | TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " |
320 | "index=%lu to_write=%ld wrote=%lu", | 319 | "index=%lu to_write=%ld wrote=%lu", |
321 | __entry->name, | 320 | __entry->name, |
322 | __entry->ino, | 321 | __entry->ino, |
323 | show_inode_state(__entry->state), | 322 | show_inode_state(__entry->state), |
324 | __entry->age, | 323 | __entry->dirtied_when, |
324 | (jiffies - __entry->dirtied_when) / HZ, | ||
325 | __entry->writeback_index, | 325 | __entry->writeback_index, |
326 | __entry->nr_to_write, | 326 | __entry->nr_to_write, |
327 | __entry->wrote | 327 | __entry->wrote |
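Storing the raw inode->dirtied_when jiffies value instead of a pre-computed age keeps the recorded field stable for post-processing; the age is now derived at print time and, without the old *1000 scaling, is reported in seconds rather than milliseconds. A sketch of the two derivations, assuming plain unsigned jiffies arithmetic:

/* Old: age computed and stored when the event fires (milliseconds). */
unsigned long age_ms_at_record(unsigned long now, unsigned long dirtied_when,
                               unsigned long hz)
{
        return (now - dirtied_when) * 1000 / hz;
}

/* New: dirtied_when is stored, age derived when the event is printed. */
unsigned long age_s_at_print(unsigned long now, unsigned long dirtied_when,
                             unsigned long hz)
{
        return (now - dirtied_when) / hz;
}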
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h index 533c49f48047..769724944fc6 100644 --- a/include/trace/ftrace.h +++ b/include/trace/ftrace.h | |||
@@ -711,6 +711,9 @@ __attribute__((section("_ftrace_events"))) *__event_##call = &event_##call | |||
711 | #undef __perf_count | 711 | #undef __perf_count |
712 | #define __perf_count(c) __count = (c) | 712 | #define __perf_count(c) __count = (c) |
713 | 713 | ||
714 | #undef TP_perf_assign | ||
715 | #define TP_perf_assign(args...) args | ||
716 | |||
714 | #undef DECLARE_EVENT_CLASS | 717 | #undef DECLARE_EVENT_CLASS |
715 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ | 718 | #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ |
716 | static notrace void \ | 719 | static notrace void \ |
diff --git a/init/main.c b/init/main.c index 2a9b88aa5e76..03b408dff825 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -381,9 +381,6 @@ static noinline void __init_refok rest_init(void) | |||
381 | preempt_enable_no_resched(); | 381 | preempt_enable_no_resched(); |
382 | schedule(); | 382 | schedule(); |
383 | 383 | ||
384 | /* At this point, we can enable user mode helper functionality */ | ||
385 | usermodehelper_enable(); | ||
386 | |||
387 | /* Call into cpu_idle with preempt disabled */ | 384 | /* Call into cpu_idle with preempt disabled */ |
388 | preempt_disable(); | 385 | preempt_disable(); |
389 | cpu_idle(); | 386 | cpu_idle(); |
@@ -733,6 +730,7 @@ static void __init do_basic_setup(void) | |||
733 | driver_init(); | 730 | driver_init(); |
734 | init_irq_proc(); | 731 | init_irq_proc(); |
735 | do_ctors(); | 732 | do_ctors(); |
733 | usermodehelper_enable(); | ||
736 | do_initcalls(); | 734 | do_initcalls(); |
737 | } | 735 | } |
738 | 736 | ||
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index d5828da3fd38..b57a3776de44 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c | |||
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain) | |||
29 | */ | 29 | */ |
30 | for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { | 30 | for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) { |
31 | d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); | 31 | d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq)); |
32 | if (d || d->domain) { | 32 | if (!d) { |
33 | WARN(1, "error: assigning domain to non existant irq_desc"); | ||
34 | return; | ||
35 | } | ||
36 | if (d->domain) { | ||
33 | /* things are broken; just report, don't clean up */ | 37 | /* things are broken; just report, don't clean up */ |
34 | WARN(1, "error: irq_desc already assigned to a domain"); | 38 | WARN(1, "error: irq_desc already assigned to a domain"); |
35 | return; | 39 | return; |
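The original test `if (d || d->domain)` is doubly wrong: it fires on every valid descriptor and would dereference d when it is NULL, which is exactly the case it tries to catch. The fix splits it into a NULL check followed by the ownership check. A condensed sketch with irq_data reduced to the one field used here:

struct irq_data_stub { void *domain; };

static int ok_to_assign_domain(struct irq_data_stub *d)
{
        if (!d)           /* no irq_desc behind this hwirq: warn and bail */
                return 0;
        if (d->domain)    /* already owned by another domain: warn and bail */
                return 0;
        return 1;
}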
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 58f405b581e7..c8008dd58ef2 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c | |||
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) | |||
250 | do { | 250 | do { |
251 | times->utime = cputime_add(times->utime, t->utime); | 251 | times->utime = cputime_add(times->utime, t->utime); |
252 | times->stime = cputime_add(times->stime, t->stime); | 252 | times->stime = cputime_add(times->stime, t->stime); |
253 | times->sum_exec_runtime += t->se.sum_exec_runtime; | 253 | times->sum_exec_runtime += task_sched_runtime(t); |
254 | } while_each_thread(tsk, t); | 254 | } while_each_thread(tsk, t); |
255 | out: | 255 | out: |
256 | rcu_read_unlock(); | 256 | rcu_read_unlock(); |
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock, | |||
312 | cpu->cpu = cputime.utime; | 312 | cpu->cpu = cputime.utime; |
313 | break; | 313 | break; |
314 | case CPUCLOCK_SCHED: | 314 | case CPUCLOCK_SCHED: |
315 | cpu->sched = thread_group_sched_runtime(p); | 315 | thread_group_cputime(p, &cputime); |
316 | cpu->sched = cputime.sum_exec_runtime; | ||
316 | break; | 317 | break; |
317 | } | 318 | } |
318 | return 0; | 319 | return 0; |
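Summing task_sched_runtime(t) per thread folds each thread's not-yet-accounted runtime delta into the group total, so CPUCLOCK_SCHED sampling can reuse thread_group_cputime() and the separate thread_group_sched_runtime() helper (deleted from sched.c later in this listing) goes away. A sketch of the accumulation pattern under simplified types:

struct fake_task { unsigned long long accounted_ns, pending_delta_ns; };

/* Per-thread helper already includes the running thread's pending delta. */
static unsigned long long task_runtime(const struct fake_task *t)
{
        return t->accounted_ns + t->pending_delta_ns;
}

static unsigned long long group_runtime(const struct fake_task *threads, int n)
{
        unsigned long long sum = 0;
        int i;

        for (i = 0; i < n; i++)
                sum += task_runtime(&threads[i]);
        return sum;
}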
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index 9de3ecfd20f9..a70d2a5d8c7b 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request, | |||
744 | break; | 744 | break; |
745 | 745 | ||
746 | si = child->last_siginfo; | 746 | si = child->last_siginfo; |
747 | if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP)) | 747 | if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) { |
748 | break; | 748 | child->jobctl |= JOBCTL_LISTENING; |
749 | 749 | /* | |
750 | child->jobctl |= JOBCTL_LISTENING; | 750 | * If NOTIFY is set, it means event happened between |
751 | 751 | * start of this trap and now. Trigger re-trap. | |
752 | /* | 752 | */ |
753 | * If NOTIFY is set, it means event happened between start | 753 | if (child->jobctl & JOBCTL_TRAP_NOTIFY) |
754 | * of this trap and now. Trigger re-trap immediately. | 754 | signal_wake_up(child, true); |
755 | */ | 755 | ret = 0; |
756 | if (child->jobctl & JOBCTL_TRAP_NOTIFY) | 756 | } |
757 | signal_wake_up(child, true); | ||
758 | |||
759 | unlock_task_sighand(child, &flags); | 757 | unlock_task_sighand(child, &flags); |
760 | ret = 0; | ||
761 | break; | 758 | break; |
762 | 759 | ||
763 | case PTRACE_DETACH: /* detach a process that was attached. */ | 760 | case PTRACE_DETACH: /* detach a process that was attached. */ |
diff --git a/kernel/resource.c b/kernel/resource.c index 3b3cedc52592..c8dc249da5ce 100644 --- a/kernel/resource.c +++ b/kernel/resource.c | |||
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
419 | else | 419 | else |
420 | tmp.end = root->end; | 420 | tmp.end = root->end; |
421 | 421 | ||
422 | if (tmp.end < tmp.start) | ||
423 | goto next; | ||
424 | |||
422 | resource_clip(&tmp, constraint->min, constraint->max); | 425 | resource_clip(&tmp, constraint->min, constraint->max); |
423 | arch_remove_reservations(&tmp); | 426 | arch_remove_reservations(&tmp); |
424 | 427 | ||
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old, | |||
436 | return 0; | 439 | return 0; |
437 | } | 440 | } |
438 | } | 441 | } |
439 | if (!this) | 442 | |
443 | next: if (!this || this->end == root->end) | ||
440 | break; | 444 | break; |
445 | |||
441 | if (this != old) | 446 | if (this != old) |
442 | tmp.start = this->end + 1; | 447 | tmp.start = this->end + 1; |
443 | this = this->sibling; | 448 | this = this->sibling; |
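__find_resource() now skips candidate windows whose end has wrapped below their start (possible once tmp.start is advanced past this->end) and stops as soon as the sibling walk reaches the end of the root window, instead of relying on `!this` alone. A hypothetical minimal form of the added guard:

struct range { unsigned long start, end; };

static int candidate_is_sane(const struct range *r)
{
        return r->end >= r->start;   /* otherwise: goto next, try the sibling */
}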
diff --git a/kernel/sched.c b/kernel/sched.c index ec5f472bc5b9..b50b0f0c9aa9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p) | |||
3725 | } | 3725 | } |
3726 | 3726 | ||
3727 | /* | 3727 | /* |
3728 | * Return sum_exec_runtime for the thread group. | ||
3729 | * In case the task is currently running, return the sum plus current's | ||
3730 | * pending runtime that have not been accounted yet. | ||
3731 | * | ||
3732 | * Note that the thread group might have other running tasks as well, | ||
3733 | * so the return value not includes other pending runtime that other | ||
3734 | * running tasks might have. | ||
3735 | */ | ||
3736 | unsigned long long thread_group_sched_runtime(struct task_struct *p) | ||
3737 | { | ||
3738 | struct task_cputime totals; | ||
3739 | unsigned long flags; | ||
3740 | struct rq *rq; | ||
3741 | u64 ns; | ||
3742 | |||
3743 | rq = task_rq_lock(p, &flags); | ||
3744 | thread_group_cputime(p, &totals); | ||
3745 | ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq); | ||
3746 | task_rq_unlock(rq, p, &flags); | ||
3747 | |||
3748 | return ns; | ||
3749 | } | ||
3750 | |||
3751 | /* | ||
3752 | * Account user cpu time to a process. | 3728 | * Account user cpu time to a process. |
3753 | * @p: the process that the cpu time gets accounted to | 3729 | * @p: the process that the cpu time gets accounted to |
3754 | * @cputime: the cpu time spent in user space since the last update | 3730 | * @cputime: the cpu time spent in user space since the last update |
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk) | |||
4372 | blk_schedule_flush_plug(tsk); | 4348 | blk_schedule_flush_plug(tsk); |
4373 | } | 4349 | } |
4374 | 4350 | ||
4375 | asmlinkage void schedule(void) | 4351 | asmlinkage void __sched schedule(void) |
4376 | { | 4352 | { |
4377 | struct task_struct *tsk = current; | 4353 | struct task_struct *tsk = current; |
4378 | 4354 | ||
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 97540f0c9e47..af1177858be3 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c | |||
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) | |||
1050 | */ | 1050 | */ |
1051 | if (curr && unlikely(rt_task(curr)) && | 1051 | if (curr && unlikely(rt_task(curr)) && |
1052 | (curr->rt.nr_cpus_allowed < 2 || | 1052 | (curr->rt.nr_cpus_allowed < 2 || |
1053 | curr->prio < p->prio) && | 1053 | curr->prio <= p->prio) && |
1054 | (p->rt.nr_cpus_allowed > 1)) { | 1054 | (p->rt.nr_cpus_allowed > 1)) { |
1055 | int target = find_lowest_rq(p); | 1055 | int target = find_lowest_rq(p); |
1056 | 1056 | ||
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p) | |||
1581 | p->rt.nr_cpus_allowed > 1 && | 1581 | p->rt.nr_cpus_allowed > 1 && |
1582 | rt_task(rq->curr) && | 1582 | rt_task(rq->curr) && |
1583 | (rq->curr->rt.nr_cpus_allowed < 2 || | 1583 | (rq->curr->rt.nr_cpus_allowed < 2 || |
1584 | rq->curr->prio < p->prio)) | 1584 | rq->curr->prio <= p->prio)) |
1585 | push_rt_tasks(rq); | 1585 | push_rt_tasks(rq); |
1586 | } | 1586 | } |
1587 | 1587 | ||
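Both call sites now compare with `<=` rather than `<`: since a lower numeric prio means a higher RT priority, a task waking with the same priority as the CPU's current RT task is now pushed toward another CPU instead of queuing behind it. A sketch of the shared predicate:

/* Smaller ->prio is higher priority for RT tasks. */
static int send_waker_elsewhere(int curr_prio, int curr_cpus_allowed,
                                int waking_prio, int waking_cpus_allowed)
{
        return (curr_cpus_allowed < 2 || curr_prio <= waking_prio) &&
               waking_cpus_allowed > 1;
}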
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 3e2f91ffa4e2..05dd35114a27 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c | |||
@@ -565,7 +565,7 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
565 | struct orig_node *orig_node = NULL; | 565 | struct orig_node *orig_node = NULL; |
566 | int data_len = skb->len, ret; | 566 | int data_len = skb->len, ret; |
567 | short vid = -1; | 567 | short vid = -1; |
568 | bool do_bcast = false; | 568 | bool do_bcast; |
569 | 569 | ||
570 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) | 570 | if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) |
571 | goto dropped; | 571 | goto dropped; |
@@ -598,15 +598,15 @@ static int interface_tx(struct sk_buff *skb, struct net_device *soft_iface) | |||
598 | tt_local_add(soft_iface, ethhdr->h_source); | 598 | tt_local_add(soft_iface, ethhdr->h_source); |
599 | 599 | ||
600 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); | 600 | orig_node = transtable_search(bat_priv, ethhdr->h_dest); |
601 | if (is_multicast_ether_addr(ethhdr->h_dest) || | 601 | do_bcast = is_multicast_ether_addr(ethhdr->h_dest); |
602 | (orig_node && orig_node->gw_flags)) { | 602 | if (do_bcast || (orig_node && orig_node->gw_flags)) { |
603 | ret = gw_is_target(bat_priv, skb, orig_node); | 603 | ret = gw_is_target(bat_priv, skb, orig_node); |
604 | 604 | ||
605 | if (ret < 0) | 605 | if (ret < 0) |
606 | goto dropped; | 606 | goto dropped; |
607 | 607 | ||
608 | if (ret == 0) | 608 | if (ret) |
609 | do_bcast = true; | 609 | do_bcast = false; |
610 | } | 610 | } |
611 | 611 | ||
612 | /* ethernet packet should be broadcasted */ | 612 | /* ethernet packet should be broadcasted */ |
diff --git a/net/can/bcm.c b/net/can/bcm.c index d6c8ae5b2e6a..c84963d2dee6 100644 --- a/net/can/bcm.c +++ b/net/can/bcm.c | |||
@@ -344,6 +344,18 @@ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, | |||
344 | } | 344 | } |
345 | } | 345 | } |
346 | 346 | ||
347 | static void bcm_tx_start_timer(struct bcm_op *op) | ||
348 | { | ||
349 | if (op->kt_ival1.tv64 && op->count) | ||
350 | hrtimer_start(&op->timer, | ||
351 | ktime_add(ktime_get(), op->kt_ival1), | ||
352 | HRTIMER_MODE_ABS); | ||
353 | else if (op->kt_ival2.tv64) | ||
354 | hrtimer_start(&op->timer, | ||
355 | ktime_add(ktime_get(), op->kt_ival2), | ||
356 | HRTIMER_MODE_ABS); | ||
357 | } | ||
358 | |||
347 | static void bcm_tx_timeout_tsklet(unsigned long data) | 359 | static void bcm_tx_timeout_tsklet(unsigned long data) |
348 | { | 360 | { |
349 | struct bcm_op *op = (struct bcm_op *)data; | 361 | struct bcm_op *op = (struct bcm_op *)data; |
@@ -365,26 +377,12 @@ static void bcm_tx_timeout_tsklet(unsigned long data) | |||
365 | 377 | ||
366 | bcm_send_to_user(op, &msg_head, NULL, 0); | 378 | bcm_send_to_user(op, &msg_head, NULL, 0); |
367 | } | 379 | } |
368 | } | ||
369 | |||
370 | if (op->kt_ival1.tv64 && (op->count > 0)) { | ||
371 | |||
372 | /* send (next) frame */ | ||
373 | bcm_can_tx(op); | 380 | bcm_can_tx(op); |
374 | hrtimer_start(&op->timer, | ||
375 | ktime_add(ktime_get(), op->kt_ival1), | ||
376 | HRTIMER_MODE_ABS); | ||
377 | 381 | ||
378 | } else { | 382 | } else if (op->kt_ival2.tv64) |
379 | if (op->kt_ival2.tv64) { | 383 | bcm_can_tx(op); |
380 | 384 | ||
381 | /* send (next) frame */ | 385 | bcm_tx_start_timer(op); |
382 | bcm_can_tx(op); | ||
383 | hrtimer_start(&op->timer, | ||
384 | ktime_add(ktime_get(), op->kt_ival2), | ||
385 | HRTIMER_MODE_ABS); | ||
386 | } | ||
387 | } | ||
388 | } | 386 | } |
389 | 387 | ||
390 | /* | 388 | /* |
@@ -964,23 +962,20 @@ static int bcm_tx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, | |||
964 | hrtimer_cancel(&op->timer); | 962 | hrtimer_cancel(&op->timer); |
965 | } | 963 | } |
966 | 964 | ||
967 | if ((op->flags & STARTTIMER) && | 965 | if (op->flags & STARTTIMER) { |
968 | ((op->kt_ival1.tv64 && op->count) || op->kt_ival2.tv64)) { | 966 | hrtimer_cancel(&op->timer); |
969 | |||
970 | /* spec: send can_frame when starting timer */ | 967 | /* spec: send can_frame when starting timer */ |
971 | op->flags |= TX_ANNOUNCE; | 968 | op->flags |= TX_ANNOUNCE; |
972 | |||
973 | if (op->kt_ival1.tv64 && (op->count > 0)) { | ||
974 | /* op->count-- is done in bcm_tx_timeout_handler */ | ||
975 | hrtimer_start(&op->timer, op->kt_ival1, | ||
976 | HRTIMER_MODE_REL); | ||
977 | } else | ||
978 | hrtimer_start(&op->timer, op->kt_ival2, | ||
979 | HRTIMER_MODE_REL); | ||
980 | } | 969 | } |
981 | 970 | ||
982 | if (op->flags & TX_ANNOUNCE) | 971 | if (op->flags & TX_ANNOUNCE) { |
983 | bcm_can_tx(op); | 972 | bcm_can_tx(op); |
973 | if (op->count) | ||
974 | op->count--; | ||
975 | } | ||
976 | |||
977 | if (op->flags & STARTTIMER) | ||
978 | bcm_tx_start_timer(op); | ||
984 | 979 | ||
985 | return msg_head->nframes * CFSIZ + MHSIZ; | 980 | return msg_head->nframes * CFSIZ + MHSIZ; |
986 | } | 981 | } |
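bcm_tx_start_timer() factors out the "arm the cyclic TX timer from now" logic so the timeout tasklet and bcm_tx_setup() share it, and STARTTIMER handling is simplified so the announcement frame and the count decrement happen in one place before the timer is (re)armed. A sketch of the interval selection the helper performs:

struct tx_op_sketch { long long ival1_ns, ival2_ns; unsigned int count; };

static long long next_interval_ns(const struct tx_op_sketch *op)
{
        if (op->ival1_ns && op->count)   /* still inside the ival1 burst */
                return op->ival1_ns;
        if (op->ival2_ns)                /* then fall back to cyclic ival2 */
                return op->ival2_ns;
        return 0;                        /* nothing to arm */
}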
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c index 132963abc266..2883ea01e680 100644 --- a/net/ceph/ceph_common.c +++ b/net/ceph/ceph_common.c | |||
@@ -232,6 +232,7 @@ void ceph_destroy_options(struct ceph_options *opt) | |||
232 | ceph_crypto_key_destroy(opt->key); | 232 | ceph_crypto_key_destroy(opt->key); |
233 | kfree(opt->key); | 233 | kfree(opt->key); |
234 | } | 234 | } |
235 | kfree(opt->mon_addr); | ||
235 | kfree(opt); | 236 | kfree(opt); |
236 | } | 237 | } |
237 | EXPORT_SYMBOL(ceph_destroy_options); | 238 | EXPORT_SYMBOL(ceph_destroy_options); |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index c340e2e0765b..9918e9eb276e 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -2307,6 +2307,7 @@ struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags) | |||
2307 | m->front_max = front_len; | 2307 | m->front_max = front_len; |
2308 | m->front_is_vmalloc = false; | 2308 | m->front_is_vmalloc = false; |
2309 | m->more_to_follow = false; | 2309 | m->more_to_follow = false; |
2310 | m->ack_stamp = 0; | ||
2310 | m->pool = NULL; | 2311 | m->pool = NULL; |
2311 | 2312 | ||
2312 | /* middle */ | 2313 | /* middle */ |
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index 16836a7df7a6..88ad8a2501b5 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c | |||
@@ -217,6 +217,7 @@ struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | |||
217 | INIT_LIST_HEAD(&req->r_unsafe_item); | 217 | INIT_LIST_HEAD(&req->r_unsafe_item); |
218 | INIT_LIST_HEAD(&req->r_linger_item); | 218 | INIT_LIST_HEAD(&req->r_linger_item); |
219 | INIT_LIST_HEAD(&req->r_linger_osd); | 219 | INIT_LIST_HEAD(&req->r_linger_osd); |
220 | INIT_LIST_HEAD(&req->r_req_lru_item); | ||
220 | req->r_flags = flags; | 221 | req->r_flags = flags; |
221 | 222 | ||
222 | WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); | 223 | WARN_ON((flags & (CEPH_OSD_FLAG_READ|CEPH_OSD_FLAG_WRITE)) == 0); |
@@ -816,13 +817,10 @@ static void __register_request(struct ceph_osd_client *osdc, | |||
816 | { | 817 | { |
817 | req->r_tid = ++osdc->last_tid; | 818 | req->r_tid = ++osdc->last_tid; |
818 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); | 819 | req->r_request->hdr.tid = cpu_to_le64(req->r_tid); |
819 | INIT_LIST_HEAD(&req->r_req_lru_item); | ||
820 | |||
821 | dout("__register_request %p tid %lld\n", req, req->r_tid); | 820 | dout("__register_request %p tid %lld\n", req, req->r_tid); |
822 | __insert_request(osdc, req); | 821 | __insert_request(osdc, req); |
823 | ceph_osdc_get_request(req); | 822 | ceph_osdc_get_request(req); |
824 | osdc->num_requests++; | 823 | osdc->num_requests++; |
825 | |||
826 | if (osdc->num_requests == 1) { | 824 | if (osdc->num_requests == 1) { |
827 | dout(" first request, scheduling timeout\n"); | 825 | dout(" first request, scheduling timeout\n"); |
828 | __schedule_osd_timeout(osdc); | 826 | __schedule_osd_timeout(osdc); |
diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index e97c3588c3ec..fd863fe76934 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c | |||
@@ -339,6 +339,7 @@ static int __insert_pg_mapping(struct ceph_pg_mapping *new, | |||
339 | struct ceph_pg_mapping *pg = NULL; | 339 | struct ceph_pg_mapping *pg = NULL; |
340 | int c; | 340 | int c; |
341 | 341 | ||
342 | dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new); | ||
342 | while (*p) { | 343 | while (*p) { |
343 | parent = *p; | 344 | parent = *p; |
344 | pg = rb_entry(parent, struct ceph_pg_mapping, node); | 345 | pg = rb_entry(parent, struct ceph_pg_mapping, node); |
@@ -366,16 +367,33 @@ static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root, | |||
366 | while (n) { | 367 | while (n) { |
367 | pg = rb_entry(n, struct ceph_pg_mapping, node); | 368 | pg = rb_entry(n, struct ceph_pg_mapping, node); |
368 | c = pgid_cmp(pgid, pg->pgid); | 369 | c = pgid_cmp(pgid, pg->pgid); |
369 | if (c < 0) | 370 | if (c < 0) { |
370 | n = n->rb_left; | 371 | n = n->rb_left; |
371 | else if (c > 0) | 372 | } else if (c > 0) { |
372 | n = n->rb_right; | 373 | n = n->rb_right; |
373 | else | 374 | } else { |
375 | dout("__lookup_pg_mapping %llx got %p\n", | ||
376 | *(u64 *)&pgid, pg); | ||
374 | return pg; | 377 | return pg; |
378 | } | ||
375 | } | 379 | } |
376 | return NULL; | 380 | return NULL; |
377 | } | 381 | } |
378 | 382 | ||
383 | static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid) | ||
384 | { | ||
385 | struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid); | ||
386 | |||
387 | if (pg) { | ||
388 | dout("__remove_pg_mapping %llx %p\n", *(u64 *)&pgid, pg); | ||
389 | rb_erase(&pg->node, root); | ||
390 | kfree(pg); | ||
391 | return 0; | ||
392 | } | ||
393 | dout("__remove_pg_mapping %llx dne\n", *(u64 *)&pgid); | ||
394 | return -ENOENT; | ||
395 | } | ||
396 | |||
379 | /* | 397 | /* |
380 | * rbtree of pg pool info | 398 | * rbtree of pg pool info |
381 | */ | 399 | */ |
@@ -711,7 +729,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
711 | void *start = *p; | 729 | void *start = *p; |
712 | int err = -EINVAL; | 730 | int err = -EINVAL; |
713 | u16 version; | 731 | u16 version; |
714 | struct rb_node *rbp; | ||
715 | 732 | ||
716 | ceph_decode_16_safe(p, end, version, bad); | 733 | ceph_decode_16_safe(p, end, version, bad); |
717 | if (version > CEPH_OSDMAP_INC_VERSION) { | 734 | if (version > CEPH_OSDMAP_INC_VERSION) { |
@@ -861,7 +878,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
861 | } | 878 | } |
862 | 879 | ||
863 | /* new_pg_temp */ | 880 | /* new_pg_temp */ |
864 | rbp = rb_first(&map->pg_temp); | ||
865 | ceph_decode_32_safe(p, end, len, bad); | 881 | ceph_decode_32_safe(p, end, len, bad); |
866 | while (len--) { | 882 | while (len--) { |
867 | struct ceph_pg_mapping *pg; | 883 | struct ceph_pg_mapping *pg; |
@@ -872,18 +888,6 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
872 | ceph_decode_copy(p, &pgid, sizeof(pgid)); | 888 | ceph_decode_copy(p, &pgid, sizeof(pgid)); |
873 | pglen = ceph_decode_32(p); | 889 | pglen = ceph_decode_32(p); |
874 | 890 | ||
875 | /* remove any? */ | ||
876 | while (rbp && pgid_cmp(rb_entry(rbp, struct ceph_pg_mapping, | ||
877 | node)->pgid, pgid) <= 0) { | ||
878 | struct ceph_pg_mapping *cur = | ||
879 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
880 | |||
881 | rbp = rb_next(rbp); | ||
882 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); | ||
883 | rb_erase(&cur->node, &map->pg_temp); | ||
884 | kfree(cur); | ||
885 | } | ||
886 | |||
887 | if (pglen) { | 891 | if (pglen) { |
888 | /* insert */ | 892 | /* insert */ |
889 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); | 893 | ceph_decode_need(p, end, pglen*sizeof(u32), bad); |
@@ -903,17 +907,11 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | |||
903 | } | 907 | } |
904 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, | 908 | dout(" added pg_temp %llx len %d\n", *(u64 *)&pgid, |
905 | pglen); | 909 | pglen); |
910 | } else { | ||
911 | /* remove */ | ||
912 | __remove_pg_mapping(&map->pg_temp, pgid); | ||
906 | } | 913 | } |
907 | } | 914 | } |
908 | while (rbp) { | ||
909 | struct ceph_pg_mapping *cur = | ||
910 | rb_entry(rbp, struct ceph_pg_mapping, node); | ||
911 | |||
912 | rbp = rb_next(rbp); | ||
913 | dout(" removed pg_temp %llx\n", *(u64 *)&cur->pgid); | ||
914 | rb_erase(&cur->node, &map->pg_temp); | ||
915 | kfree(cur); | ||
916 | } | ||
917 | 915 | ||
918 | /* ignore the rest */ | 916 | /* ignore the rest */ |
919 | *p = end; | 917 | *p = end; |
@@ -1046,10 +1044,25 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1046 | struct ceph_pg_mapping *pg; | 1044 | struct ceph_pg_mapping *pg; |
1047 | struct ceph_pg_pool_info *pool; | 1045 | struct ceph_pg_pool_info *pool; |
1048 | int ruleno; | 1046 | int ruleno; |
1049 | unsigned poolid, ps, pps; | 1047 | unsigned poolid, ps, pps, t; |
1050 | int preferred; | 1048 | int preferred; |
1051 | 1049 | ||
1050 | poolid = le32_to_cpu(pgid.pool); | ||
1051 | ps = le16_to_cpu(pgid.ps); | ||
1052 | preferred = (s16)le16_to_cpu(pgid.preferred); | ||
1053 | |||
1054 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | ||
1055 | if (!pool) | ||
1056 | return NULL; | ||
1057 | |||
1052 | /* pg_temp? */ | 1058 | /* pg_temp? */ |
1059 | if (preferred >= 0) | ||
1060 | t = ceph_stable_mod(ps, le32_to_cpu(pool->v.lpg_num), | ||
1061 | pool->lpgp_num_mask); | ||
1062 | else | ||
1063 | t = ceph_stable_mod(ps, le32_to_cpu(pool->v.pg_num), | ||
1064 | pool->pgp_num_mask); | ||
1065 | pgid.ps = cpu_to_le16(t); | ||
1053 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); | 1066 | pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid); |
1054 | if (pg) { | 1067 | if (pg) { |
1055 | *num = pg->len; | 1068 | *num = pg->len; |
@@ -1057,18 +1070,6 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1057 | } | 1070 | } |
1058 | 1071 | ||
1059 | /* crush */ | 1072 | /* crush */ |
1060 | poolid = le32_to_cpu(pgid.pool); | ||
1061 | ps = le16_to_cpu(pgid.ps); | ||
1062 | preferred = (s16)le16_to_cpu(pgid.preferred); | ||
1063 | |||
1064 | /* don't forcefeed bad device ids to crush */ | ||
1065 | if (preferred >= osdmap->max_osd || | ||
1066 | preferred >= osdmap->crush->max_devices) | ||
1067 | preferred = -1; | ||
1068 | |||
1069 | pool = __lookup_pg_pool(&osdmap->pg_pools, poolid); | ||
1070 | if (!pool) | ||
1071 | return NULL; | ||
1072 | ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, | 1073 | ruleno = crush_find_rule(osdmap->crush, pool->v.crush_ruleset, |
1073 | pool->v.type, pool->v.size); | 1074 | pool->v.type, pool->v.size); |
1074 | if (ruleno < 0) { | 1075 | if (ruleno < 0) { |
@@ -1078,6 +1079,11 @@ static int *calc_pg_raw(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | |||
1078 | return NULL; | 1079 | return NULL; |
1079 | } | 1080 | } |
1080 | 1081 | ||
1082 | /* don't forcefeed bad device ids to crush */ | ||
1083 | if (preferred >= osdmap->max_osd || | ||
1084 | preferred >= osdmap->crush->max_devices) | ||
1085 | preferred = -1; | ||
1086 | |||
1081 | if (preferred >= 0) | 1087 | if (preferred >= 0) |
1082 | pps = ceph_stable_mod(ps, | 1088 | pps = ceph_stable_mod(ps, |
1083 | le32_to_cpu(pool->v.lpgp_num), | 1089 | le32_to_cpu(pool->v.lpgp_num), |
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 705c82886281..def0538e2413 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c | |||
@@ -696,8 +696,10 @@ static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, | |||
696 | int err; | 696 | int err; |
697 | 697 | ||
698 | err = ip6mr_fib_lookup(net, &fl6, &mrt); | 698 | err = ip6mr_fib_lookup(net, &fl6, &mrt); |
699 | if (err < 0) | 699 | if (err < 0) { |
700 | kfree_skb(skb); | ||
700 | return err; | 701 | return err; |
702 | } | ||
701 | 703 | ||
702 | read_lock(&mrt_lock); | 704 | read_lock(&mrt_lock); |
703 | dev->stats.tx_bytes += skb->len; | 705 | dev->stats.tx_bytes += skb->len; |
@@ -2052,8 +2054,10 @@ int ip6_mr_input(struct sk_buff *skb) | |||
2052 | int err; | 2054 | int err; |
2053 | 2055 | ||
2054 | err = ip6mr_fib_lookup(net, &fl6, &mrt); | 2056 | err = ip6mr_fib_lookup(net, &fl6, &mrt); |
2055 | if (err < 0) | 2057 | if (err < 0) { |
2058 | kfree_skb(skb); | ||
2056 | return err; | 2059 | return err; |
2060 | } | ||
2057 | 2061 | ||
2058 | read_lock(&mrt_lock); | 2062 | read_lock(&mrt_lock); |
2059 | cache = ip6mr_cache_find(mrt, | 2063 | cache = ip6mr_cache_find(mrt, |
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1250f9020670..fb545edef6ea 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -244,7 +244,9 @@ static inline struct rt6_info *ip6_dst_alloc(struct dst_ops *ops, | |||
244 | { | 244 | { |
245 | struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); | 245 | struct rt6_info *rt = dst_alloc(ops, dev, 0, 0, flags); |
246 | 246 | ||
247 | memset(&rt->rt6i_table, 0, sizeof(*rt) - sizeof(struct dst_entry)); | 247 | if (rt != NULL) |
248 | memset(&rt->rt6i_table, 0, | ||
249 | sizeof(*rt) - sizeof(struct dst_entry)); | ||
248 | 250 | ||
249 | return rt; | 251 | return rt; |
250 | } | 252 | } |
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 3c9fa618b69d..79cc6469508d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c | |||
@@ -1383,6 +1383,8 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1383 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; | 1383 | newtp->af_specific = &tcp_sock_ipv6_mapped_specific; |
1384 | #endif | 1384 | #endif |
1385 | 1385 | ||
1386 | newnp->ipv6_ac_list = NULL; | ||
1387 | newnp->ipv6_fl_list = NULL; | ||
1386 | newnp->pktoptions = NULL; | 1388 | newnp->pktoptions = NULL; |
1387 | newnp->opt = NULL; | 1389 | newnp->opt = NULL; |
1388 | newnp->mcast_oif = inet6_iif(skb); | 1390 | newnp->mcast_oif = inet6_iif(skb); |
@@ -1447,6 +1449,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb, | |||
1447 | First: no IPv4 options. | 1449 | First: no IPv4 options. |
1448 | */ | 1450 | */ |
1449 | newinet->inet_opt = NULL; | 1451 | newinet->inet_opt = NULL; |
1452 | newnp->ipv6_ac_list = NULL; | ||
1450 | newnp->ipv6_fl_list = NULL; | 1453 | newnp->ipv6_fl_list = NULL; |
1451 | 1454 | ||
1452 | /* Clone RX bits */ | 1455 | /* Clone RX bits */ |
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c698cec0a445..fabb4fafa281 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -961,7 +961,10 @@ static int packet_rcv(struct sk_buff *skb, struct net_device *dev, | |||
961 | return 0; | 961 | return 0; |
962 | 962 | ||
963 | drop_n_acct: | 963 | drop_n_acct: |
964 | po->stats.tp_drops = atomic_inc_return(&sk->sk_drops); | 964 | spin_lock(&sk->sk_receive_queue.lock); |
965 | po->stats.tp_drops++; | ||
966 | atomic_inc(&sk->sk_drops); | ||
967 | spin_unlock(&sk->sk_receive_queue.lock); | ||
965 | 968 | ||
966 | drop_n_restore: | 969 | drop_n_restore: |
967 | if (skb_head != skb->data && skb_shared(skb)) { | 970 | if (skb_head != skb->data && skb_shared(skb)) { |
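Previously tp_drops was assigned the result of atomic_inc_return() on sk_drops, effectively turning the protocol statistic into a copy of the generic socket counter; the fix increments the two independently under sk_receive_queue.lock, the same lock the stats are read under. A userspace sketch of the pattern, with a pthread mutex standing in for the queue spinlock:

#include <pthread.h>

struct pkt_sock_stats {
        pthread_mutex_t lock;     /* stands in for sk_receive_queue.lock */
        unsigned int tp_drops;    /* protocol-visible drop counter */
        unsigned int sk_drops;    /* generic socket drop counter */
};

static void account_drop(struct pkt_sock_stats *s)
{
        pthread_mutex_lock(&s->lock);
        s->tp_drops++;            /* no longer overwritten with sk_drops */
        s->sk_drops++;
        pthread_mutex_unlock(&s->lock);
}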
diff --git a/net/rds/iw_rdma.c b/net/rds/iw_rdma.c index 8b77edbab272..4e1de171866c 100644 --- a/net/rds/iw_rdma.c +++ b/net/rds/iw_rdma.c | |||
@@ -84,7 +84,8 @@ static int rds_iw_map_fastreg(struct rds_iw_mr_pool *pool, | |||
84 | static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 84 | static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
85 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | 85 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, |
86 | struct list_head *unmap_list, | 86 | struct list_head *unmap_list, |
87 | struct list_head *kill_list); | 87 | struct list_head *kill_list, |
88 | int *unpinned); | ||
88 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); | 89 | static void rds_iw_destroy_fastreg(struct rds_iw_mr_pool *pool, struct rds_iw_mr *ibmr); |
89 | 90 | ||
90 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) | 91 | static int rds_iw_get_device(struct rds_sock *rs, struct rds_iw_device **rds_iwdev, struct rdma_cm_id **cm_id) |
@@ -499,7 +500,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
499 | LIST_HEAD(unmap_list); | 500 | LIST_HEAD(unmap_list); |
500 | LIST_HEAD(kill_list); | 501 | LIST_HEAD(kill_list); |
501 | unsigned long flags; | 502 | unsigned long flags; |
502 | unsigned int nfreed = 0, ncleaned = 0, free_goal; | 503 | unsigned int nfreed = 0, ncleaned = 0, unpinned = 0, free_goal; |
503 | int ret = 0; | 504 | int ret = 0; |
504 | 505 | ||
505 | rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); | 506 | rds_iw_stats_inc(s_iw_rdma_mr_pool_flush); |
@@ -524,7 +525,8 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
524 | * will be destroyed by the unmap function. | 525 | * will be destroyed by the unmap function. |
525 | */ | 526 | */ |
526 | if (!list_empty(&unmap_list)) { | 527 | if (!list_empty(&unmap_list)) { |
527 | ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, &kill_list); | 528 | ncleaned = rds_iw_unmap_fastreg_list(pool, &unmap_list, |
529 | &kill_list, &unpinned); | ||
528 | /* If we've been asked to destroy all MRs, move those | 530 | /* If we've been asked to destroy all MRs, move those |
529 | * that were simply cleaned to the kill list */ | 531 | * that were simply cleaned to the kill list */ |
530 | if (free_all) | 532 | if (free_all) |
@@ -548,6 +550,7 @@ static int rds_iw_flush_mr_pool(struct rds_iw_mr_pool *pool, int free_all) | |||
548 | spin_unlock_irqrestore(&pool->list_lock, flags); | 550 | spin_unlock_irqrestore(&pool->list_lock, flags); |
549 | } | 551 | } |
550 | 552 | ||
553 | atomic_sub(unpinned, &pool->free_pinned); | ||
551 | atomic_sub(ncleaned, &pool->dirty_count); | 554 | atomic_sub(ncleaned, &pool->dirty_count); |
552 | atomic_sub(nfreed, &pool->item_count); | 555 | atomic_sub(nfreed, &pool->item_count); |
553 | 556 | ||
@@ -828,7 +831,8 @@ static void rds_iw_free_fastreg(struct rds_iw_mr_pool *pool, | |||
828 | 831 | ||
829 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | 832 | static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, |
830 | struct list_head *unmap_list, | 833 | struct list_head *unmap_list, |
831 | struct list_head *kill_list) | 834 | struct list_head *kill_list, |
835 | int *unpinned) | ||
832 | { | 836 | { |
833 | struct rds_iw_mapping *mapping, *next; | 837 | struct rds_iw_mapping *mapping, *next; |
834 | unsigned int ncleaned = 0; | 838 | unsigned int ncleaned = 0; |
@@ -855,6 +859,7 @@ static unsigned int rds_iw_unmap_fastreg_list(struct rds_iw_mr_pool *pool, | |||
855 | 859 | ||
856 | spin_lock_irqsave(&pool->list_lock, flags); | 860 | spin_lock_irqsave(&pool->list_lock, flags); |
857 | list_for_each_entry_safe(mapping, next, unmap_list, m_list) { | 861 | list_for_each_entry_safe(mapping, next, unmap_list, m_list) { |
862 | *unpinned += mapping->m_sg.len; | ||
858 | list_move(&mapping->m_list, &laundered); | 863 | list_move(&mapping->m_list, &laundered); |
859 | ncleaned++; | 864 | ncleaned++; |
860 | } | 865 | } |
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e83e7fee3bc0..ea40d540a990 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c | |||
@@ -4113,9 +4113,12 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, | |||
4113 | if (len % sizeof(u32)) | 4113 | if (len % sizeof(u32)) |
4114 | return -EINVAL; | 4114 | return -EINVAL; |
4115 | 4115 | ||
4116 | if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES) | ||
4117 | return -EINVAL; | ||
4118 | |||
4116 | memcpy(settings->akm_suites, data, len); | 4119 | memcpy(settings->akm_suites, data, len); |
4117 | 4120 | ||
4118 | for (i = 0; i < settings->n_ciphers_pairwise; i++) | 4121 | for (i = 0; i < settings->n_akm_suites; i++) |
4119 | if (!nl80211_valid_akm_suite(settings->akm_suites[i])) | 4122 | if (!nl80211_valid_akm_suite(settings->akm_suites[i])) |
4120 | return -EINVAL; | 4123 | return -EINVAL; |
4121 | } | 4124 | } |
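The crypto-settings parser copied the user-supplied AKM suite list into a fixed-size array before checking its length, and then validated n_ciphers_pairwise entries instead of n_akm_suites; the fix bounds-checks against NL80211_MAX_NR_AKM_SUITES before the memcpy and iterates over the correct count. A generic sketch of the corrected pattern (the real maximum is the nl80211 constant, stubbed here):

#include <errno.h>
#include <string.h>

#define MAX_AKM_SUITES 2   /* stands in for NL80211_MAX_NR_AKM_SUITES */

static int copy_akm_suites(unsigned int *dst, const unsigned int *src,
                           size_t n_suites)
{
        if (n_suites > MAX_AKM_SUITES)   /* validate before copying */
                return -EINVAL;
        memcpy(dst, src, n_suites * sizeof(*dst));
        return 0;
}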
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 94fdcc7f1030..552df27dcf53 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1349,14 +1349,16 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1349 | BUG(); | 1349 | BUG(); |
1350 | } | 1350 | } |
1351 | xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); | 1351 | xdst = dst_alloc(dst_ops, NULL, 0, 0, 0); |
1352 | memset(&xdst->u.rt6.rt6i_table, 0, sizeof(*xdst) - sizeof(struct dst_entry)); | ||
1353 | xfrm_policy_put_afinfo(afinfo); | ||
1354 | 1352 | ||
1355 | if (likely(xdst)) | 1353 | if (likely(xdst)) { |
1354 | memset(&xdst->u.rt6.rt6i_table, 0, | ||
1355 | sizeof(*xdst) - sizeof(struct dst_entry)); | ||
1356 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1356 | xdst->flo.ops = &xfrm_bundle_fc_ops; |
1357 | else | 1357 | } else |
1358 | xdst = ERR_PTR(-ENOBUFS); | 1358 | xdst = ERR_PTR(-ENOBUFS); |
1359 | 1359 | ||
1360 | xfrm_policy_put_afinfo(afinfo); | ||
1361 | |||
1360 | return xdst; | 1362 | return xdst; |
1361 | } | 1363 | } |
1362 | 1364 | ||
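Like the ip6_dst_alloc() hunk above, this fixes a NULL dereference: dst_alloc() can fail, yet the old code memset the bundle before checking the pointer. The memset and the flow-cache ops assignment now sit under the `if (likely(xdst))` branch, and the afinfo reference is dropped afterwards in either case. A condensed sketch of the reordering:

#include <stdlib.h>
#include <string.h>

struct bundle { int header; char payload[64]; };

/* Only touch the object once the allocation is known to have succeeded. */
static struct bundle *alloc_bundle(void)
{
        struct bundle *b = malloc(sizeof(*b));

        if (b)
                memset(b->payload, 0, sizeof(b->payload));
        return b;   /* caller maps NULL to -ENOBUFS */
}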
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c index f9123f09e83e..32b02d906703 100644 --- a/sound/pci/fm801.c +++ b/sound/pci/fm801.c | |||
@@ -68,6 +68,7 @@ MODULE_PARM_DESC(enable, "Enable FM801 soundcard."); | |||
68 | module_param_array(tea575x_tuner, int, NULL, 0444); | 68 | module_param_array(tea575x_tuner, int, NULL, 0444); |
69 | MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only)."); | 69 | MODULE_PARM_DESC(tea575x_tuner, "TEA575x tuner access method (0 = auto, 1 = SF256-PCS, 2=SF256-PCP, 3=SF64-PCR, 8=disable, +16=tuner-only)."); |
70 | 70 | ||
71 | #define TUNER_DISABLED (1<<3) | ||
71 | #define TUNER_ONLY (1<<4) | 72 | #define TUNER_ONLY (1<<4) |
72 | #define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF) | 73 | #define TUNER_TYPE_MASK (~TUNER_ONLY & 0xFFFF) |
73 | 74 | ||
@@ -1150,7 +1151,8 @@ static int snd_fm801_free(struct fm801 *chip) | |||
1150 | 1151 | ||
1151 | __end_hw: | 1152 | __end_hw: |
1152 | #ifdef CONFIG_SND_FM801_TEA575X_BOOL | 1153 | #ifdef CONFIG_SND_FM801_TEA575X_BOOL |
1153 | snd_tea575x_exit(&chip->tea); | 1154 | if (!(chip->tea575x_tuner & TUNER_DISABLED)) |
1155 | snd_tea575x_exit(&chip->tea); | ||
1154 | #endif | 1156 | #endif |
1155 | if (chip->irq >= 0) | 1157 | if (chip->irq >= 0) |
1156 | free_irq(chip->irq, chip); | 1158 | free_irq(chip->irq, chip); |
@@ -1236,7 +1238,6 @@ static int __devinit snd_fm801_create(struct snd_card *card, | |||
1236 | (tea575x_tuner & TUNER_TYPE_MASK) < 4) { | 1238 | (tea575x_tuner & TUNER_TYPE_MASK) < 4) { |
1237 | if (snd_tea575x_init(&chip->tea)) { | 1239 | if (snd_tea575x_init(&chip->tea)) { |
1238 | snd_printk(KERN_ERR "TEA575x radio not found\n"); | 1240 | snd_printk(KERN_ERR "TEA575x radio not found\n"); |
1239 | snd_fm801_free(chip); | ||
1240 | return -ENODEV; | 1241 | return -ENODEV; |
1241 | } | 1242 | } |
1242 | } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) { | 1243 | } else if ((tea575x_tuner & TUNER_TYPE_MASK) == 0) { |
@@ -1251,11 +1252,15 @@ static int __devinit snd_fm801_create(struct snd_card *card, | |||
1251 | } | 1252 | } |
1252 | if (tea575x_tuner == 4) { | 1253 | if (tea575x_tuner == 4) { |
1253 | snd_printk(KERN_ERR "TEA575x radio not found\n"); | 1254 | snd_printk(KERN_ERR "TEA575x radio not found\n"); |
1254 | snd_fm801_free(chip); | 1255 | chip->tea575x_tuner = TUNER_DISABLED; |
1255 | return -ENODEV; | ||
1256 | } | 1256 | } |
1257 | } | 1257 | } |
1258 | strlcpy(chip->tea.card, snd_fm801_tea575x_gpios[(tea575x_tuner & TUNER_TYPE_MASK) - 1].name, sizeof(chip->tea.card)); | 1258 | if (!(chip->tea575x_tuner & TUNER_DISABLED)) { |
1259 | strlcpy(chip->tea.card, | ||
1260 | snd_fm801_tea575x_gpios[(tea575x_tuner & | ||
1261 | TUNER_TYPE_MASK) - 1].name, | ||
1262 | sizeof(chip->tea.card)); | ||
1263 | } | ||
1259 | #endif | 1264 | #endif |
1260 | 1265 | ||
1261 | *rchip = chip; | 1266 | *rchip = chip; |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index be6982289c0d..e9a2a8795d1b 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -1924,7 +1924,8 @@ static unsigned int azx_via_get_position(struct azx *chip, | |||
1924 | } | 1924 | } |
1925 | 1925 | ||
1926 | static unsigned int azx_get_position(struct azx *chip, | 1926 | static unsigned int azx_get_position(struct azx *chip, |
1927 | struct azx_dev *azx_dev) | 1927 | struct azx_dev *azx_dev, |
1928 | bool with_check) | ||
1928 | { | 1929 | { |
1929 | unsigned int pos; | 1930 | unsigned int pos; |
1930 | int stream = azx_dev->substream->stream; | 1931 | int stream = azx_dev->substream->stream; |
@@ -1940,7 +1941,7 @@ static unsigned int azx_get_position(struct azx *chip, | |||
1940 | default: | 1941 | default: |
1941 | /* use the position buffer */ | 1942 | /* use the position buffer */ |
1942 | pos = le32_to_cpu(*azx_dev->posbuf); | 1943 | pos = le32_to_cpu(*azx_dev->posbuf); |
1943 | if (chip->position_fix[stream] == POS_FIX_AUTO) { | 1944 | if (with_check && chip->position_fix[stream] == POS_FIX_AUTO) { |
1944 | if (!pos || pos == (u32)-1) { | 1945 | if (!pos || pos == (u32)-1) { |
1945 | printk(KERN_WARNING | 1946 | printk(KERN_WARNING |
1946 | "hda-intel: Invalid position buffer, " | 1947 | "hda-intel: Invalid position buffer, " |
@@ -1964,7 +1965,7 @@ static snd_pcm_uframes_t azx_pcm_pointer(struct snd_pcm_substream *substream) | |||
1964 | struct azx *chip = apcm->chip; | 1965 | struct azx *chip = apcm->chip; |
1965 | struct azx_dev *azx_dev = get_azx_dev(substream); | 1966 | struct azx_dev *azx_dev = get_azx_dev(substream); |
1966 | return bytes_to_frames(substream->runtime, | 1967 | return bytes_to_frames(substream->runtime, |
1967 | azx_get_position(chip, azx_dev)); | 1968 | azx_get_position(chip, azx_dev, false)); |
1968 | } | 1969 | } |
1969 | 1970 | ||
1970 | /* | 1971 | /* |
@@ -1987,7 +1988,7 @@ static int azx_position_ok(struct azx *chip, struct azx_dev *azx_dev) | |||
1987 | return -1; /* bogus (too early) interrupt */ | 1988 | return -1; /* bogus (too early) interrupt */ |
1988 | 1989 | ||
1989 | stream = azx_dev->substream->stream; | 1990 | stream = azx_dev->substream->stream; |
1990 | pos = azx_get_position(chip, azx_dev); | 1991 | pos = azx_get_position(chip, azx_dev, true); |
1991 | 1992 | ||
1992 | if (WARN_ONCE(!azx_dev->period_bytes, | 1993 | if (WARN_ONCE(!azx_dev->period_bytes, |
1993 | "hda-intel: zero azx_dev->period_bytes")) | 1994 | "hda-intel: zero azx_dev->period_bytes")) |
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0503c999e7d3..7a73621a8909 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c | |||
@@ -578,6 +578,10 @@ static void alc_line_automute(struct hda_codec *codec) | |||
578 | { | 578 | { |
579 | struct alc_spec *spec = codec->spec; | 579 | struct alc_spec *spec = codec->spec; |
580 | 580 | ||
581 | /* check LO jack only when it's different from HP */ | ||
582 | if (spec->autocfg.line_out_pins[0] == spec->autocfg.hp_pins[0]) | ||
583 | return; | ||
584 | |||
581 | spec->line_jack_present = | 585 | spec->line_jack_present = |
582 | detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), | 586 | detect_jacks(codec, ARRAY_SIZE(spec->autocfg.line_out_pins), |
583 | spec->autocfg.line_out_pins); | 587 | spec->autocfg.line_out_pins); |
@@ -1321,7 +1325,9 @@ do_sku: | |||
1321 | * 15 : 1 --> enable the function "Mute internal speaker | 1325 | * 15 : 1 --> enable the function "Mute internal speaker |
1322 | * when the external headphone out jack is plugged" | 1326 | * when the external headphone out jack is plugged" |
1323 | */ | 1327 | */ |
1324 | if (!spec->autocfg.hp_pins[0]) { | 1328 | if (!spec->autocfg.hp_pins[0] && |
1329 | !(spec->autocfg.line_out_pins[0] && | ||
1330 | spec->autocfg.line_out_type == AUTO_PIN_HP_OUT)) { | ||
1325 | hda_nid_t nid; | 1331 | hda_nid_t nid; |
1326 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ | 1332 | tmp = (ass >> 11) & 0x3; /* HP to chassis */ |
1327 | if (tmp == 0) | 1333 | if (tmp == 0) |
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 1b7c11432aa7..987e3cf71a0b 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c | |||
@@ -5630,6 +5630,7 @@ again: | |||
5630 | switch (codec->vendor_id) { | 5630 | switch (codec->vendor_id) { |
5631 | case 0x111d76d1: | 5631 | case 0x111d76d1: |
5632 | case 0x111d76d9: | 5632 | case 0x111d76d9: |
5633 | case 0x111d76df: | ||
5633 | case 0x111d76e5: | 5634 | case 0x111d76e5: |
5634 | case 0x111d7666: | 5635 | case 0x111d7666: |
5635 | case 0x111d7667: | 5636 | case 0x111d7667: |
diff --git a/sound/soc/blackfin/bf5xx-ad73311.c b/sound/soc/blackfin/bf5xx-ad73311.c index 732a247f2527..b94eb7ef7d16 100644 --- a/sound/soc/blackfin/bf5xx-ad73311.c +++ b/sound/soc/blackfin/bf5xx-ad73311.c | |||
@@ -128,7 +128,7 @@ static int snd_ad73311_configure(void) | |||
128 | return 0; | 128 | return 0; |
129 | } | 129 | } |
130 | 130 | ||
131 | static int bf5xx_probe(struct platform_device *pdev) | 131 | static int bf5xx_probe(struct snd_soc_card *card) |
132 | { | 132 | { |
133 | int err; | 133 | int err; |
134 | if (gpio_request(GPIO_SE, "AD73311_SE")) { | 134 | if (gpio_request(GPIO_SE, "AD73311_SE")) { |
diff --git a/sound/soc/codecs/ssm2602.c b/sound/soc/codecs/ssm2602.c index 84f4ad568556..9801cd7cfcb5 100644 --- a/sound/soc/codecs/ssm2602.c +++ b/sound/soc/codecs/ssm2602.c | |||
@@ -431,7 +431,8 @@ static int ssm2602_set_dai_fmt(struct snd_soc_dai *codec_dai, | |||
431 | static int ssm2602_set_bias_level(struct snd_soc_codec *codec, | 431 | static int ssm2602_set_bias_level(struct snd_soc_codec *codec, |
432 | enum snd_soc_bias_level level) | 432 | enum snd_soc_bias_level level) |
433 | { | 433 | { |
434 | u16 reg = snd_soc_read(codec, SSM2602_PWR) & 0xff7f; | 434 | u16 reg = snd_soc_read(codec, SSM2602_PWR); |
435 | reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN); | ||
435 | 436 | ||
436 | switch (level) { | 437 | switch (level) { |
437 | case SND_SOC_BIAS_ON: | 438 | case SND_SOC_BIAS_ON: |
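The ssm2602 change above swaps a magic mask (0xff7f) for named bit clears on the power register. A minimal standalone sketch of that read-modify-write pattern; the bit positions and the plain variable standing in for the codec register are illustrative assumptions, not the driver's definitions:

/* Read-modify-write with named masks instead of a magic constant. */
#include <stdint.h>
#include <stdio.h>

#define PWR_OSC_PDN    (1u << 5)   /* assumed bit positions, for illustration */
#define PWR_POWER_OFF  (1u << 7)

int main(void)
{
	uint16_t pwr = 0x01ff;                    /* stand-in register value */
	uint16_t reg = pwr;                       /* read                     */

	reg &= ~(PWR_POWER_OFF | PWR_OSC_PDN);    /* clear only the named bits */
	pwr = reg;                                /* write back                */
	printf("power register: 0x%04x\n", pwr);
	return 0;
}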
diff --git a/sound/soc/codecs/wm8753.c b/sound/soc/codecs/wm8753.c index ffa2ffe5ec11..aa091a0d8187 100644 --- a/sound/soc/codecs/wm8753.c +++ b/sound/soc/codecs/wm8753.c | |||
@@ -1454,8 +1454,8 @@ static int wm8753_probe(struct snd_soc_codec *codec) | |||
1454 | /* set the update bits */ | 1454 | /* set the update bits */ |
1455 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); | 1455 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); |
1456 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); | 1456 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); |
1457 | snd_soc_update_bits(codec, WM8753_LDAC, 0x0100, 0x0100); | 1457 | snd_soc_update_bits(codec, WM8753_LADC, 0x0100, 0x0100); |
1458 | snd_soc_update_bits(codec, WM8753_RDAC, 0x0100, 0x0100); | 1458 | snd_soc_update_bits(codec, WM8753_RADC, 0x0100, 0x0100); |
1459 | snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); | 1459 | snd_soc_update_bits(codec, WM8753_LOUT1V, 0x0100, 0x0100); |
1460 | snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); | 1460 | snd_soc_update_bits(codec, WM8753_ROUT1V, 0x0100, 0x0100); |
1461 | snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); | 1461 | snd_soc_update_bits(codec, WM8753_LOUT2V, 0x0100, 0x0100); |
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c index 1725550c293e..d2c315fa1b9b 100644 --- a/sound/soc/codecs/wm8962.c +++ b/sound/soc/codecs/wm8962.c | |||
@@ -3479,31 +3479,6 @@ int wm8962_mic_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack) | |||
3479 | } | 3479 | } |
3480 | EXPORT_SYMBOL_GPL(wm8962_mic_detect); | 3480 | EXPORT_SYMBOL_GPL(wm8962_mic_detect); |
3481 | 3481 | ||
3482 | #ifdef CONFIG_PM | ||
3483 | static int wm8962_resume(struct snd_soc_codec *codec) | ||
3484 | { | ||
3485 | u16 *reg_cache = codec->reg_cache; | ||
3486 | int i; | ||
3487 | |||
3488 | /* Restore the registers */ | ||
3489 | for (i = 1; i < codec->driver->reg_cache_size; i++) { | ||
3490 | switch (i) { | ||
3491 | case WM8962_SOFTWARE_RESET: | ||
3492 | continue; | ||
3493 | default: | ||
3494 | break; | ||
3495 | } | ||
3496 | |||
3497 | if (reg_cache[i] != wm8962_reg[i]) | ||
3498 | snd_soc_write(codec, i, reg_cache[i]); | ||
3499 | } | ||
3500 | |||
3501 | return 0; | ||
3502 | } | ||
3503 | #else | ||
3504 | #define wm8962_resume NULL | ||
3505 | #endif | ||
3506 | |||
3507 | #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) | 3482 | #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) |
3508 | static int beep_rates[] = { | 3483 | static int beep_rates[] = { |
3509 | 500, 1000, 2000, 4000, | 3484 | 500, 1000, 2000, 4000, |
@@ -4015,7 +3990,6 @@ static int wm8962_remove(struct snd_soc_codec *codec) | |||
4015 | static struct snd_soc_codec_driver soc_codec_dev_wm8962 = { | 3990 | static struct snd_soc_codec_driver soc_codec_dev_wm8962 = { |
4016 | .probe = wm8962_probe, | 3991 | .probe = wm8962_probe, |
4017 | .remove = wm8962_remove, | 3992 | .remove = wm8962_remove, |
4018 | .resume = wm8962_resume, | ||
4019 | .set_bias_level = wm8962_set_bias_level, | 3993 | .set_bias_level = wm8962_set_bias_level, |
4020 | .reg_cache_size = WM8962_MAX_REGISTER + 1, | 3994 | .reg_cache_size = WM8962_MAX_REGISTER + 1, |
4021 | .reg_word_size = sizeof(u16), | 3995 | .reg_word_size = sizeof(u16), |
diff --git a/sound/soc/omap/mcpdm.c b/sound/soc/omap/mcpdm.c index 928f03707451..50e59194ad81 100644 --- a/sound/soc/omap/mcpdm.c +++ b/sound/soc/omap/mcpdm.c | |||
@@ -449,7 +449,7 @@ exit: | |||
449 | return ret; | 449 | return ret; |
450 | } | 450 | } |
451 | 451 | ||
452 | int __devexit omap_mcpdm_remove(struct platform_device *pdev) | 452 | int omap_mcpdm_remove(struct platform_device *pdev) |
453 | { | 453 | { |
454 | struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); | 454 | struct omap_mcpdm *mcpdm_ptr = platform_get_drvdata(pdev); |
455 | 455 | ||
diff --git a/sound/soc/omap/mcpdm.h b/sound/soc/omap/mcpdm.h index df3e16fb51f3..20c20a8649fe 100644 --- a/sound/soc/omap/mcpdm.h +++ b/sound/soc/omap/mcpdm.h | |||
@@ -150,4 +150,4 @@ extern int omap_mcpdm_request(void); | |||
150 | extern void omap_mcpdm_free(void); | 150 | extern void omap_mcpdm_free(void); |
151 | extern int omap_mcpdm_set_offset(int offset1, int offset2); | 151 | extern int omap_mcpdm_set_offset(int offset1, int offset2); |
152 | int __devinit omap_mcpdm_probe(struct platform_device *pdev); | 152 | int __devinit omap_mcpdm_probe(struct platform_device *pdev); |
153 | int __devexit omap_mcpdm_remove(struct platform_device *pdev); | 153 | int omap_mcpdm_remove(struct platform_device *pdev); |
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c index ebcc2d4d2b18..478d60778453 100644 --- a/sound/soc/omap/omap-mcbsp.c +++ b/sound/soc/omap/omap-mcbsp.c | |||
@@ -516,6 +516,12 @@ static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai, | |||
516 | struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; | 516 | struct omap_mcbsp_reg_cfg *regs = &mcbsp_data->regs; |
517 | int err = 0; | 517 | int err = 0; |
518 | 518 | ||
519 | if (mcbsp_data->active) | ||
520 | if (freq == mcbsp_data->in_freq) | ||
521 | return 0; | ||
522 | else | ||
523 | return -EBUSY; | ||
524 | |||
519 | /* The McBSP signal muxing functions are only available on McBSP1 */ | 525 | /* The McBSP signal muxing functions are only available on McBSP1 */ |
520 | if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR || | 526 | if (clk_id == OMAP_MCBSP_CLKR_SRC_CLKR || |
521 | clk_id == OMAP_MCBSP_CLKR_SRC_CLKX || | 527 | clk_id == OMAP_MCBSP_CLKR_SRC_CLKX || |
diff --git a/sound/soc/pxa/zylonite.c b/sound/soc/pxa/zylonite.c index b6445757fc54..2b8350b52232 100644 --- a/sound/soc/pxa/zylonite.c +++ b/sound/soc/pxa/zylonite.c | |||
@@ -196,20 +196,20 @@ static int zylonite_probe(struct snd_soc_card *card) | |||
196 | if (clk_pout) { | 196 | if (clk_pout) { |
197 | pout = clk_get(NULL, "CLK_POUT"); | 197 | pout = clk_get(NULL, "CLK_POUT"); |
198 | if (IS_ERR(pout)) { | 198 | if (IS_ERR(pout)) { |
199 | dev_err(&pdev->dev, "Unable to obtain CLK_POUT: %ld\n", | 199 | dev_err(card->dev, "Unable to obtain CLK_POUT: %ld\n", |
200 | PTR_ERR(pout)); | 200 | PTR_ERR(pout)); |
201 | return PTR_ERR(pout); | 201 | return PTR_ERR(pout); |
202 | } | 202 | } |
203 | 203 | ||
204 | ret = clk_enable(pout); | 204 | ret = clk_enable(pout); |
205 | if (ret != 0) { | 205 | if (ret != 0) { |
206 | dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", | 206 | dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", |
207 | ret); | 207 | ret); |
208 | clk_put(pout); | 208 | clk_put(pout); |
209 | return ret; | 209 | return ret; |
210 | } | 210 | } |
211 | 211 | ||
212 | dev_dbg(&pdev->dev, "MCLK enabled at %luHz\n", | 212 | dev_dbg(card->dev, "MCLK enabled at %luHz\n", |
213 | clk_get_rate(pout)); | 213 | clk_get_rate(pout)); |
214 | } | 214 | } |
215 | 215 | ||
@@ -241,7 +241,7 @@ static int zylonite_resume_pre(struct snd_soc_card *card) | |||
241 | if (clk_pout) { | 241 | if (clk_pout) { |
242 | ret = clk_enable(pout); | 242 | ret = clk_enable(pout); |
243 | if (ret != 0) | 243 | if (ret != 0) |
244 | dev_err(&pdev->dev, "Unable to enable CLK_POUT: %d\n", | 244 | dev_err(card->dev, "Unable to enable CLK_POUT: %d\n", |
245 | ret); | 245 | ret); |
246 | } | 246 | } |
247 | 247 | ||
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index d2ef014af215..ef69f5a02709 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | #include <linux/debugfs.h> | 31 | #include <linux/debugfs.h> |
32 | #include <linux/platform_device.h> | 32 | #include <linux/platform_device.h> |
33 | #include <linux/ctype.h> | ||
33 | #include <linux/slab.h> | 34 | #include <linux/slab.h> |
34 | #include <sound/ac97_codec.h> | 35 | #include <sound/ac97_codec.h> |
35 | #include <sound/core.h> | 36 | #include <sound/core.h> |
@@ -1434,9 +1435,20 @@ static void snd_soc_instantiate_card(struct snd_soc_card *card) | |||
1434 | "%s", card->name); | 1435 | "%s", card->name); |
1435 | snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), | 1436 | snprintf(card->snd_card->longname, sizeof(card->snd_card->longname), |
1436 | "%s", card->long_name ? card->long_name : card->name); | 1437 | "%s", card->long_name ? card->long_name : card->name); |
1437 | if (card->driver_name) | 1438 | snprintf(card->snd_card->driver, sizeof(card->snd_card->driver), |
1438 | strlcpy(card->snd_card->driver, card->driver_name, | 1439 | "%s", card->driver_name ? card->driver_name : card->name); |
1439 | sizeof(card->snd_card->driver)); | 1440 | for (i = 0; i < ARRAY_SIZE(card->snd_card->driver); i++) { |
1441 | switch (card->snd_card->driver[i]) { | ||
1442 | case '_': | ||
1443 | case '-': | ||
1444 | case '\0': | ||
1445 | break; | ||
1446 | default: | ||
1447 | if (!isalnum(card->snd_card->driver[i])) | ||
1448 | card->snd_card->driver[i] = '_'; | ||
1449 | break; | ||
1450 | } | ||
1451 | } | ||
1440 | 1452 | ||
1441 | if (card->late_probe) { | 1453 | if (card->late_probe) { |
1442 | ret = card->late_probe(card); | 1454 | ret = card->late_probe(card); |
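The soc-core hunk above now always fills the ALSA driver string (falling back to the card name) and then sanitizes it, keeping alphanumerics, '_' and '-' and turning anything else into '_'. A small self-contained sketch of that sanitization, with a hypothetical input string:

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Keep alphanumerics, '_' and '-'; replace everything else with '_'. */
static void sanitize_driver_name(char *s, size_t len)
{
	size_t i;

	for (i = 0; i < len && s[i] != '\0'; i++) {
		switch (s[i]) {
		case '_':
		case '-':
			break;
		default:
			if (!isalnum((unsigned char)s[i]))
				s[i] = '_';
			break;
		}
	}
}

int main(void)
{
	char driver[16];

	snprintf(driver, sizeof(driver), "%s", "My Card (HDMI)");
	sanitize_driver_name(driver, sizeof(driver));
	printf("%s\n", driver);    /* prints: My_Card__HDMI_ */
	return 0;
}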
diff --git a/sound/usb/card.c b/sound/usb/card.c index 781d9e61adfb..d8f2bf401458 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -530,8 +530,11 @@ snd_usb_audio_probe(struct usb_device *dev, | |||
530 | return chip; | 530 | return chip; |
531 | 531 | ||
532 | __error: | 532 | __error: |
533 | if (chip && !chip->num_interfaces) | 533 | if (chip) { |
534 | snd_card_free(chip->card); | 534 | if (!chip->num_interfaces) |
535 | snd_card_free(chip->card); | ||
536 | chip->probing = 0; | ||
537 | } | ||
535 | mutex_unlock(®ister_mutex); | 538 | mutex_unlock(®ister_mutex); |
536 | __err_val: | 539 | __err_val: |
537 | return NULL; | 540 | return NULL; |
diff --git a/tools/perf/Documentation/perf-annotate.txt b/tools/perf/Documentation/perf-annotate.txt index 98a31e3181a5..0102d83600db 100644 --- a/tools/perf/Documentation/perf-annotate.txt +++ b/tools/perf/Documentation/perf-annotate.txt | |||
@@ -83,6 +83,9 @@ OPTIONS | |||
83 | --symfs=<directory>:: | 83 | --symfs=<directory>:: |
84 | Look for files with symbols relative to this directory. | 84 | Look for files with symbols relative to this directory. |
85 | 85 | ||
86 | -M:: | ||
87 | --disassembler-style=:: Set disassembler style for objdump. | ||
88 | |||
86 | SEE ALSO | 89 | SEE ALSO |
87 | -------- | 90 | -------- |
88 | linkperf:perf-record[1], linkperf:perf-report[1] | 91 | linkperf:perf-record[1], linkperf:perf-report[1] |
diff --git a/tools/perf/Documentation/perf-buildid-list.txt b/tools/perf/Documentation/perf-buildid-list.txt index 5eaac6f26d51..cc22325ffd1b 100644 --- a/tools/perf/Documentation/perf-buildid-list.txt +++ b/tools/perf/Documentation/perf-buildid-list.txt | |||
@@ -16,6 +16,9 @@ This command displays the buildids found in a perf.data file, so that other | |||
16 | tools can be used to fetch packages with matching symbol tables for use by | 16 | tools can be used to fetch packages with matching symbol tables for use by |
17 | perf report. | 17 | perf report. |
18 | 18 | ||
19 | It can also be used to show the build id of the running kernel, or that of an | ||
20 | ELF file, using -i/--input. | ||
21 | |||
19 | OPTIONS | 22 | OPTIONS |
20 | ------- | 23 | ------- |
21 | -H:: | 24 | -H:: |
@@ -27,6 +30,9 @@ OPTIONS | |||
27 | -f:: | 30 | -f:: |
28 | --force:: | 31 | --force:: |
29 | Don't do ownership validation. | 32 | Don't do ownership validation. |
33 | -k:: | ||
34 | --kernel:: | ||
35 | Show running kernel build id. | ||
30 | -v:: | 36 | -v:: |
31 | --verbose:: | 37 | --verbose:: |
32 | Be more verbose. | 38 | Be more verbose. |
diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt index 04253c07d19a..6349b6c0e3ec 100644 --- a/tools/perf/Documentation/perf-report.txt +++ b/tools/perf/Documentation/perf-report.txt | |||
@@ -134,6 +134,9 @@ OPTIONS | |||
134 | CPUs are specified with -: 0-2. Default is to report samples on all | 134 | CPUs are specified with -: 0-2. Default is to report samples on all |
135 | CPUs. | 135 | CPUs. |
136 | 136 | ||
137 | -M:: | ||
138 | --disassembler-style=:: Set disassembler style for objdump. | ||
139 | |||
137 | SEE ALSO | 140 | SEE ALSO |
138 | -------- | 141 | -------- |
139 | linkperf:perf-stat[1] | 142 | linkperf:perf-stat[1] |
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index 46822d5fde1c..5b212b57f70b 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt | |||
@@ -8,7 +8,7 @@ perf-sched - Tool to trace/measure scheduler properties (latencies) | |||
8 | SYNOPSIS | 8 | SYNOPSIS |
9 | -------- | 9 | -------- |
10 | [verse] | 10 | [verse] |
11 | 'perf sched' {record|latency|map|replay|trace} | 11 | 'perf sched' {record|latency|map|replay|script} |
12 | 12 | ||
13 | DESCRIPTION | 13 | DESCRIPTION |
14 | ----------- | 14 | ----------- |
@@ -20,8 +20,8 @@ There are five variants of perf sched: | |||
20 | 'perf sched latency' to report the per task scheduling latencies | 20 | 'perf sched latency' to report the per task scheduling latencies |
21 | and other scheduling properties of the workload. | 21 | and other scheduling properties of the workload. |
22 | 22 | ||
23 | 'perf sched trace' to see a detailed trace of the workload that | 23 | 'perf sched script' to see a detailed trace of the workload that |
24 | was recorded. | 24 | was recorded (aliased to 'perf script' for now). |
25 | 25 | ||
26 | 'perf sched replay' to simulate the workload that was recorded | 26 | 'perf sched replay' to simulate the workload that was recorded |
27 | via perf sched record. (this is done by starting up mockup threads | 27 | via perf sched record. (this is done by starting up mockup threads |
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index 08394c4879a8..8966b9ab2014 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt | |||
@@ -95,12 +95,21 @@ corresponding events, i.e., they always refer to events defined earlier on the c | |||
95 | line. | 95 | line. |
96 | 96 | ||
97 | -o file:: | 97 | -o file:: |
98 | -output file:: | 98 | --output file:: |
99 | Print the output into the designated file. | 99 | Print the output into the designated file. |
100 | 100 | ||
101 | --append:: | 101 | --append:: |
102 | Append to the output file designated with the -o option. Ignored if -o is not specified. | 102 | Append to the output file designated with the -o option. Ignored if -o is not specified. |
103 | 103 | ||
104 | --log-fd:: | ||
105 | |||
106 | Log output to fd, instead of stderr. Complementary to --output, and mutually exclusive | ||
107 | with it. --append may be used here. Examples: | ||
108 | 3>results perf stat --log-fd 3 -- $cmd | ||
109 | 3>>results perf stat --log-fd 3 --append -- $cmd | ||
110 | |||
111 | |||
112 | |||
104 | EXAMPLES | 113 | EXAMPLES |
105 | -------- | 114 | -------- |
106 | 115 | ||
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index 3b8f7b80376b..e9d5c271db69 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -30,6 +30,8 @@ endif | |||
30 | # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. | 30 | # Define EXTRA_CFLAGS=-m64 or EXTRA_CFLAGS=-m32 as appropriate for cross-builds. |
31 | # | 31 | # |
32 | # Define NO_DWARF if you do not want debug-info analysis feature at all. | 32 | # Define NO_DWARF if you do not want debug-info analysis feature at all. |
33 | # | ||
34 | # Define WERROR=0 to disable treating any warnings as errors. | ||
33 | 35 | ||
34 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE | 36 | $(OUTPUT)PERF-VERSION-FILE: .FORCE-PERF-VERSION-FILE |
35 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) | 37 | @$(SHELL_PATH) util/PERF-VERSION-GEN $(OUTPUT) |
@@ -63,6 +65,11 @@ ifeq ($(ARCH),x86_64) | |||
63 | endif | 65 | endif |
64 | endif | 66 | endif |
65 | 67 | ||
68 | # Treat warnings as errors unless directed not to | ||
69 | ifneq ($(WERROR),0) | ||
70 | CFLAGS_WERROR := -Werror | ||
71 | endif | ||
72 | |||
66 | # | 73 | # |
67 | # Include saner warnings here, which can catch bugs: | 74 | # Include saner warnings here, which can catch bugs: |
68 | # | 75 | # |
@@ -95,7 +102,7 @@ ifndef PERF_DEBUG | |||
95 | CFLAGS_OPTIMIZE = -O6 | 102 | CFLAGS_OPTIMIZE = -O6 |
96 | endif | 103 | endif |
97 | 104 | ||
98 | CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) | 105 | CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS) |
99 | EXTLIBS = -lpthread -lrt -lelf -lm | 106 | EXTLIBS = -lpthread -lrt -lelf -lm |
100 | ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 | 107 | ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 |
101 | ALL_LDFLAGS = $(LDFLAGS) | 108 | ALL_LDFLAGS = $(LDFLAGS) |
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index c5be28851f01..cf68819f7453 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c | |||
@@ -273,6 +273,8 @@ static const struct option options[] = { | |||
273 | "Interleave source code with assembly code (default)"), | 273 | "Interleave source code with assembly code (default)"), |
274 | OPT_BOOLEAN('0', "asm-raw", &symbol_conf.annotate_asm_raw, | 274 | OPT_BOOLEAN('0', "asm-raw", &symbol_conf.annotate_asm_raw, |
275 | "Display raw encoding of assembly instructions (default)"), | 275 | "Display raw encoding of assembly instructions (default)"), |
276 | OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", | ||
277 | "Specify disassembler style (e.g. -M intel for intel syntax)"), | ||
276 | OPT_END() | 278 | OPT_END() |
277 | }; | 279 | }; |
278 | 280 | ||
diff --git a/tools/perf/builtin-buildid-list.c b/tools/perf/builtin-buildid-list.c index 5af32ae9031e..cb690a65bf02 100644 --- a/tools/perf/builtin-buildid-list.c +++ b/tools/perf/builtin-buildid-list.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * builtin-buildid-list.c | 2 | * builtin-buildid-list.c |
3 | * | 3 | * |
4 | * Builtin buildid-list command: list buildids in perf.data | 4 | * Builtin buildid-list command: list buildids in perf.data, in the running |
5 | * kernel and in ELF files. | ||
5 | * | 6 | * |
6 | * Copyright (C) 2009, Red Hat Inc. | 7 | * Copyright (C) 2009, Red Hat Inc. |
7 | * Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com> | 8 | * Copyright (C) 2009, Arnaldo Carvalho de Melo <acme@redhat.com> |
@@ -15,8 +16,11 @@ | |||
15 | #include "util/session.h" | 16 | #include "util/session.h" |
16 | #include "util/symbol.h" | 17 | #include "util/symbol.h" |
17 | 18 | ||
19 | #include <libelf.h> | ||
20 | |||
18 | static char const *input_name = "perf.data"; | 21 | static char const *input_name = "perf.data"; |
19 | static bool force; | 22 | static bool force; |
23 | static bool show_kernel; | ||
20 | static bool with_hits; | 24 | static bool with_hits; |
21 | 25 | ||
22 | static const char * const buildid_list_usage[] = { | 26 | static const char * const buildid_list_usage[] = { |
@@ -29,12 +33,13 @@ static const struct option options[] = { | |||
29 | OPT_STRING('i', "input", &input_name, "file", | 33 | OPT_STRING('i', "input", &input_name, "file", |
30 | "input file name"), | 34 | "input file name"), |
31 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), | 35 | OPT_BOOLEAN('f', "force", &force, "don't complain, do it"), |
36 | OPT_BOOLEAN('k', "kernel", &show_kernel, "Show current kernel build id"), | ||
32 | OPT_INCR('v', "verbose", &verbose, | 37 | OPT_INCR('v', "verbose", &verbose, |
33 | "be more verbose"), | 38 | "be more verbose"), |
34 | OPT_END() | 39 | OPT_END() |
35 | }; | 40 | }; |
36 | 41 | ||
37 | static int __cmd_buildid_list(void) | 42 | static int perf_session__list_build_ids(void) |
38 | { | 43 | { |
39 | struct perf_session *session; | 44 | struct perf_session *session; |
40 | 45 | ||
@@ -52,6 +57,49 @@ static int __cmd_buildid_list(void) | |||
52 | return 0; | 57 | return 0; |
53 | } | 58 | } |
54 | 59 | ||
60 | static int sysfs__fprintf_build_id(FILE *fp) | ||
61 | { | ||
62 | u8 kallsyms_build_id[BUILD_ID_SIZE]; | ||
63 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | ||
64 | |||
65 | if (sysfs__read_build_id("/sys/kernel/notes", kallsyms_build_id, | ||
66 | sizeof(kallsyms_build_id)) != 0) | ||
67 | return -1; | ||
68 | |||
69 | build_id__sprintf(kallsyms_build_id, sizeof(kallsyms_build_id), | ||
70 | sbuild_id); | ||
71 | fprintf(fp, "%s\n", sbuild_id); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | static int filename__fprintf_build_id(const char *name, FILE *fp) | ||
76 | { | ||
77 | u8 build_id[BUILD_ID_SIZE]; | ||
78 | char sbuild_id[BUILD_ID_SIZE * 2 + 1]; | ||
79 | |||
80 | if (filename__read_build_id(name, build_id, | ||
81 | sizeof(build_id)) != sizeof(build_id)) | ||
82 | return 0; | ||
83 | |||
84 | build_id__sprintf(build_id, sizeof(build_id), sbuild_id); | ||
85 | return fprintf(fp, "%s\n", sbuild_id); | ||
86 | } | ||
87 | |||
88 | static int __cmd_buildid_list(void) | ||
89 | { | ||
90 | if (show_kernel) | ||
91 | return sysfs__fprintf_build_id(stdout); | ||
92 | |||
93 | elf_version(EV_CURRENT); | ||
94 | /* | ||
95 | * See if this is an ELF file first: | ||
96 | */ | ||
97 | if (filename__fprintf_build_id(input_name, stdout)) | ||
98 | return 0; | ||
99 | |||
100 | return perf_session__list_build_ids(); | ||
101 | } | ||
102 | |||
55 | int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) | 103 | int cmd_buildid_list(int argc, const char **argv, const char *prefix __used) |
56 | { | 104 | { |
57 | argc = parse_options(argc, argv, options, buildid_list_usage, 0); | 105 | argc = parse_options(argc, argv, options, buildid_list_usage, 0); |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 6b0519f885e4..dd6467872f60 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -73,6 +73,7 @@ static off_t post_processing_offset; | |||
73 | 73 | ||
74 | static struct perf_session *session; | 74 | static struct perf_session *session; |
75 | static const char *cpu_list; | 75 | static const char *cpu_list; |
76 | static const char *progname; | ||
76 | 77 | ||
77 | static void advance_output(size_t size) | 78 | static void advance_output(size_t size) |
78 | { | 79 | { |
@@ -137,17 +138,29 @@ static void mmap_read(struct perf_mmap *md) | |||
137 | 138 | ||
138 | static volatile int done = 0; | 139 | static volatile int done = 0; |
139 | static volatile int signr = -1; | 140 | static volatile int signr = -1; |
141 | static volatile int child_finished = 0; | ||
140 | 142 | ||
141 | static void sig_handler(int sig) | 143 | static void sig_handler(int sig) |
142 | { | 144 | { |
145 | if (sig == SIGCHLD) | ||
146 | child_finished = 1; | ||
147 | |||
143 | done = 1; | 148 | done = 1; |
144 | signr = sig; | 149 | signr = sig; |
145 | } | 150 | } |
146 | 151 | ||
147 | static void sig_atexit(void) | 152 | static void sig_atexit(void) |
148 | { | 153 | { |
149 | if (child_pid > 0) | 154 | int status; |
150 | kill(child_pid, SIGTERM); | 155 | |
156 | if (child_pid > 0) { | ||
157 | if (!child_finished) | ||
158 | kill(child_pid, SIGTERM); | ||
159 | |||
160 | wait(&status); | ||
161 | if (WIFSIGNALED(status)) | ||
162 | psignal(WTERMSIG(status), progname); | ||
163 | } | ||
151 | 164 | ||
152 | if (signr == -1 || signr == SIGUSR1) | 165 | if (signr == -1 || signr == SIGUSR1) |
153 | return; | 166 | return; |
@@ -161,6 +174,7 @@ static void config_attr(struct perf_evsel *evsel, struct perf_evlist *evlist) | |||
161 | struct perf_event_attr *attr = &evsel->attr; | 174 | struct perf_event_attr *attr = &evsel->attr; |
162 | int track = !evsel->idx; /* only the first counter needs these */ | 175 | int track = !evsel->idx; /* only the first counter needs these */ |
163 | 176 | ||
177 | attr->disabled = 1; | ||
164 | attr->inherit = !no_inherit; | 178 | attr->inherit = !no_inherit; |
165 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | | 179 | attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED | |
166 | PERF_FORMAT_TOTAL_TIME_RUNNING | | 180 | PERF_FORMAT_TOTAL_TIME_RUNNING | |
@@ -445,6 +459,8 @@ static int __cmd_record(int argc, const char **argv) | |||
445 | char buf; | 459 | char buf; |
446 | struct machine *machine; | 460 | struct machine *machine; |
447 | 461 | ||
462 | progname = argv[0]; | ||
463 | |||
448 | page_size = sysconf(_SC_PAGE_SIZE); | 464 | page_size = sysconf(_SC_PAGE_SIZE); |
449 | 465 | ||
450 | atexit(sig_atexit); | 466 | atexit(sig_atexit); |
@@ -671,6 +687,8 @@ static int __cmd_record(int argc, const char **argv) | |||
671 | } | 687 | } |
672 | } | 688 | } |
673 | 689 | ||
690 | perf_evlist__enable(evsel_list); | ||
691 | |||
674 | /* | 692 | /* |
675 | * Let the child rip | 693 | * Let the child rip |
676 | */ | 694 | */ |
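The builtin-record changes create every counter with attr->disabled = 1 and only turn it on via perf_evlist__enable() (i.e. PERF_EVENT_IOC_ENABLE) once setup is complete, so the tool's own ramp-up is not sampled. A minimal sketch of that open-disabled/enable-later pattern for a single counter, using the raw perf_event_open syscall rather than perf's evlist helpers:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	volatile long i;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_INSTRUCTIONS;
	attr.disabled = 1;                      /* created stopped, as in the patch */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... any further setup happens here, unmeasured ... */

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* start counting */
	for (i = 0; i < 1000000; i++)
		;                               /* measured region */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("instructions: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}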
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index d7ff277bdb78..3d58334909a5 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c | |||
@@ -229,10 +229,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist, | |||
229 | 229 | ||
230 | list_for_each_entry(pos, &evlist->entries, node) { | 230 | list_for_each_entry(pos, &evlist->entries, node) { |
231 | struct hists *hists = &pos->hists; | 231 | struct hists *hists = &pos->hists; |
232 | const char *evname = NULL; | 232 | const char *evname = event_name(pos); |
233 | |||
234 | if (rb_first(&hists->entries) != rb_last(&hists->entries)) | ||
235 | evname = event_name(pos); | ||
236 | 233 | ||
237 | hists__fprintf_nr_sample_events(hists, evname, stdout); | 234 | hists__fprintf_nr_sample_events(hists, evname, stdout); |
238 | hists__fprintf(hists, NULL, false, stdout); | 235 | hists__fprintf(hists, NULL, false, stdout); |
@@ -487,6 +484,8 @@ static const struct option options[] = { | |||
487 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", | 484 | OPT_STRING(0, "symfs", &symbol_conf.symfs, "directory", |
488 | "Look for files with symbols relative to this directory"), | 485 | "Look for files with symbols relative to this directory"), |
489 | OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), | 486 | OPT_STRING('c', "cpu", &cpu_list, "cpu", "list of cpus to profile"), |
487 | OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style", | ||
488 | "Specify disassembler style (e.g. -M intel for intel syntax)"), | ||
490 | OPT_END() | 489 | OPT_END() |
491 | }; | 490 | }; |
492 | 491 | ||
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index bec64a9e741c..7ce65f52415e 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -196,6 +196,7 @@ static bool csv_output = false; | |||
196 | static bool group = false; | 196 | static bool group = false; |
197 | static const char *output_name = NULL; | 197 | static const char *output_name = NULL; |
198 | static FILE *output = NULL; | 198 | static FILE *output = NULL; |
199 | static int output_fd; | ||
199 | 200 | ||
200 | static volatile int done = 0; | 201 | static volatile int done = 0; |
201 | 202 | ||
@@ -253,8 +254,13 @@ static double avg_stats(struct stats *stats) | |||
253 | */ | 254 | */ |
254 | static double stddev_stats(struct stats *stats) | 255 | static double stddev_stats(struct stats *stats) |
255 | { | 256 | { |
256 | double variance = stats->M2 / (stats->n - 1); | 257 | double variance, variance_mean; |
257 | double variance_mean = variance / stats->n; | 258 | |
259 | if (!stats->n) | ||
260 | return 0.0; | ||
261 | |||
262 | variance = stats->M2 / (stats->n - 1); | ||
263 | variance_mean = variance / stats->n; | ||
258 | 264 | ||
259 | return sqrt(variance_mean); | 265 | return sqrt(variance_mean); |
260 | } | 266 | } |
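The stddev_stats() guard above avoids dividing M2 by (n - 1) when no samples were recorded. A standalone sketch of the running (Welford-style) statistics this relies on; guarding n < 2 here, rather than only n == 0, is an extra assumption of this sketch, since the variance needs at least two samples:

/* build: cc stats.c -lm */
#include <math.h>
#include <stdio.h>

struct stats {
	double n, mean, M2;
};

static void update_stats(struct stats *s, double val)
{
	double delta;

	s->n += 1.0;
	delta = val - s->mean;
	s->mean += delta / s->n;
	s->M2 += delta * (val - s->mean);
}

static double stddev_of_mean(const struct stats *s)
{
	double variance, variance_mean;

	if (s->n < 2.0)
		return 0.0;

	variance = s->M2 / (s->n - 1.0);
	variance_mean = variance / s->n;   /* standard error of the mean */
	return sqrt(variance_mean);
}

int main(void)
{
	struct stats s = { 0, 0, 0 };
	double vals[] = { 10.0, 12.0, 11.0, 13.0 };
	unsigned i;

	for (i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
		update_stats(&s, vals[i]);
	printf("mean %.2f +- %.2f\n", s.mean, stddev_of_mean(&s));
	return 0;
}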
@@ -489,6 +495,8 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
489 | if (forks) { | 495 | if (forks) { |
490 | close(go_pipe[1]); | 496 | close(go_pipe[1]); |
491 | wait(&status); | 497 | wait(&status); |
498 | if (WIFSIGNALED(status)) | ||
499 | psignal(WTERMSIG(status), argv[0]); | ||
492 | } else { | 500 | } else { |
493 | while(!done) sleep(1); | 501 | while(!done) sleep(1); |
494 | } | 502 | } |
@@ -522,7 +530,7 @@ static void print_noise_pct(double total, double avg) | |||
522 | 530 | ||
523 | if (csv_output) | 531 | if (csv_output) |
524 | fprintf(output, "%s%.2f%%", csv_sep, pct); | 532 | fprintf(output, "%s%.2f%%", csv_sep, pct); |
525 | else | 533 | else if (pct) |
526 | fprintf(output, " ( +-%6.2f%% )", pct); | 534 | fprintf(output, " ( +-%6.2f%% )", pct); |
527 | } | 535 | } |
528 | 536 | ||
@@ -1080,6 +1088,8 @@ static const struct option options[] = { | |||
1080 | OPT_STRING('o', "output", &output_name, "file", | 1088 | OPT_STRING('o', "output", &output_name, "file", |
1081 | "output file name"), | 1089 | "output file name"), |
1082 | OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), | 1090 | OPT_BOOLEAN(0, "append", &append_file, "append to the output file"), |
1091 | OPT_INTEGER(0, "log-fd", &output_fd, | ||
1092 | "log output to fd, instead of stderr"), | ||
1083 | OPT_END() | 1093 | OPT_END() |
1084 | }; | 1094 | }; |
1085 | 1095 | ||
@@ -1166,6 +1176,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1166 | if (output_name && strcmp(output_name, "-")) | 1176 | if (output_name && strcmp(output_name, "-")) |
1167 | output = NULL; | 1177 | output = NULL; |
1168 | 1178 | ||
1179 | if (output_name && output_fd) { | ||
1180 | fprintf(stderr, "cannot use both --output and --log-fd\n"); | ||
1181 | usage_with_options(stat_usage, options); | ||
1182 | } | ||
1169 | if (!output) { | 1183 | if (!output) { |
1170 | struct timespec tm; | 1184 | struct timespec tm; |
1171 | mode = append_file ? "a" : "w"; | 1185 | mode = append_file ? "a" : "w"; |
@@ -1177,18 +1191,27 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
1177 | } | 1191 | } |
1178 | clock_gettime(CLOCK_REALTIME, &tm); | 1192 | clock_gettime(CLOCK_REALTIME, &tm); |
1179 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); | 1193 | fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); |
1194 | } else if (output_fd != 2) { | ||
1195 | mode = append_file ? "a" : "w"; | ||
1196 | output = fdopen(output_fd, mode); | ||
1197 | if (!output) { | ||
1198 | perror("Failed opening logfd"); | ||
1199 | return -errno; | ||
1200 | } | ||
1180 | } | 1201 | } |
1181 | 1202 | ||
1182 | if (csv_sep) | 1203 | if (csv_sep) { |
1183 | csv_output = true; | 1204 | csv_output = true; |
1184 | else | 1205 | if (!strcmp(csv_sep, "\\t")) |
1206 | csv_sep = "\t"; | ||
1207 | } else | ||
1185 | csv_sep = DEFAULT_SEPARATOR; | 1208 | csv_sep = DEFAULT_SEPARATOR; |
1186 | 1209 | ||
1187 | /* | 1210 | /* |
1188 | * let the spreadsheet do the pretty-printing | 1211 | * let the spreadsheet do the pretty-printing |
1189 | */ | 1212 | */ |
1190 | if (csv_output) { | 1213 | if (csv_output) { |
1191 | /* User explicitely passed -B? */ | 1214 | /* User explicitly passed -B? */ |
1192 | if (big_num_opt == 1) { | 1215 | if (big_num_opt == 1) { |
1193 | fprintf(stderr, "-B option not supported with -x\n"); | 1216 | fprintf(stderr, "-B option not supported with -x\n"); |
1194 | usage_with_options(stat_usage, options); | 1217 | usage_with_options(stat_usage, options); |
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c index 55f4c76f2821..efe696f936e2 100644 --- a/tools/perf/builtin-test.c +++ b/tools/perf/builtin-test.c | |||
@@ -561,7 +561,7 @@ static int test__basic_mmap(void) | |||
561 | } | 561 | } |
562 | 562 | ||
563 | err = perf_event__parse_sample(event, attr.sample_type, sample_size, | 563 | err = perf_event__parse_sample(event, attr.sample_type, sample_size, |
564 | false, &sample); | 564 | false, &sample, false); |
565 | if (err) { | 565 | if (err) { |
566 | pr_err("Can't parse sample, err = %d\n", err); | 566 | pr_err("Can't parse sample, err = %d\n", err); |
567 | goto out_munmap; | 567 | goto out_munmap; |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index a43433f08300..5ede7d7c9239 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -191,7 +191,8 @@ static void __zero_source_counters(struct sym_entry *syme) | |||
191 | symbol__annotate_zero_histograms(sym); | 191 | symbol__annotate_zero_histograms(sym); |
192 | } | 192 | } |
193 | 193 | ||
194 | static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) | 194 | static void record_precise_ip(struct sym_entry *syme, struct map *map, |
195 | int counter, u64 ip) | ||
195 | { | 196 | { |
196 | struct annotation *notes; | 197 | struct annotation *notes; |
197 | struct symbol *sym; | 198 | struct symbol *sym; |
@@ -205,8 +206,8 @@ static void record_precise_ip(struct sym_entry *syme, int counter, u64 ip) | |||
205 | if (pthread_mutex_trylock(¬es->lock)) | 206 | if (pthread_mutex_trylock(¬es->lock)) |
206 | return; | 207 | return; |
207 | 208 | ||
208 | ip = syme->map->map_ip(syme->map, ip); | 209 | ip = map->map_ip(map, ip); |
209 | symbol__inc_addr_samples(sym, syme->map, counter, ip); | 210 | symbol__inc_addr_samples(sym, map, counter, ip); |
210 | 211 | ||
211 | pthread_mutex_unlock(¬es->lock); | 212 | pthread_mutex_unlock(¬es->lock); |
212 | } | 213 | } |
@@ -250,7 +251,7 @@ static void __list_insert_active_sym(struct sym_entry *syme) | |||
250 | list_add(&syme->node, &top.active_symbols); | 251 | list_add(&syme->node, &top.active_symbols); |
251 | } | 252 | } |
252 | 253 | ||
253 | static void print_sym_table(struct perf_session *session) | 254 | static void print_sym_table(void) |
254 | { | 255 | { |
255 | char bf[160]; | 256 | char bf[160]; |
256 | int printed = 0; | 257 | int printed = 0; |
@@ -270,10 +271,11 @@ static void print_sym_table(struct perf_session *session) | |||
270 | 271 | ||
271 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); | 272 | printf("%-*.*s\n", win_width, win_width, graph_dotted_line); |
272 | 273 | ||
273 | if (session->hists.stats.total_lost != 0) { | 274 | if (top.total_lost_warned != top.session->hists.stats.total_lost) { |
275 | top.total_lost_warned = top.session->hists.stats.total_lost; | ||
274 | color_fprintf(stdout, PERF_COLOR_RED, "WARNING:"); | 276 | color_fprintf(stdout, PERF_COLOR_RED, "WARNING:"); |
275 | printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n", | 277 | printf(" LOST %" PRIu64 " events, Check IO/CPU overload\n", |
276 | session->hists.stats.total_lost); | 278 | top.total_lost_warned); |
277 | } | 279 | } |
278 | 280 | ||
279 | if (top.sym_filter_entry) { | 281 | if (top.sym_filter_entry) { |
@@ -474,7 +476,7 @@ static int key_mapped(int c) | |||
474 | return 0; | 476 | return 0; |
475 | } | 477 | } |
476 | 478 | ||
477 | static void handle_keypress(struct perf_session *session, int c) | 479 | static void handle_keypress(int c) |
478 | { | 480 | { |
479 | if (!key_mapped(c)) { | 481 | if (!key_mapped(c)) { |
480 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; | 482 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; |
@@ -550,7 +552,7 @@ static void handle_keypress(struct perf_session *session, int c) | |||
550 | case 'Q': | 552 | case 'Q': |
551 | printf("exiting.\n"); | 553 | printf("exiting.\n"); |
552 | if (dump_symtab) | 554 | if (dump_symtab) |
553 | perf_session__fprintf_dsos(session, stderr); | 555 | perf_session__fprintf_dsos(top.session, stderr); |
554 | exit(0); | 556 | exit(0); |
555 | case 's': | 557 | case 's': |
556 | prompt_symbol(&top.sym_filter_entry, "Enter details symbol"); | 558 | prompt_symbol(&top.sym_filter_entry, "Enter details symbol"); |
@@ -602,7 +604,6 @@ static void *display_thread(void *arg __used) | |||
602 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; | 604 | struct pollfd stdin_poll = { .fd = 0, .events = POLLIN }; |
603 | struct termios tc, save; | 605 | struct termios tc, save; |
604 | int delay_msecs, c; | 606 | int delay_msecs, c; |
605 | struct perf_session *session = (struct perf_session *) arg; | ||
606 | 607 | ||
607 | tcgetattr(0, &save); | 608 | tcgetattr(0, &save); |
608 | tc = save; | 609 | tc = save; |
@@ -617,13 +618,13 @@ repeat: | |||
617 | getc(stdin); | 618 | getc(stdin); |
618 | 619 | ||
619 | do { | 620 | do { |
620 | print_sym_table(session); | 621 | print_sym_table(); |
621 | } while (!poll(&stdin_poll, 1, delay_msecs) == 1); | 622 | } while (!poll(&stdin_poll, 1, delay_msecs) == 1); |
622 | 623 | ||
623 | c = getc(stdin); | 624 | c = getc(stdin); |
624 | tcsetattr(0, TCSAFLUSH, &save); | 625 | tcsetattr(0, TCSAFLUSH, &save); |
625 | 626 | ||
626 | handle_keypress(session, c); | 627 | handle_keypress(c); |
627 | goto repeat; | 628 | goto repeat; |
628 | 629 | ||
629 | return NULL; | 630 | return NULL; |
@@ -810,7 +811,7 @@ static void perf_event__process_sample(const union perf_event *event, | |||
810 | evsel = perf_evlist__id2evsel(top.evlist, sample->id); | 811 | evsel = perf_evlist__id2evsel(top.evlist, sample->id); |
811 | assert(evsel != NULL); | 812 | assert(evsel != NULL); |
812 | syme->count[evsel->idx]++; | 813 | syme->count[evsel->idx]++; |
813 | record_precise_ip(syme, evsel->idx, ip); | 814 | record_precise_ip(syme, al.map, evsel->idx, ip); |
814 | pthread_mutex_lock(&top.active_symbols_lock); | 815 | pthread_mutex_lock(&top.active_symbols_lock); |
815 | if (list_empty(&syme->node) || !syme->node.next) { | 816 | if (list_empty(&syme->node) || !syme->node.next) { |
816 | static bool first = true; | 817 | static bool first = true; |
@@ -935,27 +936,27 @@ static int __cmd_top(void) | |||
935 | * FIXME: perf_session__new should allow passing a O_MMAP, so that all this | 936 | * FIXME: perf_session__new should allow passing a O_MMAP, so that all this |
936 | * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. | 937 | * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. |
937 | */ | 938 | */ |
938 | struct perf_session *session = perf_session__new(NULL, O_WRONLY, false, false, NULL); | 939 | top.session = perf_session__new(NULL, O_WRONLY, false, false, NULL); |
939 | if (session == NULL) | 940 | if (top.session == NULL) |
940 | return -ENOMEM; | 941 | return -ENOMEM; |
941 | 942 | ||
942 | if (top.target_tid != -1) | 943 | if (top.target_tid != -1) |
943 | perf_event__synthesize_thread_map(top.evlist->threads, | 944 | perf_event__synthesize_thread_map(top.evlist->threads, |
944 | perf_event__process, session); | 945 | perf_event__process, top.session); |
945 | else | 946 | else |
946 | perf_event__synthesize_threads(perf_event__process, session); | 947 | perf_event__synthesize_threads(perf_event__process, top.session); |
947 | 948 | ||
948 | start_counters(top.evlist); | 949 | start_counters(top.evlist); |
949 | session->evlist = top.evlist; | 950 | top.session->evlist = top.evlist; |
950 | perf_session__update_sample_type(session); | 951 | perf_session__update_sample_type(top.session); |
951 | 952 | ||
952 | /* Wait for a minimal set of events before starting the snapshot */ | 953 | /* Wait for a minimal set of events before starting the snapshot */ |
953 | poll(top.evlist->pollfd, top.evlist->nr_fds, 100); | 954 | poll(top.evlist->pollfd, top.evlist->nr_fds, 100); |
954 | 955 | ||
955 | perf_session__mmap_read(session); | 956 | perf_session__mmap_read(top.session); |
956 | 957 | ||
957 | if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : | 958 | if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui : |
958 | display_thread), session)) { | 959 | display_thread), NULL)) { |
959 | printf("Could not create display thread.\n"); | 960 | printf("Could not create display thread.\n"); |
960 | exit(-1); | 961 | exit(-1); |
961 | } | 962 | } |
@@ -973,7 +974,7 @@ static int __cmd_top(void) | |||
973 | while (1) { | 974 | while (1) { |
974 | u64 hits = top.samples; | 975 | u64 hits = top.samples; |
975 | 976 | ||
976 | perf_session__mmap_read(session); | 977 | perf_session__mmap_read(top.session); |
977 | 978 | ||
978 | if (hits == top.samples) | 979 | if (hits == top.samples) |
979 | ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100); | 980 | ret = poll(top.evlist->pollfd, top.evlist->nr_fds, 100); |
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-record b/tools/perf/scripts/python/bin/net_dropmonitor-record new file mode 100755 index 000000000000..423fb81dadae --- /dev/null +++ b/tools/perf/scripts/python/bin/net_dropmonitor-record | |||
@@ -0,0 +1,2 @@ | |||
1 | #!/bin/bash | ||
2 | perf record -e skb:kfree_skb $@ | ||
diff --git a/tools/perf/scripts/python/bin/net_dropmonitor-report b/tools/perf/scripts/python/bin/net_dropmonitor-report new file mode 100755 index 000000000000..8d698f5a06aa --- /dev/null +++ b/tools/perf/scripts/python/bin/net_dropmonitor-report | |||
@@ -0,0 +1,4 @@ | |||
1 | #!/bin/bash | ||
2 | # description: display a table of dropped frames | ||
3 | |||
4 | perf script -s "$PERF_EXEC_PATH"/scripts/python/net_dropmonitor.py $@ | ||
diff --git a/tools/perf/scripts/python/net_dropmonitor.py b/tools/perf/scripts/python/net_dropmonitor.py new file mode 100755 index 000000000000..a4ffc9500023 --- /dev/null +++ b/tools/perf/scripts/python/net_dropmonitor.py | |||
@@ -0,0 +1,72 @@ | |||
1 | # Monitor the system for dropped packets and produce a report of drop locations and counts | ||
2 | |||
3 | import os | ||
4 | import sys | ||
5 | |||
6 | sys.path.append(os.environ['PERF_EXEC_PATH'] + \ | ||
7 | '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') | ||
8 | |||
9 | from perf_trace_context import * | ||
10 | from Core import * | ||
11 | from Util import * | ||
12 | |||
13 | drop_log = {} | ||
14 | kallsyms = [] | ||
15 | |||
16 | def get_kallsyms_table(): | ||
17 | global kallsyms | ||
18 | try: | ||
19 | f = open("/proc/kallsyms", "r") | ||
20 | linecount = 0 | ||
21 | for line in f: | ||
22 | linecount = linecount+1 | ||
23 | f.seek(0) | ||
24 | except: | ||
25 | return | ||
26 | |||
27 | |||
28 | j = 0 | ||
29 | for line in f: | ||
30 | loc = int(line.split()[0], 16) | ||
31 | name = line.split()[2] | ||
32 | j = j +1 | ||
33 | if ((j % 100) == 0): | ||
34 | print "\r" + str(j) + "/" + str(linecount), | ||
35 | kallsyms.append({ 'loc': loc, 'name' : name}) | ||
36 | |||
37 | print "\r" + str(j) + "/" + str(linecount) | ||
38 | kallsyms.sort() | ||
39 | return | ||
40 | |||
41 | def get_sym(sloc): | ||
42 | loc = int(sloc) | ||
43 | for i in kallsyms: | ||
44 | if (i['loc'] >= loc): | ||
45 | return (i['name'], i['loc']-loc) | ||
46 | return (None, 0) | ||
47 | |||
48 | def print_drop_table(): | ||
49 | print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") | ||
50 | for i in drop_log.keys(): | ||
51 | (sym, off) = get_sym(i) | ||
52 | if sym == None: | ||
53 | sym = i | ||
54 | print "%25s %25s %25s" % (sym, off, drop_log[i]) | ||
55 | |||
56 | |||
57 | def trace_begin(): | ||
58 | print "Starting trace (Ctrl-C to dump results)" | ||
59 | |||
60 | def trace_end(): | ||
61 | print "Gathering kallsyms data" | ||
62 | get_kallsyms_table() | ||
63 | print_drop_table() | ||
64 | |||
65 | # called from perf, when it finds a corresponding event | ||
66 | def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, | ||
67 | skbaddr, protocol, location): | ||
68 | slocation = str(location) | ||
69 | try: | ||
70 | drop_log[slocation] = drop_log[slocation] + 1 | ||
71 | except: | ||
72 | drop_log[slocation] = 1 | ||
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 01d36ba54053..bc8f4773d4d8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c | |||
@@ -16,6 +16,8 @@ | |||
16 | #include "annotate.h" | 16 | #include "annotate.h" |
17 | #include <pthread.h> | 17 | #include <pthread.h> |
18 | 18 | ||
19 | const char *disassembler_style; | ||
20 | |||
19 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) | 21 | int symbol__annotate_init(struct map *map __used, struct symbol *sym) |
20 | { | 22 | { |
21 | struct annotation *notes = symbol__annotation(sym); | 23 | struct annotation *notes = symbol__annotation(sym); |
@@ -323,9 +325,11 @@ fallback: | |||
323 | dso, dso->long_name, sym, sym->name); | 325 | dso, dso->long_name, sym, sym->name); |
324 | 326 | ||
325 | snprintf(command, sizeof(command), | 327 | snprintf(command, sizeof(command), |
326 | "objdump --start-address=0x%016" PRIx64 | 328 | "objdump %s%s --start-address=0x%016" PRIx64 |
327 | " --stop-address=0x%016" PRIx64 | 329 | " --stop-address=0x%016" PRIx64 |
328 | " -d %s %s -C %s|grep -v %s|expand", | 330 | " -d %s %s -C %s|grep -v %s|expand", |
331 | disassembler_style ? "-M " : "", | ||
332 | disassembler_style ? disassembler_style : "", | ||
329 | map__rip_2objdump(map, sym->start), | 333 | map__rip_2objdump(map, sym->start), |
330 | map__rip_2objdump(map, sym->end), | 334 | map__rip_2objdump(map, sym->end), |
331 | symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", | 335 | symbol_conf.annotate_asm_raw ? "" : "--no-show-raw", |
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index c2c286896801..6ede1286ee71 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h | |||
@@ -100,4 +100,6 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map, int evidx, | |||
100 | int refresh); | 100 | int refresh); |
101 | #endif | 101 | #endif |
102 | 102 | ||
103 | extern const char *disassembler_style; | ||
104 | |||
103 | #endif /* __PERF_ANNOTATE_H */ | 105 | #endif /* __PERF_ANNOTATE_H */ |
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 3c1b8a632101..437f8ca679a0 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c | |||
@@ -169,12 +169,17 @@ static int perf_event__synthesize_mmap_events(union perf_event *event, | |||
169 | continue; | 169 | continue; |
170 | pbf += n + 3; | 170 | pbf += n + 3; |
171 | if (*pbf == 'x') { /* vm_exec */ | 171 | if (*pbf == 'x') { /* vm_exec */ |
172 | char anonstr[] = "//anon\n"; | ||
172 | char *execname = strchr(bf, '/'); | 173 | char *execname = strchr(bf, '/'); |
173 | 174 | ||
174 | /* Catch VDSO */ | 175 | /* Catch VDSO */ |
175 | if (execname == NULL) | 176 | if (execname == NULL) |
176 | execname = strstr(bf, "[vdso]"); | 177 | execname = strstr(bf, "[vdso]"); |
177 | 178 | ||
179 | /* Catch anonymous mmaps */ | ||
180 | if ((execname == NULL) && !strstr(bf, "[")) | ||
181 | execname = anonstr; | ||
182 | |||
178 | if (execname == NULL) | 183 | if (execname == NULL) |
179 | continue; | 184 | continue; |
180 | 185 | ||
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index 1d7f66488a88..357a85b85248 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h | |||
@@ -186,6 +186,6 @@ const char *perf_event__name(unsigned int id); | |||
186 | 186 | ||
187 | int perf_event__parse_sample(const union perf_event *event, u64 type, | 187 | int perf_event__parse_sample(const union perf_event *event, u64 type, |
188 | int sample_size, bool sample_id_all, | 188 | int sample_size, bool sample_id_all, |
189 | struct perf_sample *sample); | 189 | struct perf_sample *sample, bool swapped); |
190 | 190 | ||
191 | #endif /* __PERF_RECORD_H */ | 191 | #endif /* __PERF_RECORD_H */ |
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index c12bd476c6f7..72e9f4886b6d 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c | |||
@@ -113,6 +113,19 @@ void perf_evlist__disable(struct perf_evlist *evlist) | |||
113 | } | 113 | } |
114 | } | 114 | } |
115 | 115 | ||
116 | void perf_evlist__enable(struct perf_evlist *evlist) | ||
117 | { | ||
118 | int cpu, thread; | ||
119 | struct perf_evsel *pos; | ||
120 | |||
121 | for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { | ||
122 | list_for_each_entry(pos, &evlist->entries, node) { | ||
123 | for (thread = 0; thread < evlist->threads->nr; thread++) | ||
124 | ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); | ||
125 | } | ||
126 | } | ||
127 | } | ||
128 | |||
116 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) | 129 | int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) |
117 | { | 130 | { |
118 | int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; | 131 | int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries; |
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index ce85ae9ae57a..f34915002745 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h | |||
@@ -54,6 +54,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite); | |||
54 | void perf_evlist__munmap(struct perf_evlist *evlist); | 54 | void perf_evlist__munmap(struct perf_evlist *evlist); |
55 | 55 | ||
56 | void perf_evlist__disable(struct perf_evlist *evlist); | 56 | void perf_evlist__disable(struct perf_evlist *evlist); |
57 | void perf_evlist__enable(struct perf_evlist *evlist); | ||
57 | 58 | ||
58 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, | 59 | static inline void perf_evlist__set_maps(struct perf_evlist *evlist, |
59 | struct cpu_map *cpus, | 60 | struct cpu_map *cpus, |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index a03a36b7908a..e389815078d3 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * Released under the GPL v2. (and only v2, not any later version) | 7 | * Released under the GPL v2. (and only v2, not any later version) |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <byteswap.h> | ||
11 | #include "asm/bug.h" | ||
10 | #include "evsel.h" | 12 | #include "evsel.h" |
11 | #include "evlist.h" | 13 | #include "evlist.h" |
12 | #include "util.h" | 14 | #include "util.h" |
@@ -342,10 +344,20 @@ static bool sample_overlap(const union perf_event *event, | |||
342 | 344 | ||
343 | int perf_event__parse_sample(const union perf_event *event, u64 type, | 345 | int perf_event__parse_sample(const union perf_event *event, u64 type, |
344 | int sample_size, bool sample_id_all, | 346 | int sample_size, bool sample_id_all, |
345 | struct perf_sample *data) | 347 | struct perf_sample *data, bool swapped) |
346 | { | 348 | { |
347 | const u64 *array; | 349 | const u64 *array; |
348 | 350 | ||
351 | /* | ||
352 | * used for cross-endian analysis. See git commit 65014ab3 | ||
353 | * for why this goofiness is needed. | ||
354 | */ | ||
355 | union { | ||
356 | u64 val64; | ||
357 | u32 val32[2]; | ||
358 | } u; | ||
359 | |||
360 | |||
349 | data->cpu = data->pid = data->tid = -1; | 361 | data->cpu = data->pid = data->tid = -1; |
350 | data->stream_id = data->id = data->time = -1ULL; | 362 | data->stream_id = data->id = data->time = -1ULL; |
351 | 363 | ||
@@ -366,9 +378,16 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
366 | } | 378 | } |
367 | 379 | ||
368 | if (type & PERF_SAMPLE_TID) { | 380 | if (type & PERF_SAMPLE_TID) { |
369 | u32 *p = (u32 *)array; | 381 | u.val64 = *array; |
370 | data->pid = p[0]; | 382 | if (swapped) { |
371 | data->tid = p[1]; | 383 | /* undo swap of u64, then swap on individual u32s */ |
384 | u.val64 = bswap_64(u.val64); | ||
385 | u.val32[0] = bswap_32(u.val32[0]); | ||
386 | u.val32[1] = bswap_32(u.val32[1]); | ||
387 | } | ||
388 | |||
389 | data->pid = u.val32[0]; | ||
390 | data->tid = u.val32[1]; | ||
372 | array++; | 391 | array++; |
373 | } | 392 | } |
374 | 393 | ||
@@ -395,8 +414,15 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
395 | } | 414 | } |
396 | 415 | ||
397 | if (type & PERF_SAMPLE_CPU) { | 416 | if (type & PERF_SAMPLE_CPU) { |
398 | u32 *p = (u32 *)array; | 417 | |
399 | data->cpu = *p; | 418 | u.val64 = *array; |
419 | if (swapped) { | ||
420 | /* undo swap of u64, then swap on individual u32s */ | ||
421 | u.val64 = bswap_64(u.val64); | ||
422 | u.val32[0] = bswap_32(u.val32[0]); | ||
423 | } | ||
424 | |||
425 | data->cpu = u.val32[0]; | ||
400 | array++; | 426 | array++; |
401 | } | 427 | } |
402 | 428 | ||
@@ -423,18 +449,27 @@ int perf_event__parse_sample(const union perf_event *event, u64 type, | |||
423 | } | 449 | } |
424 | 450 | ||
425 | if (type & PERF_SAMPLE_RAW) { | 451 | if (type & PERF_SAMPLE_RAW) { |
426 | u32 *p = (u32 *)array; | 452 | const u64 *pdata; |
453 | |||
454 | u.val64 = *array; | ||
455 | if (WARN_ONCE(swapped, | ||
456 | "Endianness of raw data not corrected!\n")) { | ||
457 | /* undo swap of u64, then swap on individual u32s */ | ||
458 | u.val64 = bswap_64(u.val64); | ||
459 | u.val32[0] = bswap_32(u.val32[0]); | ||
460 | u.val32[1] = bswap_32(u.val32[1]); | ||
461 | } | ||
427 | 462 | ||
428 | if (sample_overlap(event, array, sizeof(u32))) | 463 | if (sample_overlap(event, array, sizeof(u32))) |
429 | return -EFAULT; | 464 | return -EFAULT; |
430 | 465 | ||
431 | data->raw_size = *p; | 466 | data->raw_size = u.val32[0]; |
432 | p++; | 467 | pdata = (void *) array + sizeof(u32); |
433 | 468 | ||
434 | if (sample_overlap(event, p, data->raw_size)) | 469 | if (sample_overlap(event, pdata, data->raw_size)) |
435 | return -EFAULT; | 470 | return -EFAULT; |
436 | 471 | ||
437 | data->raw_data = p; | 472 | data->raw_data = (void *) pdata; |
438 | } | 473 | } |
439 | 474 | ||
440 | return 0; | 475 | return 0; |
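The parse_sample changes read each packed pair of u32 fields (pid/tid, cpu, raw size) through a u64 union so that, when the perf.data file comes from a host of the other endianness, the reader's blanket 64-bit byte swap can be undone and each 32-bit half swapped on its own. A self-contained sketch of that correction; the pid/tid naming is only illustrative:

#include <byteswap.h>
#include <stdint.h>
#include <stdio.h>

union u64_as_u32 {
	uint64_t val64;
	uint32_t val32[2];
};

/* Undo the blanket u64 swap, then swap each u32 half individually. */
static void fixup_u32_pair(union u64_as_u32 *u, int swapped)
{
	if (swapped) {
		u->val64 = bswap_64(u->val64);
		u->val32[0] = bswap_32(u->val32[0]);
		u->val32[1] = bswap_32(u->val32[1]);
	}
}

int main(void)
{
	union u64_as_u32 u;

	u.val32[0] = 1234;                  /* e.g. pid, native value */
	u.val32[1] = 5678;                  /* e.g. tid, native value */

	/* simulate what the reader sees after its blanket 64-bit swap of a
	 * cross-endian file: apply the inverse of the fix-up below */
	u.val32[0] = bswap_32(u.val32[0]);
	u.val32[1] = bswap_32(u.val32[1]);
	u.val64 = bswap_64(u.val64);

	fixup_u32_pair(&u, 1);
	printf("pid %u tid %u\n", u.val32[0], u.val32[1]);   /* 1234 5678 */
	return 0;
}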
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index a16ecab5229d..9cf0d4393c8d 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c | |||
@@ -18,6 +18,13 @@ static inline int is_anon_memory(const char *filename) | |||
18 | return strcmp(filename, "//anon") == 0; | 18 | return strcmp(filename, "//anon") == 0; |
19 | } | 19 | } |
20 | 20 | ||
21 | static inline int is_no_dso_memory(const char *filename) | ||
22 | { | ||
23 | return !strcmp(filename, "[stack]") || | ||
24 | !strcmp(filename, "[vdso]") || | ||
25 | !strcmp(filename, "[heap]"); | ||
26 | } | ||
27 | |||
21 | void map__init(struct map *self, enum map_type type, | 28 | void map__init(struct map *self, enum map_type type, |
22 | u64 start, u64 end, u64 pgoff, struct dso *dso) | 29 | u64 start, u64 end, u64 pgoff, struct dso *dso) |
23 | { | 30 | { |
@@ -42,9 +49,10 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
42 | if (self != NULL) { | 49 | if (self != NULL) { |
43 | char newfilename[PATH_MAX]; | 50 | char newfilename[PATH_MAX]; |
44 | struct dso *dso; | 51 | struct dso *dso; |
45 | int anon; | 52 | int anon, no_dso; |
46 | 53 | ||
47 | anon = is_anon_memory(filename); | 54 | anon = is_anon_memory(filename); |
55 | no_dso = is_no_dso_memory(filename); | ||
48 | 56 | ||
49 | if (anon) { | 57 | if (anon) { |
50 | snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); | 58 | snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); |
@@ -57,12 +65,16 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, | |||
57 | 65 | ||
58 | map__init(self, type, start, start + len, pgoff, dso); | 66 | map__init(self, type, start, start + len, pgoff, dso); |
59 | 67 | ||
60 | if (anon) { | 68 | if (anon || no_dso) { |
61 | set_identity: | ||
62 | self->map_ip = self->unmap_ip = identity__map_ip; | 69 | self->map_ip = self->unmap_ip = identity__map_ip; |
63 | } else if (strcmp(filename, "[vdso]") == 0) { | 70 | |
64 | dso__set_loaded(dso, self->type); | 71 | /* |
65 | goto set_identity; | 72 | * Set memory without DSO as loaded. All map__find_* |
73 | * functions still return NULL, and we avoid the | ||
74 | * unnecessary map__load warning. | ||
75 | */ | ||
76 | if (no_dso) | ||
77 | dso__set_loaded(dso, self->type); | ||
66 | } | 78 | } |
67 | } | 79 | } |
68 | return self; | 80 | return self; |
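
A short aside on the is_no_dso_memory() change above: pseudo-maps such as [stack], [vdso] and [heap] have no backing object, so they get the identity address translation and their dso is marked loaded up front, which keeps the map__find_*() helpers returning NULL without triggering the map__load warning. A simplified sketch of the identity translation idea; the real identity__map_ip() also takes a struct map argument, so treat the signature here as illustrative:

#include <stdint.h>

/* For a map with no backing DSO, "map space" and the raw address coincide,
 * so translating in either direction is a no-op. */
static uint64_t identity_ip(uint64_t ip)
{
	return ip;
}
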
@@ -220,55 +232,55 @@ u64 map__objdump_2ip(struct map *map, u64 addr) | |||
220 | return ip; | 232 | return ip; |
221 | } | 233 | } |
222 | 234 | ||
223 | void map_groups__init(struct map_groups *self) | 235 | void map_groups__init(struct map_groups *mg) |
224 | { | 236 | { |
225 | int i; | 237 | int i; |
226 | for (i = 0; i < MAP__NR_TYPES; ++i) { | 238 | for (i = 0; i < MAP__NR_TYPES; ++i) { |
227 | self->maps[i] = RB_ROOT; | 239 | mg->maps[i] = RB_ROOT; |
228 | INIT_LIST_HEAD(&self->removed_maps[i]); | 240 | INIT_LIST_HEAD(&mg->removed_maps[i]); |
229 | } | 241 | } |
230 | self->machine = NULL; | 242 | mg->machine = NULL; |
231 | } | 243 | } |
232 | 244 | ||
233 | static void maps__delete(struct rb_root *self) | 245 | static void maps__delete(struct rb_root *maps) |
234 | { | 246 | { |
235 | struct rb_node *next = rb_first(self); | 247 | struct rb_node *next = rb_first(maps); |
236 | 248 | ||
237 | while (next) { | 249 | while (next) { |
238 | struct map *pos = rb_entry(next, struct map, rb_node); | 250 | struct map *pos = rb_entry(next, struct map, rb_node); |
239 | 251 | ||
240 | next = rb_next(&pos->rb_node); | 252 | next = rb_next(&pos->rb_node); |
241 | rb_erase(&pos->rb_node, self); | 253 | rb_erase(&pos->rb_node, maps); |
242 | map__delete(pos); | 254 | map__delete(pos); |
243 | } | 255 | } |
244 | } | 256 | } |
245 | 257 | ||
246 | static void maps__delete_removed(struct list_head *self) | 258 | static void maps__delete_removed(struct list_head *maps) |
247 | { | 259 | { |
248 | struct map *pos, *n; | 260 | struct map *pos, *n; |
249 | 261 | ||
250 | list_for_each_entry_safe(pos, n, self, node) { | 262 | list_for_each_entry_safe(pos, n, maps, node) { |
251 | list_del(&pos->node); | 263 | list_del(&pos->node); |
252 | map__delete(pos); | 264 | map__delete(pos); |
253 | } | 265 | } |
254 | } | 266 | } |
255 | 267 | ||
256 | void map_groups__exit(struct map_groups *self) | 268 | void map_groups__exit(struct map_groups *mg) |
257 | { | 269 | { |
258 | int i; | 270 | int i; |
259 | 271 | ||
260 | for (i = 0; i < MAP__NR_TYPES; ++i) { | 272 | for (i = 0; i < MAP__NR_TYPES; ++i) { |
261 | maps__delete(&self->maps[i]); | 273 | maps__delete(&mg->maps[i]); |
262 | maps__delete_removed(&self->removed_maps[i]); | 274 | maps__delete_removed(&mg->removed_maps[i]); |
263 | } | 275 | } |
264 | } | 276 | } |
265 | 277 | ||
266 | void map_groups__flush(struct map_groups *self) | 278 | void map_groups__flush(struct map_groups *mg) |
267 | { | 279 | { |
268 | int type; | 280 | int type; |
269 | 281 | ||
270 | for (type = 0; type < MAP__NR_TYPES; type++) { | 282 | for (type = 0; type < MAP__NR_TYPES; type++) { |
271 | struct rb_root *root = &self->maps[type]; | 283 | struct rb_root *root = &mg->maps[type]; |
272 | struct rb_node *next = rb_first(root); | 284 | struct rb_node *next = rb_first(root); |
273 | 285 | ||
274 | while (next) { | 286 | while (next) { |
@@ -280,17 +292,17 @@ void map_groups__flush(struct map_groups *self) | |||
280 | * instance in some hist_entry instances, so | 292 | * instance in some hist_entry instances, so |
281 | * just move them to a separate list. | 293 | * just move them to a separate list. |
282 | */ | 294 | */ |
283 | list_add_tail(&pos->node, &self->removed_maps[pos->type]); | 295 | list_add_tail(&pos->node, &mg->removed_maps[pos->type]); |
284 | } | 296 | } |
285 | } | 297 | } |
286 | } | 298 | } |
287 | 299 | ||
288 | struct symbol *map_groups__find_symbol(struct map_groups *self, | 300 | struct symbol *map_groups__find_symbol(struct map_groups *mg, |
289 | enum map_type type, u64 addr, | 301 | enum map_type type, u64 addr, |
290 | struct map **mapp, | 302 | struct map **mapp, |
291 | symbol_filter_t filter) | 303 | symbol_filter_t filter) |
292 | { | 304 | { |
293 | struct map *map = map_groups__find(self, type, addr); | 305 | struct map *map = map_groups__find(mg, type, addr); |
294 | 306 | ||
295 | if (map != NULL) { | 307 | if (map != NULL) { |
296 | if (mapp != NULL) | 308 | if (mapp != NULL) |
@@ -301,7 +313,7 @@ struct symbol *map_groups__find_symbol(struct map_groups *self, | |||
301 | return NULL; | 313 | return NULL; |
302 | } | 314 | } |
303 | 315 | ||
304 | struct symbol *map_groups__find_symbol_by_name(struct map_groups *self, | 316 | struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, |
305 | enum map_type type, | 317 | enum map_type type, |
306 | const char *name, | 318 | const char *name, |
307 | struct map **mapp, | 319 | struct map **mapp, |
@@ -309,7 +321,7 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self, | |||
309 | { | 321 | { |
310 | struct rb_node *nd; | 322 | struct rb_node *nd; |
311 | 323 | ||
312 | for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { | 324 | for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { |
313 | struct map *pos = rb_entry(nd, struct map, rb_node); | 325 | struct map *pos = rb_entry(nd, struct map, rb_node); |
314 | struct symbol *sym = map__find_symbol_by_name(pos, name, filter); | 326 | struct symbol *sym = map__find_symbol_by_name(pos, name, filter); |
315 | 327 | ||
@@ -323,13 +335,13 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *self, | |||
323 | return NULL; | 335 | return NULL; |
324 | } | 336 | } |
325 | 337 | ||
326 | size_t __map_groups__fprintf_maps(struct map_groups *self, | 338 | size_t __map_groups__fprintf_maps(struct map_groups *mg, |
327 | enum map_type type, int verbose, FILE *fp) | 339 | enum map_type type, int verbose, FILE *fp) |
328 | { | 340 | { |
329 | size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); | 341 | size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); |
330 | struct rb_node *nd; | 342 | struct rb_node *nd; |
331 | 343 | ||
332 | for (nd = rb_first(&self->maps[type]); nd; nd = rb_next(nd)) { | 344 | for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { |
333 | struct map *pos = rb_entry(nd, struct map, rb_node); | 345 | struct map *pos = rb_entry(nd, struct map, rb_node); |
334 | printed += fprintf(fp, "Map:"); | 346 | printed += fprintf(fp, "Map:"); |
335 | printed += map__fprintf(pos, fp); | 347 | printed += map__fprintf(pos, fp); |
@@ -342,22 +354,22 @@ size_t __map_groups__fprintf_maps(struct map_groups *self, | |||
342 | return printed; | 354 | return printed; |
343 | } | 355 | } |
344 | 356 | ||
345 | size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp) | 357 | size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp) |
346 | { | 358 | { |
347 | size_t printed = 0, i; | 359 | size_t printed = 0, i; |
348 | for (i = 0; i < MAP__NR_TYPES; ++i) | 360 | for (i = 0; i < MAP__NR_TYPES; ++i) |
349 | printed += __map_groups__fprintf_maps(self, i, verbose, fp); | 361 | printed += __map_groups__fprintf_maps(mg, i, verbose, fp); |
350 | return printed; | 362 | return printed; |
351 | } | 363 | } |
352 | 364 | ||
353 | static size_t __map_groups__fprintf_removed_maps(struct map_groups *self, | 365 | static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg, |
354 | enum map_type type, | 366 | enum map_type type, |
355 | int verbose, FILE *fp) | 367 | int verbose, FILE *fp) |
356 | { | 368 | { |
357 | struct map *pos; | 369 | struct map *pos; |
358 | size_t printed = 0; | 370 | size_t printed = 0; |
359 | 371 | ||
360 | list_for_each_entry(pos, &self->removed_maps[type], node) { | 372 | list_for_each_entry(pos, &mg->removed_maps[type], node) { |
361 | printed += fprintf(fp, "Map:"); | 373 | printed += fprintf(fp, "Map:"); |
362 | printed += map__fprintf(pos, fp); | 374 | printed += map__fprintf(pos, fp); |
363 | if (verbose > 1) { | 375 | if (verbose > 1) { |
@@ -368,26 +380,26 @@ static size_t __map_groups__fprintf_removed_maps(struct map_groups *self, | |||
368 | return printed; | 380 | return printed; |
369 | } | 381 | } |
370 | 382 | ||
371 | static size_t map_groups__fprintf_removed_maps(struct map_groups *self, | 383 | static size_t map_groups__fprintf_removed_maps(struct map_groups *mg, |
372 | int verbose, FILE *fp) | 384 | int verbose, FILE *fp) |
373 | { | 385 | { |
374 | size_t printed = 0, i; | 386 | size_t printed = 0, i; |
375 | for (i = 0; i < MAP__NR_TYPES; ++i) | 387 | for (i = 0; i < MAP__NR_TYPES; ++i) |
376 | printed += __map_groups__fprintf_removed_maps(self, i, verbose, fp); | 388 | printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp); |
377 | return printed; | 389 | return printed; |
378 | } | 390 | } |
379 | 391 | ||
380 | size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp) | 392 | size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp) |
381 | { | 393 | { |
382 | size_t printed = map_groups__fprintf_maps(self, verbose, fp); | 394 | size_t printed = map_groups__fprintf_maps(mg, verbose, fp); |
383 | printed += fprintf(fp, "Removed maps:\n"); | 395 | printed += fprintf(fp, "Removed maps:\n"); |
384 | return printed + map_groups__fprintf_removed_maps(self, verbose, fp); | 396 | return printed + map_groups__fprintf_removed_maps(mg, verbose, fp); |
385 | } | 397 | } |
386 | 398 | ||
387 | int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, | 399 | int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, |
388 | int verbose, FILE *fp) | 400 | int verbose, FILE *fp) |
389 | { | 401 | { |
390 | struct rb_root *root = &self->maps[map->type]; | 402 | struct rb_root *root = &mg->maps[map->type]; |
391 | struct rb_node *next = rb_first(root); | 403 | struct rb_node *next = rb_first(root); |
392 | int err = 0; | 404 | int err = 0; |
393 | 405 | ||
@@ -418,7 +430,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, | |||
418 | } | 430 | } |
419 | 431 | ||
420 | before->end = map->start - 1; | 432 | before->end = map->start - 1; |
421 | map_groups__insert(self, before); | 433 | map_groups__insert(mg, before); |
422 | if (verbose >= 2) | 434 | if (verbose >= 2) |
423 | map__fprintf(before, fp); | 435 | map__fprintf(before, fp); |
424 | } | 436 | } |
@@ -432,7 +444,7 @@ int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, | |||
432 | } | 444 | } |
433 | 445 | ||
434 | after->start = map->end + 1; | 446 | after->start = map->end + 1; |
435 | map_groups__insert(self, after); | 447 | map_groups__insert(mg, after); |
436 | if (verbose >= 2) | 448 | if (verbose >= 2) |
437 | map__fprintf(after, fp); | 449 | map__fprintf(after, fp); |
438 | } | 450 | } |
@@ -441,7 +453,7 @@ move_map: | |||
441 | * If we have references, just move them to a separate list. | 453 | * If we have references, just move them to a separate list. |
442 | */ | 454 | */ |
443 | if (pos->referenced) | 455 | if (pos->referenced) |
444 | list_add_tail(&pos->node, &self->removed_maps[map->type]); | 456 | list_add_tail(&pos->node, &mg->removed_maps[map->type]); |
445 | else | 457 | else |
446 | map__delete(pos); | 458 | map__delete(pos); |
447 | 459 | ||
@@ -455,7 +467,7 @@ move_map: | |||
455 | /* | 467 | /* |
456 | * XXX This should not really _copy_ te maps, but refcount them. | 468 | * XXX This should not really _copy_ te maps, but refcount them. |
457 | */ | 469 | */ |
458 | int map_groups__clone(struct map_groups *self, | 470 | int map_groups__clone(struct map_groups *mg, |
459 | struct map_groups *parent, enum map_type type) | 471 | struct map_groups *parent, enum map_type type) |
460 | { | 472 | { |
461 | struct rb_node *nd; | 473 | struct rb_node *nd; |
@@ -464,7 +476,7 @@ int map_groups__clone(struct map_groups *self, | |||
464 | struct map *new = map__clone(map); | 476 | struct map *new = map__clone(map); |
465 | if (new == NULL) | 477 | if (new == NULL) |
466 | return -ENOMEM; | 478 | return -ENOMEM; |
467 | map_groups__insert(self, new); | 479 | map_groups__insert(mg, new); |
468 | } | 480 | } |
469 | return 0; | 481 | return 0; |
470 | } | 482 | } |
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index b397c0383728..890d85545d0f 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h | |||
@@ -123,17 +123,17 @@ void map__fixup_end(struct map *self); | |||
123 | 123 | ||
124 | void map__reloc_vmlinux(struct map *self); | 124 | void map__reloc_vmlinux(struct map *self); |
125 | 125 | ||
126 | size_t __map_groups__fprintf_maps(struct map_groups *self, | 126 | size_t __map_groups__fprintf_maps(struct map_groups *mg, |
127 | enum map_type type, int verbose, FILE *fp); | 127 | enum map_type type, int verbose, FILE *fp); |
128 | void maps__insert(struct rb_root *maps, struct map *map); | 128 | void maps__insert(struct rb_root *maps, struct map *map); |
129 | void maps__remove(struct rb_root *self, struct map *map); | 129 | void maps__remove(struct rb_root *maps, struct map *map); |
130 | struct map *maps__find(struct rb_root *maps, u64 addr); | 130 | struct map *maps__find(struct rb_root *maps, u64 addr); |
131 | void map_groups__init(struct map_groups *self); | 131 | void map_groups__init(struct map_groups *mg); |
132 | void map_groups__exit(struct map_groups *self); | 132 | void map_groups__exit(struct map_groups *mg); |
133 | int map_groups__clone(struct map_groups *self, | 133 | int map_groups__clone(struct map_groups *mg, |
134 | struct map_groups *parent, enum map_type type); | 134 | struct map_groups *parent, enum map_type type); |
135 | size_t map_groups__fprintf(struct map_groups *self, int verbose, FILE *fp); | 135 | size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp); |
136 | size_t map_groups__fprintf_maps(struct map_groups *self, int verbose, FILE *fp); | 136 | size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp); |
137 | 137 | ||
138 | typedef void (*machine__process_t)(struct machine *self, void *data); | 138 | typedef void (*machine__process_t)(struct machine *self, void *data); |
139 | 139 | ||
@@ -162,29 +162,29 @@ static inline bool machine__is_host(struct machine *self) | |||
162 | return self ? self->pid == HOST_KERNEL_ID : false; | 162 | return self ? self->pid == HOST_KERNEL_ID : false; |
163 | } | 163 | } |
164 | 164 | ||
165 | static inline void map_groups__insert(struct map_groups *self, struct map *map) | 165 | static inline void map_groups__insert(struct map_groups *mg, struct map *map) |
166 | { | 166 | { |
167 | maps__insert(&self->maps[map->type], map); | 167 | maps__insert(&mg->maps[map->type], map); |
168 | map->groups = self; | 168 | map->groups = mg; |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline void map_groups__remove(struct map_groups *self, struct map *map) | 171 | static inline void map_groups__remove(struct map_groups *mg, struct map *map) |
172 | { | 172 | { |
173 | maps__remove(&self->maps[map->type], map); | 173 | maps__remove(&mg->maps[map->type], map); |
174 | } | 174 | } |
175 | 175 | ||
176 | static inline struct map *map_groups__find(struct map_groups *self, | 176 | static inline struct map *map_groups__find(struct map_groups *mg, |
177 | enum map_type type, u64 addr) | 177 | enum map_type type, u64 addr) |
178 | { | 178 | { |
179 | return maps__find(&self->maps[type], addr); | 179 | return maps__find(&mg->maps[type], addr); |
180 | } | 180 | } |
181 | 181 | ||
182 | struct symbol *map_groups__find_symbol(struct map_groups *self, | 182 | struct symbol *map_groups__find_symbol(struct map_groups *mg, |
183 | enum map_type type, u64 addr, | 183 | enum map_type type, u64 addr, |
184 | struct map **mapp, | 184 | struct map **mapp, |
185 | symbol_filter_t filter); | 185 | symbol_filter_t filter); |
186 | 186 | ||
187 | struct symbol *map_groups__find_symbol_by_name(struct map_groups *self, | 187 | struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, |
188 | enum map_type type, | 188 | enum map_type type, |
189 | const char *name, | 189 | const char *name, |
190 | struct map **mapp, | 190 | struct map **mapp, |
@@ -208,11 +208,11 @@ struct symbol *machine__find_kernel_function(struct machine *self, u64 addr, | |||
208 | } | 208 | } |
209 | 209 | ||
210 | static inline | 210 | static inline |
211 | struct symbol *map_groups__find_function_by_name(struct map_groups *self, | 211 | struct symbol *map_groups__find_function_by_name(struct map_groups *mg, |
212 | const char *name, struct map **mapp, | 212 | const char *name, struct map **mapp, |
213 | symbol_filter_t filter) | 213 | symbol_filter_t filter) |
214 | { | 214 | { |
215 | return map_groups__find_symbol_by_name(self, MAP__FUNCTION, name, mapp, filter); | 215 | return map_groups__find_symbol_by_name(mg, MAP__FUNCTION, name, mapp, filter); |
216 | } | 216 | } |
217 | 217 | ||
218 | static inline | 218 | static inline |
@@ -225,13 +225,13 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *self, | |||
225 | filter); | 225 | filter); |
226 | } | 226 | } |
227 | 227 | ||
228 | int map_groups__fixup_overlappings(struct map_groups *self, struct map *map, | 228 | int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, |
229 | int verbose, FILE *fp); | 229 | int verbose, FILE *fp); |
230 | 230 | ||
231 | struct map *map_groups__find_by_name(struct map_groups *self, | 231 | struct map *map_groups__find_by_name(struct map_groups *mg, |
232 | enum map_type type, const char *name); | 232 | enum map_type type, const char *name); |
233 | struct map *machine__new_module(struct machine *self, u64 start, const char *filename); | 233 | struct map *machine__new_module(struct machine *self, u64 start, const char *filename); |
234 | 234 | ||
235 | void map_groups__flush(struct map_groups *self); | 235 | void map_groups__flush(struct map_groups *mg); |
236 | 236 | ||
237 | #endif /* __PERF_MAP_H */ | 237 | #endif /* __PERF_MAP_H */ |
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index 555fc3864b90..5d732621a462 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c | |||
@@ -659,7 +659,7 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) | |||
659 | if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) | 659 | if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die)) |
660 | ret = -ENOENT; | 660 | ret = -ENOENT; |
661 | } | 661 | } |
662 | if (ret == 0) | 662 | if (ret >= 0) |
663 | ret = convert_variable(&vr_die, pf); | 663 | ret = convert_variable(&vr_die, pf); |
664 | 664 | ||
665 | if (ret < 0) | 665 | if (ret < 0) |
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index cbc8f215d4b7..7624324efad4 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c | |||
@@ -803,7 +803,7 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, | |||
803 | first = list_entry(evlist->entries.next, struct perf_evsel, node); | 803 | first = list_entry(evlist->entries.next, struct perf_evsel, node); |
804 | err = perf_event__parse_sample(event, first->attr.sample_type, | 804 | err = perf_event__parse_sample(event, first->attr.sample_type, |
805 | perf_evsel__sample_size(first), | 805 | perf_evsel__sample_size(first), |
806 | sample_id_all, &pevent->sample); | 806 | sample_id_all, &pevent->sample, false); |
807 | if (err) | 807 | if (err) |
808 | return PyErr_Format(PyExc_OSError, | 808 | return PyErr_Format(PyExc_OSError, |
809 | "perf: can't parse sample, err=%d", err); | 809 | "perf: can't parse sample, err=%d", err); |
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h index 170601e67d6b..974d0cbee5e9 100644 --- a/tools/perf/util/session.h +++ b/tools/perf/util/session.h | |||
@@ -162,7 +162,8 @@ static inline int perf_session__parse_sample(struct perf_session *session, | |||
162 | { | 162 | { |
163 | return perf_event__parse_sample(event, session->sample_type, | 163 | return perf_event__parse_sample(event, session->sample_type, |
164 | session->sample_size, | 164 | session->sample_size, |
165 | session->sample_id_all, sample); | 165 | session->sample_id_all, sample, |
166 | session->header.needs_swap); | ||
166 | } | 167 | } |
167 | 168 | ||
168 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, | 169 | struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session, |
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 401e220566fd..1ee8f1e40f18 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c | |||
@@ -151,11 +151,17 @@ sort__sym_cmp(struct hist_entry *left, struct hist_entry *right) | |||
151 | { | 151 | { |
152 | u64 ip_l, ip_r; | 152 | u64 ip_l, ip_r; |
153 | 153 | ||
154 | if (!left->ms.sym && !right->ms.sym) | ||
155 | return right->level - left->level; | ||
156 | |||
157 | if (!left->ms.sym || !right->ms.sym) | ||
158 | return cmp_null(left->ms.sym, right->ms.sym); | ||
159 | |||
154 | if (left->ms.sym == right->ms.sym) | 160 | if (left->ms.sym == right->ms.sym) |
155 | return 0; | 161 | return 0; |
156 | 162 | ||
157 | ip_l = left->ms.sym ? left->ms.sym->start : left->ip; | 163 | ip_l = left->ms.sym->start; |
158 | ip_r = right->ms.sym ? right->ms.sym->start : right->ip; | 164 | ip_r = right->ms.sym->start; |
159 | 165 | ||
160 | return (int64_t)(ip_r - ip_l); | 166 | return (int64_t)(ip_r - ip_l); |
161 | } | 167 | } |
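
The sort__sym_cmp() change above makes the comparator handle hist entries whose symbol could not be resolved before it dereferences ->start. A compact restatement of that null-aware ordering, using simplified, illustrative types rather than the perf structures:

#include <stdint.h>

struct sym   { uint64_t start; };
struct entry { struct sym *sym; char level; };

/* NULL-aware ordering: fall back to the entry level when both symbols are
 * missing, order a lone NULL via cmp_null(), and only then compare starts. */
static int64_t cmp_null(const void *l, const void *r)
{
	if (!l && !r)
		return 0;
	return !l ? -1 : 1;
}

static int64_t entry_cmp(const struct entry *l, const struct entry *r)
{
	if (!l->sym && !r->sym)
		return r->level - l->level;
	if (!l->sym || !r->sym)
		return cmp_null(l->sym, r->sym);
	if (l->sym == r->sym)
		return 0;
	return (int64_t)(r->sym->start - l->sym->start);
}
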
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 245e60d6b4e7..077df15ee705 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c | |||
@@ -76,16 +76,104 @@ static void dso__set_sorted_by_name(struct dso *dso, enum map_type type) | |||
76 | 76 | ||
77 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) | 77 | bool symbol_type__is_a(char symbol_type, enum map_type map_type) |
78 | { | 78 | { |
79 | symbol_type = toupper(symbol_type); | ||
80 | |||
79 | switch (map_type) { | 81 | switch (map_type) { |
80 | case MAP__FUNCTION: | 82 | case MAP__FUNCTION: |
81 | return symbol_type == 'T' || symbol_type == 'W'; | 83 | return symbol_type == 'T' || symbol_type == 'W'; |
82 | case MAP__VARIABLE: | 84 | case MAP__VARIABLE: |
83 | return symbol_type == 'D' || symbol_type == 'd'; | 85 | return symbol_type == 'D'; |
84 | default: | 86 | default: |
85 | return false; | 87 | return false; |
86 | } | 88 | } |
87 | } | 89 | } |
88 | 90 | ||
91 | static int prefix_underscores_count(const char *str) | ||
92 | { | ||
93 | const char *tail = str; | ||
94 | |||
95 | while (*tail == '_') | ||
96 | tail++; | ||
97 | |||
98 | return tail - str; | ||
99 | } | ||
100 | |||
101 | #define SYMBOL_A 0 | ||
102 | #define SYMBOL_B 1 | ||
103 | |||
104 | static int choose_best_symbol(struct symbol *syma, struct symbol *symb) | ||
105 | { | ||
106 | s64 a; | ||
107 | s64 b; | ||
108 | |||
109 | /* Prefer a symbol with non zero length */ | ||
110 | a = syma->end - syma->start; | ||
111 | b = symb->end - symb->start; | ||
112 | if ((b == 0) && (a > 0)) | ||
113 | return SYMBOL_A; | ||
114 | else if ((a == 0) && (b > 0)) | ||
115 | return SYMBOL_B; | ||
116 | |||
117 | /* Prefer a non weak symbol over a weak one */ | ||
118 | a = syma->binding == STB_WEAK; | ||
119 | b = symb->binding == STB_WEAK; | ||
120 | if (b && !a) | ||
121 | return SYMBOL_A; | ||
122 | if (a && !b) | ||
123 | return SYMBOL_B; | ||
124 | |||
125 | /* Prefer a global symbol over a non global one */ | ||
126 | a = syma->binding == STB_GLOBAL; | ||
127 | b = symb->binding == STB_GLOBAL; | ||
128 | if (a && !b) | ||
129 | return SYMBOL_A; | ||
130 | if (b && !a) | ||
131 | return SYMBOL_B; | ||
132 | |||
133 | /* Prefer a symbol with less underscores */ | ||
134 | a = prefix_underscores_count(syma->name); | ||
135 | b = prefix_underscores_count(symb->name); | ||
136 | if (b > a) | ||
137 | return SYMBOL_A; | ||
138 | else if (a > b) | ||
139 | return SYMBOL_B; | ||
140 | |||
141 | /* If all else fails, choose the symbol with the longest name */ | ||
142 | if (strlen(syma->name) >= strlen(symb->name)) | ||
143 | return SYMBOL_A; | ||
144 | else | ||
145 | return SYMBOL_B; | ||
146 | } | ||
147 | |||
148 | static void symbols__fixup_duplicate(struct rb_root *symbols) | ||
149 | { | ||
150 | struct rb_node *nd; | ||
151 | struct symbol *curr, *next; | ||
152 | |||
153 | nd = rb_first(symbols); | ||
154 | |||
155 | while (nd) { | ||
156 | curr = rb_entry(nd, struct symbol, rb_node); | ||
157 | again: | ||
158 | nd = rb_next(&curr->rb_node); | ||
159 | next = rb_entry(nd, struct symbol, rb_node); | ||
160 | |||
161 | if (!nd) | ||
162 | break; | ||
163 | |||
164 | if (curr->start != next->start) | ||
165 | continue; | ||
166 | |||
167 | if (choose_best_symbol(curr, next) == SYMBOL_A) { | ||
168 | rb_erase(&next->rb_node, symbols); | ||
169 | goto again; | ||
170 | } else { | ||
171 | nd = rb_next(&curr->rb_node); | ||
172 | rb_erase(&curr->rb_node, symbols); | ||
173 | } | ||
174 | } | ||
175 | } | ||
176 | |||
89 | static void symbols__fixup_end(struct rb_root *symbols) | 177 | static void symbols__fixup_end(struct rb_root *symbols) |
90 | { | 178 | { |
91 | struct rb_node *nd, *prevnd = rb_first(symbols); | 179 | struct rb_node *nd, *prevnd = rb_first(symbols); |
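
The new choose_best_symbol()/symbols__fixup_duplicate() pair above de-duplicates kallsyms entries that share a start address using a fixed preference order: keep the symbol with non-zero size, then the non-weak one, then the global one, then the one with fewer leading underscores, and finally the one with the longer name. A self-contained restatement of that tie-break order with simplified types (illustrative, not the perf structures):

#include <string.h>
#include <stdint.h>

struct sym {
	uint64_t start, end;
	int weak, global;   /* stand-ins for the STB_WEAK / STB_GLOBAL binding */
	const char *name;
};

static int underscores(const char *s)
{
	int n = 0;

	while (*s++ == '_')
		n++;
	return n;
}

/* Return 0 to keep a, 1 to keep b, using the same preference order as
 * choose_best_symbol() above. */
static int pick(const struct sym *a, const struct sym *b)
{
	if ((a->end - a->start) && !(b->end - b->start)) return 0;
	if ((b->end - b->start) && !(a->end - a->start)) return 1;
	if (b->weak && !a->weak)                         return 0;
	if (a->weak && !b->weak)                         return 1;
	if (a->global && !b->global)                     return 0;
	if (b->global && !a->global)                     return 1;
	if (underscores(a->name) < underscores(b->name)) return 0;
	if (underscores(b->name) < underscores(a->name)) return 1;
	return strlen(a->name) >= strlen(b->name) ? 0 : 1;
}
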
@@ -440,18 +528,11 @@ int kallsyms__parse(const char *filename, void *arg, | |||
440 | char *line = NULL; | 528 | char *line = NULL; |
441 | size_t n; | 529 | size_t n; |
442 | int err = -1; | 530 | int err = -1; |
443 | u64 prev_start = 0; | ||
444 | char prev_symbol_type = 0; | ||
445 | char *prev_symbol_name; | ||
446 | FILE *file = fopen(filename, "r"); | 531 | FILE *file = fopen(filename, "r"); |
447 | 532 | ||
448 | if (file == NULL) | 533 | if (file == NULL) |
449 | goto out_failure; | 534 | goto out_failure; |
450 | 535 | ||
451 | prev_symbol_name = malloc(KSYM_NAME_LEN); | ||
452 | if (prev_symbol_name == NULL) | ||
453 | goto out_close; | ||
454 | |||
455 | err = 0; | 536 | err = 0; |
456 | 537 | ||
457 | while (!feof(file)) { | 538 | while (!feof(file)) { |
@@ -472,7 +553,7 @@ int kallsyms__parse(const char *filename, void *arg, | |||
472 | if (len + 2 >= line_len) | 553 | if (len + 2 >= line_len) |
473 | continue; | 554 | continue; |
474 | 555 | ||
475 | symbol_type = toupper(line[len]); | 556 | symbol_type = line[len]; |
476 | len += 2; | 557 | len += 2; |
477 | symbol_name = line + len; | 558 | symbol_name = line + len; |
478 | len = line_len - len; | 559 | len = line_len - len; |
@@ -482,24 +563,18 @@ int kallsyms__parse(const char *filename, void *arg, | |||
482 | break; | 563 | break; |
483 | } | 564 | } |
484 | 565 | ||
485 | if (prev_symbol_type) { | 566 | /* |
486 | u64 end = start; | 567 | * module symbols are not sorted so we add all |
487 | if (end != prev_start) | 568 | * symbols with zero length and rely on |
488 | --end; | 569 | * symbols__fixup_end() to fix it up. |
489 | err = process_symbol(arg, prev_symbol_name, | 570 | */ |
490 | prev_symbol_type, prev_start, end); | 571 | err = process_symbol(arg, symbol_name, |
491 | if (err) | 572 | symbol_type, start, start); |
492 | break; | 573 | if (err) |
493 | } | 574 | break; |
494 | |||
495 | memcpy(prev_symbol_name, symbol_name, len + 1); | ||
496 | prev_symbol_type = symbol_type; | ||
497 | prev_start = start; | ||
498 | } | 575 | } |
499 | 576 | ||
500 | free(prev_symbol_name); | ||
501 | free(line); | 577 | free(line); |
502 | out_close: | ||
503 | fclose(file); | 578 | fclose(file); |
504 | return err; | 579 | return err; |
505 | 580 | ||
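
Because /proc/kallsyms intermixes unsorted module symbols, the rewritten kallsyms__parse() loop above no longer derives an end address from the previous line; every symbol is recorded with zero length and symbols__fixup_duplicate()/symbols__fixup_end() repair the sizes once the symbols are sorted. A rough sketch of the end fix-up on a plain sorted array; the real code walks an rbtree of struct symbol, so take the shapes here as illustrative:

#include <stdint.h>
#include <stddef.h>

struct ksym {
	uint64_t start, end;   /* end == start right after parsing */
};

/* With symbols sorted by start address, grow each zero-length entry up to
 * just below the start of its successor; the final entry keeps whatever
 * heuristic end the caller assigns. */
static void fixup_ends(struct ksym *syms, size_t n)
{
	for (size_t i = 0; i + 1 < n; i++)
		if (syms[i].end <= syms[i].start)
			syms[i].end = syms[i + 1].start - 1;
}
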
@@ -705,6 +780,9 @@ int dso__load_kallsyms(struct dso *dso, const char *filename, | |||
705 | if (dso__load_all_kallsyms(dso, filename, map) < 0) | 780 | if (dso__load_all_kallsyms(dso, filename, map) < 0) |
706 | return -1; | 781 | return -1; |
707 | 782 | ||
783 | symbols__fixup_duplicate(&dso->symbols[map->type]); | ||
784 | symbols__fixup_end(&dso->symbols[map->type]); | ||
785 | |||
708 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) | 786 | if (dso->kernel == DSO_TYPE_GUEST_KERNEL) |
709 | dso->symtab_type = SYMTAB__GUEST_KALLSYMS; | 787 | dso->symtab_type = SYMTAB__GUEST_KALLSYMS; |
710 | else | 788 | else |
@@ -1094,8 +1172,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
1094 | if (dso->has_build_id) { | 1172 | if (dso->has_build_id) { |
1095 | u8 build_id[BUILD_ID_SIZE]; | 1173 | u8 build_id[BUILD_ID_SIZE]; |
1096 | 1174 | ||
1097 | if (elf_read_build_id(elf, build_id, | 1175 | if (elf_read_build_id(elf, build_id, BUILD_ID_SIZE) < 0) |
1098 | BUILD_ID_SIZE) != BUILD_ID_SIZE) | ||
1099 | goto out_elf_end; | 1176 | goto out_elf_end; |
1100 | 1177 | ||
1101 | if (!dso__build_id_equal(dso, build_id)) | 1178 | if (!dso__build_id_equal(dso, build_id)) |
@@ -1113,6 +1190,8 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name, | |||
1113 | } | 1190 | } |
1114 | 1191 | ||
1115 | opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); | 1192 | opdsec = elf_section_by_name(elf, &ehdr, &opdshdr, ".opd", &opdidx); |
1193 | if (opdshdr.sh_type != SHT_PROGBITS) | ||
1194 | opdsec = NULL; | ||
1116 | if (opdsec) | 1195 | if (opdsec) |
1117 | opddata = elf_rawdata(opdsec, NULL); | 1196 | opddata = elf_rawdata(opdsec, NULL); |
1118 | 1197 | ||
@@ -1278,6 +1357,7 @@ new_symbol: | |||
1278 | * For misannotated, zeroed, ASM function sizes. | 1357 | * For misannotated, zeroed, ASM function sizes. |
1279 | */ | 1358 | */ |
1280 | if (nr > 0) { | 1359 | if (nr > 0) { |
1360 | symbols__fixup_duplicate(&dso->symbols[map->type]); | ||
1281 | symbols__fixup_end(&dso->symbols[map->type]); | 1361 | symbols__fixup_end(&dso->symbols[map->type]); |
1282 | if (kmap) { | 1362 | if (kmap) { |
1283 | /* | 1363 | /* |
@@ -1364,8 +1444,8 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size) | |||
1364 | ptr = data->d_buf; | 1444 | ptr = data->d_buf; |
1365 | while (ptr < (data->d_buf + data->d_size)) { | 1445 | while (ptr < (data->d_buf + data->d_size)) { |
1366 | GElf_Nhdr *nhdr = ptr; | 1446 | GElf_Nhdr *nhdr = ptr; |
1367 | int namesz = NOTE_ALIGN(nhdr->n_namesz), | 1447 | size_t namesz = NOTE_ALIGN(nhdr->n_namesz), |
1368 | descsz = NOTE_ALIGN(nhdr->n_descsz); | 1448 | descsz = NOTE_ALIGN(nhdr->n_descsz); |
1369 | const char *name; | 1449 | const char *name; |
1370 | 1450 | ||
1371 | ptr += sizeof(*nhdr); | 1451 | ptr += sizeof(*nhdr); |
@@ -1374,8 +1454,10 @@ static int elf_read_build_id(Elf *elf, void *bf, size_t size) | |||
1374 | if (nhdr->n_type == NT_GNU_BUILD_ID && | 1454 | if (nhdr->n_type == NT_GNU_BUILD_ID && |
1375 | nhdr->n_namesz == sizeof("GNU")) { | 1455 | nhdr->n_namesz == sizeof("GNU")) { |
1376 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { | 1456 | if (memcmp(name, "GNU", sizeof("GNU")) == 0) { |
1377 | memcpy(bf, ptr, BUILD_ID_SIZE); | 1457 | size_t sz = min(size, descsz); |
1378 | err = BUILD_ID_SIZE; | 1458 | memcpy(bf, ptr, sz); |
1459 | memset(bf + sz, 0, size - sz); | ||
1460 | err = descsz; | ||
1379 | break; | 1461 | break; |
1380 | } | 1462 | } |
1381 | } | 1463 | } |
@@ -1427,7 +1509,7 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | |||
1427 | while (1) { | 1509 | while (1) { |
1428 | char bf[BUFSIZ]; | 1510 | char bf[BUFSIZ]; |
1429 | GElf_Nhdr nhdr; | 1511 | GElf_Nhdr nhdr; |
1430 | int namesz, descsz; | 1512 | size_t namesz, descsz; |
1431 | 1513 | ||
1432 | if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) | 1514 | if (read(fd, &nhdr, sizeof(nhdr)) != sizeof(nhdr)) |
1433 | break; | 1515 | break; |
@@ -1436,15 +1518,16 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size) | |||
1436 | descsz = NOTE_ALIGN(nhdr.n_descsz); | 1518 | descsz = NOTE_ALIGN(nhdr.n_descsz); |
1437 | if (nhdr.n_type == NT_GNU_BUILD_ID && | 1519 | if (nhdr.n_type == NT_GNU_BUILD_ID && |
1438 | nhdr.n_namesz == sizeof("GNU")) { | 1520 | nhdr.n_namesz == sizeof("GNU")) { |
1439 | if (read(fd, bf, namesz) != namesz) | 1521 | if (read(fd, bf, namesz) != (ssize_t)namesz) |
1440 | break; | 1522 | break; |
1441 | if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { | 1523 | if (memcmp(bf, "GNU", sizeof("GNU")) == 0) { |
1442 | if (read(fd, build_id, | 1524 | size_t sz = min(descsz, size); |
1443 | BUILD_ID_SIZE) == BUILD_ID_SIZE) { | 1525 | if (read(fd, build_id, sz) == (ssize_t)sz) { |
1526 | memset(build_id + sz, 0, size - sz); | ||
1444 | err = 0; | 1527 | err = 0; |
1445 | break; | 1528 | break; |
1446 | } | 1529 | } |
1447 | } else if (read(fd, bf, descsz) != descsz) | 1530 | } else if (read(fd, bf, descsz) != (ssize_t)descsz) |
1448 | break; | 1531 | break; |
1449 | } else { | 1532 | } else { |
1450 | int n = namesz + descsz; | 1533 | int n = namesz + descsz; |
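
Both build-id readers above (elf_read_build_id() and sysfs__read_build_id()) now clamp the copy to the smaller of the note descriptor size and the destination buffer, zero-filling the remainder, rather than assuming the descriptor is exactly BUILD_ID_SIZE bytes. A small sketch of that defensive copy as a stand-alone helper (illustrative name, not the perf function):

#include <string.h>
#include <stddef.h>

/* Copy at most dst_size bytes of a build-id note descriptor and zero the
 * rest of the destination, so short or oversized notes can neither overrun
 * the buffer nor leave stale bytes behind. Returns the descriptor size. */
static size_t copy_build_id(void *dst, size_t dst_size,
			    const void *desc, size_t desc_size)
{
	size_t sz = desc_size < dst_size ? desc_size : dst_size;

	memcpy(dst, desc, sz);
	memset((char *)dst + sz, 0, dst_size - sz);
	return desc_size;
}
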
diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index bfbf95bcc603..b07b0410463c 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h | |||
@@ -10,6 +10,7 @@ | |||
10 | 10 | ||
11 | struct perf_evlist; | 11 | struct perf_evlist; |
12 | struct perf_evsel; | 12 | struct perf_evsel; |
13 | struct perf_session; | ||
13 | 14 | ||
14 | struct sym_entry { | 15 | struct sym_entry { |
15 | struct rb_node rb_node; | 16 | struct rb_node rb_node; |
@@ -38,6 +39,7 @@ struct perf_top { | |||
38 | u64 kernel_samples, us_samples; | 39 | u64 kernel_samples, us_samples; |
39 | u64 exact_samples; | 40 | u64 exact_samples; |
40 | u64 guest_us_samples, guest_kernel_samples; | 41 | u64 guest_us_samples, guest_kernel_samples; |
42 | u64 total_lost_warned; | ||
41 | int print_entries, count_filter, delay_secs; | 43 | int print_entries, count_filter, delay_secs; |
42 | int display_weighted, freq, rb_entries; | 44 | int display_weighted, freq, rb_entries; |
43 | pid_t target_pid, target_tid; | 45 | pid_t target_pid, target_tid; |
@@ -45,6 +47,7 @@ struct perf_top { | |||
45 | const char *cpu_list; | 47 | const char *cpu_list; |
46 | struct sym_entry *sym_filter_entry; | 48 | struct sym_entry *sym_filter_entry; |
47 | struct perf_evsel *sym_evsel; | 49 | struct perf_evsel *sym_evsel; |
50 | struct perf_session *session; | ||
48 | }; | 51 | }; |
49 | 52 | ||
50 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); | 53 | size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size); |
diff --git a/tools/perf/util/ui/browsers/top.c b/tools/perf/util/ui/browsers/top.c index 88403cf8396a..9b6b43b32ac8 100644 --- a/tools/perf/util/ui/browsers/top.c +++ b/tools/perf/util/ui/browsers/top.c | |||
@@ -11,10 +11,12 @@ | |||
11 | #include "../helpline.h" | 11 | #include "../helpline.h" |
12 | #include "../libslang.h" | 12 | #include "../libslang.h" |
13 | #include "../util.h" | 13 | #include "../util.h" |
14 | #include "../ui.h" | ||
14 | #include "../../evlist.h" | 15 | #include "../../evlist.h" |
15 | #include "../../hist.h" | 16 | #include "../../hist.h" |
16 | #include "../../sort.h" | 17 | #include "../../sort.h" |
17 | #include "../../symbol.h" | 18 | #include "../../symbol.h" |
19 | #include "../../session.h" | ||
18 | #include "../../top.h" | 20 | #include "../../top.h" |
19 | 21 | ||
20 | struct perf_top_browser { | 22 | struct perf_top_browser { |
@@ -43,10 +45,10 @@ static void perf_top_browser__write(struct ui_browser *browser, void *entry, int | |||
43 | 45 | ||
44 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { | 46 | if (top->evlist->nr_entries == 1 || !top->display_weighted) { |
45 | slsmg_printf("%20.2f ", syme->weight); | 47 | slsmg_printf("%20.2f ", syme->weight); |
46 | width -= 24; | 48 | width -= 21; |
47 | } else { | 49 | } else { |
48 | slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count); | 50 | slsmg_printf("%9.1f %10ld ", syme->weight, syme->snap_count); |
49 | width -= 23; | 51 | width -= 20; |
50 | } | 52 | } |
51 | 53 | ||
52 | slsmg_printf("%4.1f%%", pcnt); | 54 | slsmg_printf("%4.1f%%", pcnt); |
@@ -143,6 +145,25 @@ do_annotation: | |||
143 | symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000); | 145 | symbol__tui_annotate(sym, syme->map, 0, top->delay_secs * 1000); |
144 | } | 146 | } |
145 | 147 | ||
148 | static void perf_top_browser__warn_lost(struct perf_top_browser *browser) | ||
149 | { | ||
150 | struct perf_top *top = browser->b.priv; | ||
151 | char msg[128]; | ||
152 | int len; | ||
153 | |||
154 | top->total_lost_warned = top->session->hists.stats.total_lost; | ||
155 | pthread_mutex_lock(&ui__lock); | ||
156 | ui_browser__set_color(&browser->b, HE_COLORSET_TOP); | ||
157 | len = snprintf(msg, sizeof(msg), | ||
158 | " WARNING: LOST %" PRIu64 " events, Check IO/CPU overload", | ||
159 | top->total_lost_warned); | ||
160 | if (len > browser->b.width) | ||
161 | len = browser->b.width; | ||
162 | SLsmg_gotorc(0, browser->b.width - len); | ||
163 | slsmg_write_nstring(msg, len); | ||
164 | pthread_mutex_unlock(&ui__lock); | ||
165 | } | ||
166 | |||
146 | static int perf_top_browser__run(struct perf_top_browser *browser) | 167 | static int perf_top_browser__run(struct perf_top_browser *browser) |
147 | { | 168 | { |
148 | int key; | 169 | int key; |
@@ -174,6 +195,9 @@ static int perf_top_browser__run(struct perf_top_browser *browser) | |||
174 | ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT); | 195 | ui_browser__set_color(&browser->b, NEWT_COLORSET_ROOT); |
175 | SLsmg_gotorc(0, 0); | 196 | SLsmg_gotorc(0, 0); |
176 | slsmg_write_nstring(title, browser->b.width); | 197 | slsmg_write_nstring(title, browser->b.width); |
198 | |||
199 | if (top->total_lost_warned != top->session->hists.stats.total_lost) | ||
200 | perf_top_browser__warn_lost(browser); | ||
177 | break; | 201 | break; |
178 | case 'a': | 202 | case 'a': |
179 | case NEWT_KEY_RIGHT: | 203 | case NEWT_KEY_RIGHT: |