115 files changed, 1040 insertions, 495 deletions
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42 index 0e76ef12e4c6..a22ecf48f255 100644 --- a/Documentation/hwmon/jc42 +++ b/Documentation/hwmon/jc42 | |||
@@ -51,7 +51,8 @@ Supported chips: | |||
51 | * JEDEC JC 42.4 compliant temperature sensor chips | 51 | * JEDEC JC 42.4 compliant temperature sensor chips |
52 | Prefix: 'jc42' | 52 | Prefix: 'jc42' |
53 | Addresses scanned: I2C 0x18 - 0x1f | 53 | Addresses scanned: I2C 0x18 - 0x1f |
54 | Datasheet: - | 54 | Datasheet: |
55 | http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf | ||
55 | 56 | ||
56 | Author: | 57 | Author: |
57 | Guenter Roeck <guenter.roeck@ericsson.com> | 58 | Guenter Roeck <guenter.roeck@ericsson.com> |
@@ -60,7 +61,11 @@ Author: | |||
60 | Description | 61 | Description |
61 | ----------- | 62 | ----------- |
62 | 63 | ||
63 | This driver implements support for JEDEC JC 42.4 compliant temperature sensors. | 64 | This driver implements support for JEDEC JC 42.4 compliant temperature sensors, |
65 | which are used on many DDR3 memory modules for mobile devices and servers. Some | ||
66 | systems use the sensor to prevent memory overheating by automatically throttling | ||
67 | the memory controller. | ||
68 | |||
64 | The driver auto-detects the chips listed above, but can be manually instantiated | 69 | The driver auto-detects the chips listed above, but can be manually instantiated |
65 | to support other JC 42.4 compliant chips. | 70 | to support other JC 42.4 compliant chips. |
66 | 71 | ||
@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis, | |||
81 | which applies to all limits. This register can be written by writing into | 86 | which applies to all limits. This register can be written by writing into |
82 | temp1_crit_hyst. Other hysteresis attributes are read-only. | 87 | temp1_crit_hyst. Other hysteresis attributes are read-only. |
83 | 88 | ||
89 | If the BIOS has configured the sensor for automatic temperature management, it | ||
90 | is likely that it has locked the registers, i.e., that the temperature limits | ||
91 | cannot be changed. | ||
92 | |||
84 | Sysfs entries | 93 | Sysfs entries |
85 | ------------- | 94 | ------------- |
86 | 95 | ||
87 | temp1_input Temperature (RO) | 96 | temp1_input Temperature (RO) |
88 | temp1_min Minimum temperature (RW) | 97 | temp1_min Minimum temperature (RO or RW) |
89 | temp1_max Maximum temperature (RW) | 98 | temp1_max Maximum temperature (RO or RW) |
90 | temp1_crit Critical high temperature (RW) | 99 | temp1_crit Critical high temperature (RO or RW) |
91 | 100 | ||
92 | temp1_crit_hyst Critical hysteresis temperature (RW) | 101 | temp1_crit_hyst Critical hysteresis temperature (RO or RW) |
93 | temp1_max_hyst Maximum hysteresis temperature (RO) | 102 | temp1_max_hyst Maximum hysteresis temperature (RO) |
94 | 103 | ||
95 | temp1_min_alarm Temperature low alarm | 104 | temp1_min_alarm Temperature low alarm |
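The jc42 documentation above notes that the driver can be manually instantiated to support other JC 42.4 compliant chips. A minimal sketch of doing that from a small platform module follows; it is not part of the patch, and the bus number (0) and slave address (0x1a) are assumptions for illustration only:

	/* Sketch: manual instantiation of a JC 42.4 compliant sensor. */
	#include <linux/module.h>
	#include <linux/i2c.h>

	static struct i2c_board_info jc42_info = {
		I2C_BOARD_INFO("jc42", 0x1a),	/* assumed address */
	};

	static struct i2c_client *jc42_client;

	static int __init jc42_instantiate(void)
	{
		struct i2c_adapter *adap = i2c_get_adapter(0);	/* assumed bus */

		if (!adap)
			return -ENODEV;
		jc42_client = i2c_new_device(adap, &jc42_info);
		i2c_put_adapter(adap);
		return jc42_client ? 0 : -ENODEV;
	}

	static void __exit jc42_remove(void)
	{
		i2c_unregister_device(jc42_client);
	}

	module_init(jc42_instantiate);
	module_exit(jc42_remove);
	MODULE_LICENSE("GPL");

Once instantiated, the attributes listed in the table above (temp1_input, the limit files and the alarms) appear under the client's hwmon device, read-only or read-write as described.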
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp index 6526eee525a6..d2b56a4fd1f5 100644 --- a/Documentation/hwmon/k10temp +++ b/Documentation/hwmon/k10temp | |||
@@ -9,6 +9,8 @@ Supported chips: | |||
9 | Socket S1G3: Athlon II, Sempron, Turion II | 9 | Socket S1G3: Athlon II, Sempron, Turion II |
10 | * AMD Family 11h processors: | 10 | * AMD Family 11h processors: |
11 | Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) | 11 | Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra) |
12 | * AMD Family 12h processors: "Llano" | ||
13 | * AMD Family 14h processors: "Brazos" (C/E/G-Series) | ||
12 | 14 | ||
13 | Prefix: 'k10temp' | 15 | Prefix: 'k10temp' |
14 | Addresses scanned: PCI space | 16 | Addresses scanned: PCI space |
@@ -17,10 +19,14 @@ Supported chips: | |||
17 | http://support.amd.com/us/Processor_TechDocs/31116.pdf | 19 | http://support.amd.com/us/Processor_TechDocs/31116.pdf |
18 | BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors: | 20 | BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors: |
19 | http://support.amd.com/us/Processor_TechDocs/41256.pdf | 21 | http://support.amd.com/us/Processor_TechDocs/41256.pdf |
22 | BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors: | ||
23 | http://support.amd.com/us/Processor_TechDocs/43170.pdf | ||
20 | Revision Guide for AMD Family 10h Processors: | 24 | Revision Guide for AMD Family 10h Processors: |
21 | http://support.amd.com/us/Processor_TechDocs/41322.pdf | 25 | http://support.amd.com/us/Processor_TechDocs/41322.pdf |
22 | Revision Guide for AMD Family 11h Processors: | 26 | Revision Guide for AMD Family 11h Processors: |
23 | http://support.amd.com/us/Processor_TechDocs/41788.pdf | 27 | http://support.amd.com/us/Processor_TechDocs/41788.pdf |
28 | Revision Guide for AMD Family 14h Models 00h-0Fh Processors: | ||
29 | http://support.amd.com/us/Processor_TechDocs/47534.pdf | ||
24 | AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks: | 30 | AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks: |
25 | http://support.amd.com/us/Processor_TechDocs/43373.pdf | 31 | http://support.amd.com/us/Processor_TechDocs/43373.pdf |
26 | AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet: | 32 | AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet: |
@@ -34,7 +40,7 @@ Description | |||
34 | ----------- | 40 | ----------- |
35 | 41 | ||
36 | This driver permits reading of the internal temperature sensor of AMD | 42 | This driver permits reading of the internal temperature sensor of AMD |
37 | Family 10h and 11h processors. | 43 | Family 10h/11h/12h/14h processors. |
38 | 44 | ||
39 | All these processors have a sensor, but on those for Socket F or AM2+, | 45 | All these processors have a sensor, but on those for Socket F or AM2+, |
40 | the sensor may return inconsistent values (erratum 319). The driver | 46 | the sensor may return inconsistent values (erratum 319). The driver |
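Since the k10temp text above only describes reading the sensor, a small userspace sketch may help. It is not part of the patch, and the sysfs path is an assumption; the actual hwmon index has to be discovered, for example by checking which hwmon device's "name" attribute reads "k10temp":

	#include <stdio.h>

	int main(void)
	{
		/* assumed path; locate the real k10temp hwmon device first */
		const char *path = "/sys/class/hwmon/hwmon0/device/temp1_input";
		FILE *f = fopen(path, "r");
		long millideg;

		if (!f) {
			perror(path);
			return 1;
		}
		if (fscanf(f, "%ld", &millideg) != 1) {
			fclose(f);
			return 1;
		}
		fclose(f);
		printf("CPU temperature: %ld.%03ld degrees C\n",
		       millideg / 1000, millideg % 1000);
		return 0;
	}

As usual for hwmon temperature inputs, the value is reported in millidegrees Celsius.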
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile index 5aba7a33aeeb..24c308dd3fd1 100644 --- a/Documentation/networking/Makefile +++ b/Documentation/networking/Makefile | |||
@@ -4,6 +4,8 @@ obj- := dummy.o | |||
4 | # List of programs to build | 4 | # List of programs to build |
5 | hostprogs-y := ifenslave | 5 | hostprogs-y := ifenslave |
6 | 6 | ||
7 | HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include | ||
8 | |||
7 | # Tell kbuild to always build the programs | 9 | # Tell kbuild to always build the programs |
8 | always := $(hostprogs-y) | 10 | always := $(hostprogs-y) |
9 | 11 | ||
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt index 996a27d9b8db..01c513fac40e 100644 --- a/Documentation/workqueue.txt +++ b/Documentation/workqueue.txt | |||
@@ -190,9 +190,9 @@ resources, scheduled and executed. | |||
190 | * Long running CPU intensive workloads which can be better | 190 | * Long running CPU intensive workloads which can be better |
191 | managed by the system scheduler. | 191 | managed by the system scheduler. |
192 | 192 | ||
193 | WQ_FREEZEABLE | 193 | WQ_FREEZABLE |
194 | 194 | ||
195 | A freezeable wq participates in the freeze phase of the system | 195 | A freezable wq participates in the freeze phase of the system |
196 | suspend operations. Work items on the wq are drained and no | 196 | suspend operations. Work items on the wq are drained and no |
197 | new work item starts execution until thawed. | 197 | new work item starts execution until thawed. |
198 | 198 | ||
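For reference, a sketch of how a driver would use the renamed flag; the queue and work item names here are made up and not taken from any driver in this diff:

	#include <linux/workqueue.h>

	static struct workqueue_struct *my_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* runs in process context; held back while the system is frozen */
	}

	static DECLARE_WORK(my_work, my_work_fn);

	static int __init my_wq_init(void)
	{
		my_wq = alloc_workqueue("my_wq", WQ_FREEZABLE, 0);
		if (!my_wq)
			return -ENOMEM;
		queue_work(my_wq, &my_work);
		return 0;
	}

Code still using the old WQ_FREEZEABLE spelling only needs the one-letter rename; the semantics are unchanged.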
diff --git a/MAINTAINERS b/MAINTAINERS index 5dd6c751e6a6..6f99e1260db8 100644 --- a/MAINTAINERS +++ b/MAINTAINERS | |||
@@ -885,7 +885,7 @@ S: Supported | |||
885 | 885 | ||
886 | ARM/QUALCOMM MSM MACHINE SUPPORT | 886 | ARM/QUALCOMM MSM MACHINE SUPPORT |
887 | M: David Brown <davidb@codeaurora.org> | 887 | M: David Brown <davidb@codeaurora.org> |
888 | M: Daniel Walker <dwalker@codeaurora.org> | 888 | M: Daniel Walker <dwalker@fifo99.com> |
889 | M: Bryan Huntsman <bryanh@codeaurora.org> | 889 | M: Bryan Huntsman <bryanh@codeaurora.org> |
890 | L: linux-arm-msm@vger.kernel.org | 890 | L: linux-arm-msm@vger.kernel.org |
891 | F: arch/arm/mach-msm/ | 891 | F: arch/arm/mach-msm/ |
@@ -2873,7 +2873,6 @@ M: Guenter Roeck <guenter.roeck@ericsson.com> | |||
2873 | L: lm-sensors@lm-sensors.org | 2873 | L: lm-sensors@lm-sensors.org |
2874 | W: http://www.lm-sensors.org/ | 2874 | W: http://www.lm-sensors.org/ |
2875 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ | 2875 | T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ |
2876 | T: quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/ | ||
2877 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git | 2876 | T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git |
2878 | S: Maintained | 2877 | S: Maintained |
2879 | F: Documentation/hwmon/ | 2878 | F: Documentation/hwmon/ |
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 26d45e5b636b..166efa2a19cd 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig | |||
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622 | |||
1177 | visible impact on the overall performance or power consumption of the | 1177 | visible impact on the overall performance or power consumption of the |
1178 | processor. | 1178 | processor. |
1179 | 1179 | ||
1180 | config ARM_ERRATA_751472 | ||
1181 | bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation" | ||
1182 | depends on CPU_V7 && SMP | ||
1183 | help | ||
1184 | This option enables the workaround for the 751472 Cortex-A9 (prior | ||
1185 | to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the | ||
1186 | completion of a following broadcasted operation if the second | ||
1187 | operation is received by a CPU before the ICIALLUIS has completed, | ||
1188 | potentially leading to corrupted entries in the cache or TLB. | ||
1189 | |||
1190 | config ARM_ERRATA_753970 | ||
1191 | bool "ARM errata: cache sync operation may be faulty" | ||
1192 | depends on CACHE_PL310 | ||
1193 | help | ||
1194 | This option enables the workaround for the 753970 PL310 (r3p0) erratum. | ||
1195 | |||
1196 | Under some condition the effect of cache sync operation on | ||
1197 | the store buffer still remains when the operation completes. | ||
1198 | This means that the store buffer is always asked to drain and | ||
1199 | this prevents it from merging any further writes. The workaround | ||
1200 | is to replace the normal offset of cache sync operation (0x730) | ||
1201 | by another offset targeting an unmapped PL310 register 0x740. | ||
1202 | This has the same effect as the cache sync operation: store buffer | ||
1203 | drain and waiting for all buffers empty. | ||
1204 | |||
1180 | endmenu | 1205 | endmenu |
1181 | 1206 | ||
1182 | source "arch/arm/common/Kconfig" | 1207 | source "arch/arm/common/Kconfig" |
diff --git a/arch/arm/Makefile b/arch/arm/Makefile index c22c1adfedd6..6f7b29294c80 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile | |||
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y) | |||
15 | LDFLAGS_vmlinux += --be8 | 15 | LDFLAGS_vmlinux += --be8 |
16 | endif | 16 | endif |
17 | 17 | ||
18 | OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S | 18 | OBJCOPYFLAGS :=-O binary -R .comment -S |
19 | GZFLAGS :=-9 | 19 | GZFLAGS :=-9 |
20 | #KBUILD_CFLAGS +=-pipe | 20 | #KBUILD_CFLAGS +=-pipe |
21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: | 21 | # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: |
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index ab204db594d3..c6028967d336 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore | |||
@@ -1,3 +1,7 @@ | |||
1 | font.c | 1 | font.c |
2 | piggy.gz | 2 | lib1funcs.S |
3 | piggy.gzip | ||
4 | piggy.lzo | ||
5 | piggy.lzma | ||
6 | vmlinux | ||
3 | vmlinux.lds | 7 | vmlinux.lds |
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5aeec1e1735c..16bd48031583 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h | |||
@@ -36,6 +36,7 @@ | |||
36 | #define L2X0_RAW_INTR_STAT 0x21C | 36 | #define L2X0_RAW_INTR_STAT 0x21C |
37 | #define L2X0_INTR_CLEAR 0x220 | 37 | #define L2X0_INTR_CLEAR 0x220 |
38 | #define L2X0_CACHE_SYNC 0x730 | 38 | #define L2X0_CACHE_SYNC 0x730 |
39 | #define L2X0_DUMMY_REG 0x740 | ||
39 | #define L2X0_INV_LINE_PA 0x770 | 40 | #define L2X0_INV_LINE_PA 0x770 |
40 | #define L2X0_INV_WAY 0x77C | 41 | #define L2X0_INV_WAY 0x77C |
41 | #define L2X0_CLEAN_LINE_PA 0x7B0 | 42 | #define L2X0_CLEAN_LINE_PA 0x7B0 |
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h index 721847dc68ab..e0d1c0cfa548 100644 --- a/arch/arm/include/asm/hardware/sp810.h +++ b/arch/arm/include/asm/hardware/sp810.h | |||
@@ -58,6 +58,9 @@ | |||
58 | 58 | ||
59 | static inline void sysctl_soft_reset(void __iomem *base) | 59 | static inline void sysctl_soft_reset(void __iomem *base) |
60 | { | 60 | { |
61 | /* switch to slow mode */ | ||
62 | writel(0x2, base + SCCTRL); | ||
63 | |||
61 | /* writing any value to SCSYSSTAT reg will reset system */ | 64 | /* writing any value to SCSYSSTAT reg will reset system */ |
62 | writel(0, base + SCSYSSTAT); | 65 | writel(0, base + SCSYSSTAT); |
63 | } | 66 | } |
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h index f41a6f57cd12..82dfe5d0c41e 100644 --- a/arch/arm/include/asm/tlb.h +++ b/arch/arm/include/asm/tlb.h | |||
@@ -18,16 +18,34 @@ | |||
18 | #define __ASMARM_TLB_H | 18 | #define __ASMARM_TLB_H |
19 | 19 | ||
20 | #include <asm/cacheflush.h> | 20 | #include <asm/cacheflush.h> |
21 | #include <asm/tlbflush.h> | ||
22 | 21 | ||
23 | #ifndef CONFIG_MMU | 22 | #ifndef CONFIG_MMU |
24 | 23 | ||
25 | #include <linux/pagemap.h> | 24 | #include <linux/pagemap.h> |
25 | |||
26 | #define tlb_flush(tlb) ((void) tlb) | ||
27 | |||
26 | #include <asm-generic/tlb.h> | 28 | #include <asm-generic/tlb.h> |
27 | 29 | ||
28 | #else /* !CONFIG_MMU */ | 30 | #else /* !CONFIG_MMU */ |
29 | 31 | ||
32 | #include <linux/swap.h> | ||
30 | #include <asm/pgalloc.h> | 33 | #include <asm/pgalloc.h> |
34 | #include <asm/tlbflush.h> | ||
35 | |||
36 | /* | ||
37 | * We need to delay page freeing for SMP as other CPUs can access pages | ||
38 | * which have been removed but not yet had their TLB entries invalidated. | ||
39 | * Also, as ARMv7 speculative prefetch can drag new entries into the TLB, | ||
40 | * we need to apply this same delaying tactic to ensure correct operation. | ||
41 | */ | ||
42 | #if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7) | ||
43 | #define tlb_fast_mode(tlb) 0 | ||
44 | #define FREE_PTE_NR 500 | ||
45 | #else | ||
46 | #define tlb_fast_mode(tlb) 1 | ||
47 | #define FREE_PTE_NR 0 | ||
48 | #endif | ||
31 | 49 | ||
32 | /* | 50 | /* |
33 | * TLB handling. This allows us to remove pages from the page | 51 | * TLB handling. This allows us to remove pages from the page |
@@ -36,12 +54,58 @@ | |||
36 | struct mmu_gather { | 54 | struct mmu_gather { |
37 | struct mm_struct *mm; | 55 | struct mm_struct *mm; |
38 | unsigned int fullmm; | 56 | unsigned int fullmm; |
57 | struct vm_area_struct *vma; | ||
39 | unsigned long range_start; | 58 | unsigned long range_start; |
40 | unsigned long range_end; | 59 | unsigned long range_end; |
60 | unsigned int nr; | ||
61 | struct page *pages[FREE_PTE_NR]; | ||
41 | }; | 62 | }; |
42 | 63 | ||
43 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); | 64 | DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); |
44 | 65 | ||
66 | /* | ||
67 | * This is unnecessarily complex. There's three ways the TLB shootdown | ||
68 | * code is used: | ||
69 | * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region(). | ||
70 | * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called. | ||
71 | * tlb->vma will be non-NULL. | ||
72 | * 2. Unmapping all vmas. See exit_mmap(). | ||
73 | * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called. | ||
74 | * tlb->vma will be non-NULL. Additionally, page tables will be freed. | ||
75 | * 3. Unmapping argument pages. See shift_arg_pages(). | ||
76 | * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called. | ||
77 | * tlb->vma will be NULL. | ||
78 | */ | ||
79 | static inline void tlb_flush(struct mmu_gather *tlb) | ||
80 | { | ||
81 | if (tlb->fullmm || !tlb->vma) | ||
82 | flush_tlb_mm(tlb->mm); | ||
83 | else if (tlb->range_end > 0) { | ||
84 | flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end); | ||
85 | tlb->range_start = TASK_SIZE; | ||
86 | tlb->range_end = 0; | ||
87 | } | ||
88 | } | ||
89 | |||
90 | static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr) | ||
91 | { | ||
92 | if (!tlb->fullmm) { | ||
93 | if (addr < tlb->range_start) | ||
94 | tlb->range_start = addr; | ||
95 | if (addr + PAGE_SIZE > tlb->range_end) | ||
96 | tlb->range_end = addr + PAGE_SIZE; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | static inline void tlb_flush_mmu(struct mmu_gather *tlb) | ||
101 | { | ||
102 | tlb_flush(tlb); | ||
103 | if (!tlb_fast_mode(tlb)) { | ||
104 | free_pages_and_swap_cache(tlb->pages, tlb->nr); | ||
105 | tlb->nr = 0; | ||
106 | } | ||
107 | } | ||
108 | |||
45 | static inline struct mmu_gather * | 109 | static inline struct mmu_gather * |
46 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | 110 | tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) |
47 | { | 111 | { |
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
49 | 113 | ||
50 | tlb->mm = mm; | 114 | tlb->mm = mm; |
51 | tlb->fullmm = full_mm_flush; | 115 | tlb->fullmm = full_mm_flush; |
116 | tlb->vma = NULL; | ||
117 | tlb->nr = 0; | ||
52 | 118 | ||
53 | return tlb; | 119 | return tlb; |
54 | } | 120 | } |
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) | |||
56 | static inline void | 122 | static inline void |
57 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | 123 | tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) |
58 | { | 124 | { |
59 | if (tlb->fullmm) | 125 | tlb_flush_mmu(tlb); |
60 | flush_tlb_mm(tlb->mm); | ||
61 | 126 | ||
62 | /* keep the page table cache within bounds */ | 127 | /* keep the page table cache within bounds */ |
63 | check_pgt_cache(); | 128 | check_pgt_cache(); |
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end) | |||
71 | static inline void | 136 | static inline void |
72 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) | 137 | tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr) |
73 | { | 138 | { |
74 | if (!tlb->fullmm) { | 139 | tlb_add_flush(tlb, addr); |
75 | if (addr < tlb->range_start) | ||
76 | tlb->range_start = addr; | ||
77 | if (addr + PAGE_SIZE > tlb->range_end) | ||
78 | tlb->range_end = addr + PAGE_SIZE; | ||
79 | } | ||
80 | } | 140 | } |
81 | 141 | ||
82 | /* | 142 | /* |
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
89 | { | 149 | { |
90 | if (!tlb->fullmm) { | 150 | if (!tlb->fullmm) { |
91 | flush_cache_range(vma, vma->vm_start, vma->vm_end); | 151 | flush_cache_range(vma, vma->vm_start, vma->vm_end); |
152 | tlb->vma = vma; | ||
92 | tlb->range_start = TASK_SIZE; | 153 | tlb->range_start = TASK_SIZE; |
93 | tlb->range_end = 0; | 154 | tlb->range_end = 0; |
94 | } | 155 | } |
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | |||
97 | static inline void | 158 | static inline void |
98 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) | 159 | tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma) |
99 | { | 160 | { |
100 | if (!tlb->fullmm && tlb->range_end > 0) | 161 | if (!tlb->fullmm) |
101 | flush_tlb_range(vma, tlb->range_start, tlb->range_end); | 162 | tlb_flush(tlb); |
163 | } | ||
164 | |||
165 | static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) | ||
166 | { | ||
167 | if (tlb_fast_mode(tlb)) { | ||
168 | free_page_and_swap_cache(page); | ||
169 | } else { | ||
170 | tlb->pages[tlb->nr++] = page; | ||
171 | if (tlb->nr >= FREE_PTE_NR) | ||
172 | tlb_flush_mmu(tlb); | ||
173 | } | ||
174 | } | ||
175 | |||
176 | static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte, | ||
177 | unsigned long addr) | ||
178 | { | ||
179 | pgtable_page_dtor(pte); | ||
180 | tlb_add_flush(tlb, addr); | ||
181 | tlb_remove_page(tlb, pte); | ||
102 | } | 182 | } |
103 | 183 | ||
104 | #define tlb_remove_page(tlb,page) free_page_and_swap_cache(page) | 184 | #define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr) |
105 | #define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep) | ||
106 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) | 185 | #define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp) |
107 | 186 | ||
108 | #define tlb_migrate_finish(mm) do { } while (0) | 187 | #define tlb_migrate_finish(mm) do { } while (0) |
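The comment added in the hunk above lists the three ways the shootdown code is used. As a rough schematic (not part of the patch) of how core mm code drives these helpers in case 1, unmapping a range within a vma:

	static void unmap_range_sketch(struct vm_area_struct *vma,
				       unsigned long start, unsigned long end)
	{
		struct mmu_gather *tlb = tlb_gather_mmu(vma->vm_mm, 0);	/* fullmm = 0 */

		tlb_start_vma(tlb, vma);		/* flush caches, record the vma */
		/*
		 * For each present pte in [start, end):
		 *	tlb_remove_tlb_entry(tlb, ptep, addr);	widen the flush range
		 *	tlb_remove_page(tlb, page);		free now, or batch in tlb->pages
		 */
		tlb_end_vma(tlb, vma);			/* flush the recorded TLB range */
		tlb_finish_mmu(tlb, start, end);	/* final flush, free batched pages */
	}

With tlb_fast_mode() returning 0 (SMP or ARMv7), pages collected by tlb_remove_page() are only handed to free_pages_and_swap_cache() after the corresponding TLB entries have been invalidated, which is the point of the change.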
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h index ce7378ea15a2..d2005de383b8 100644 --- a/arch/arm/include/asm/tlbflush.h +++ b/arch/arm/include/asm/tlbflush.h | |||
@@ -10,12 +10,7 @@ | |||
10 | #ifndef _ASMARM_TLBFLUSH_H | 10 | #ifndef _ASMARM_TLBFLUSH_H |
11 | #define _ASMARM_TLBFLUSH_H | 11 | #define _ASMARM_TLBFLUSH_H |
12 | 12 | ||
13 | 13 | #ifdef CONFIG_MMU | |
14 | #ifndef CONFIG_MMU | ||
15 | |||
16 | #define tlb_flush(tlb) ((void) tlb) | ||
17 | |||
18 | #else /* CONFIG_MMU */ | ||
19 | 14 | ||
20 | #include <asm/glue.h> | 15 | #include <asm/glue.h> |
21 | 16 | ||
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c index 2c1f0050c9c4..8f6ed43861f1 100644 --- a/arch/arm/kernel/kprobes-decode.c +++ b/arch/arm/kernel/kprobes-decode.c | |||
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) | |||
1437 | 1437 | ||
1438 | return space_cccc_1100_010x(insn, asi); | 1438 | return space_cccc_1100_010x(insn, asi); |
1439 | 1439 | ||
1440 | } else if ((insn & 0x0e000000) == 0x0c400000) { | 1440 | } else if ((insn & 0x0e000000) == 0x0c000000) { |
1441 | 1441 | ||
1442 | return space_cccc_110x(insn, asi); | 1442 | return space_cccc_110x(insn, asi); |
1443 | 1443 | ||
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index b8af96ea62e6..2c79eec19262 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -97,28 +97,34 @@ set_irq_affinity(int irq, | |||
97 | irq, cpu); | 97 | irq, cpu); |
98 | return err; | 98 | return err; |
99 | #else | 99 | #else |
100 | return 0; | 100 | return -EINVAL; |
101 | #endif | 101 | #endif |
102 | } | 102 | } |
103 | 103 | ||
104 | static int | 104 | static int |
105 | init_cpu_pmu(void) | 105 | init_cpu_pmu(void) |
106 | { | 106 | { |
107 | int i, err = 0; | 107 | int i, irqs, err = 0; |
108 | struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; | 108 | struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; |
109 | 109 | ||
110 | if (!pdev) { | 110 | if (!pdev) |
111 | err = -ENODEV; | 111 | return -ENODEV; |
112 | goto out; | 112 | |
113 | } | 113 | irqs = pdev->num_resources; |
114 | |||
115 | /* | ||
116 | * If we have a single PMU interrupt that we can't shift, assume that | ||
117 | * we're running on a uniprocessor machine and continue. | ||
118 | */ | ||
119 | if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0))) | ||
120 | return 0; | ||
114 | 121 | ||
115 | for (i = 0; i < pdev->num_resources; ++i) { | 122 | for (i = 0; i < irqs; ++i) { |
116 | err = set_irq_affinity(platform_get_irq(pdev, i), i); | 123 | err = set_irq_affinity(platform_get_irq(pdev, i), i); |
117 | if (err) | 124 | if (err) |
118 | break; | 125 | break; |
119 | } | 126 | } |
120 | 127 | ||
121 | out: | ||
122 | return err; | 128 | return err; |
123 | } | 129 | } |
124 | 130 | ||
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 420b8d6485d6..5ea4fb718b97 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c | |||
@@ -226,8 +226,8 @@ int cpu_architecture(void) | |||
226 | * Register 0 and check for VMSAv7 or PMSAv7 */ | 226 | * Register 0 and check for VMSAv7 or PMSAv7 */ |
227 | asm("mrc p15, 0, %0, c0, c1, 4" | 227 | asm("mrc p15, 0, %0, c0, c1, 4" |
228 | : "=r" (mmfr0)); | 228 | : "=r" (mmfr0)); |
229 | if ((mmfr0 & 0x0000000f) == 0x00000003 || | 229 | if ((mmfr0 & 0x0000000f) >= 0x00000003 || |
230 | (mmfr0 & 0x000000f0) == 0x00000030) | 230 | (mmfr0 & 0x000000f0) >= 0x00000030) |
231 | cpu_arch = CPU_ARCH_ARMv7; | 231 | cpu_arch = CPU_ARCH_ARMv7; |
232 | else if ((mmfr0 & 0x0000000f) == 0x00000002 || | 232 | else if ((mmfr0 & 0x0000000f) == 0x00000002 || |
233 | (mmfr0 & 0x000000f0) == 0x00000020) | 233 | (mmfr0 & 0x000000f0) == 0x00000020) |
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c index 907d5a620bca..abaf8445ce25 100644 --- a/arch/arm/kernel/signal.c +++ b/arch/arm/kernel/signal.c | |||
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka, | |||
474 | unsigned long handler = (unsigned long)ka->sa.sa_handler; | 474 | unsigned long handler = (unsigned long)ka->sa.sa_handler; |
475 | unsigned long retcode; | 475 | unsigned long retcode; |
476 | int thumb = 0; | 476 | int thumb = 0; |
477 | unsigned long cpsr = regs->ARM_cpsr & ~PSR_f; | 477 | unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT); |
478 | |||
479 | cpsr |= PSR_ENDSTATE; | ||
478 | 480 | ||
479 | /* | 481 | /* |
480 | * Maybe we need to deliver a 32-bit signal to a 26-bit task. | 482 | * Maybe we need to deliver a 32-bit signal to a 26-bit task. |
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 86b66f3f2031..61462790757f 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S | |||
@@ -21,6 +21,12 @@ | |||
21 | #define ARM_CPU_KEEP(x) | 21 | #define ARM_CPU_KEEP(x) |
22 | #endif | 22 | #endif |
23 | 23 | ||
24 | #if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK) | ||
25 | #define ARM_EXIT_KEEP(x) x | ||
26 | #else | ||
27 | #define ARM_EXIT_KEEP(x) | ||
28 | #endif | ||
29 | |||
24 | OUTPUT_ARCH(arm) | 30 | OUTPUT_ARCH(arm) |
25 | ENTRY(stext) | 31 | ENTRY(stext) |
26 | 32 | ||
@@ -43,6 +49,7 @@ SECTIONS | |||
43 | _sinittext = .; | 49 | _sinittext = .; |
44 | HEAD_TEXT | 50 | HEAD_TEXT |
45 | INIT_TEXT | 51 | INIT_TEXT |
52 | ARM_EXIT_KEEP(EXIT_TEXT) | ||
46 | _einittext = .; | 53 | _einittext = .; |
47 | ARM_CPU_DISCARD(PROC_INFO) | 54 | ARM_CPU_DISCARD(PROC_INFO) |
48 | __arch_info_begin = .; | 55 | __arch_info_begin = .; |
@@ -67,6 +74,7 @@ SECTIONS | |||
67 | #ifndef CONFIG_XIP_KERNEL | 74 | #ifndef CONFIG_XIP_KERNEL |
68 | __init_begin = _stext; | 75 | __init_begin = _stext; |
69 | INIT_DATA | 76 | INIT_DATA |
77 | ARM_EXIT_KEEP(EXIT_DATA) | ||
70 | #endif | 78 | #endif |
71 | } | 79 | } |
72 | 80 | ||
@@ -162,6 +170,7 @@ SECTIONS | |||
162 | . = ALIGN(PAGE_SIZE); | 170 | . = ALIGN(PAGE_SIZE); |
163 | __init_begin = .; | 171 | __init_begin = .; |
164 | INIT_DATA | 172 | INIT_DATA |
173 | ARM_EXIT_KEEP(EXIT_DATA) | ||
165 | . = ALIGN(PAGE_SIZE); | 174 | . = ALIGN(PAGE_SIZE); |
166 | __init_end = .; | 175 | __init_end = .; |
167 | #endif | 176 | #endif |
@@ -247,6 +256,8 @@ SECTIONS | |||
247 | } | 256 | } |
248 | #endif | 257 | #endif |
249 | 258 | ||
259 | NOTES | ||
260 | |||
250 | BSS_SECTION(0, 0, 0) | 261 | BSS_SECTION(0, 0, 0) |
251 | _end = .; | 262 | _end = .; |
252 | 263 | ||
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h index cacf17a958cd..53677e464d4b 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear320.h +++ b/arch/arm/mach-spear3xx/include/mach/spear320.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #define SPEAR320_SMII1_BASE 0xAB000000 | 62 | #define SPEAR320_SMII1_BASE 0xAB000000 |
63 | #define SPEAR320_SMII1_SIZE 0x01000000 | 63 | #define SPEAR320_SMII1_SIZE 0x01000000 |
64 | 64 | ||
65 | #define SPEAR320_SOC_CONFIG_BASE 0xB4000000 | 65 | #define SPEAR320_SOC_CONFIG_BASE 0xB3000000 |
66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 | 66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 |
67 | /* Interrupt registers offsets and masks */ | 67 | /* Interrupt registers offsets and masks */ |
68 | #define INT_STS_MASK_REG 0x04 | 68 | #define INT_STS_MASK_REG 0x04 |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..f2ce38e085d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) | |||
49 | static inline void cache_sync(void) | 49 | static inline void cache_sync(void) |
50 | { | 50 | { |
51 | void __iomem *base = l2x0_base; | 51 | void __iomem *base = l2x0_base; |
52 | |||
53 | #ifdef CONFIG_ARM_ERRATA_753970 | ||
54 | /* write to an unmapped register */ | ||
55 | writel_relaxed(0, base + L2X0_DUMMY_REG); | ||
56 | #else | ||
52 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | 57 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
58 | #endif | ||
53 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 59 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
54 | } | 60 | } |
55 | 61 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4e..8e3356239136 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -264,6 +264,12 @@ __v7_setup: | |||
264 | orreq r10, r10, #1 << 6 @ set bit #6 | 264 | orreq r10, r10, #1 << 6 @ set bit #6 |
265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
266 | #endif | 266 | #endif |
267 | #ifdef CONFIG_ARM_ERRATA_751472 | ||
268 | cmp r6, #0x30 @ present prior to r3p0 | ||
269 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
270 | orrlt r10, r10, #1 << 11 @ set bit #11 | ||
271 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
272 | #endif | ||
267 | 273 | ||
268 | 3: mov r10, #0 | 274 | 3: mov r10, #0 |
269 | #ifdef HARVARD_CACHE | 275 | #ifdef HARVARD_CACHE |
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 99ba6789cc97..6dd455bafdfd 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h | |||
@@ -24,10 +24,10 @@ static inline void putc(int c) | |||
24 | { | 24 | { |
25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; | 25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; |
26 | 26 | ||
27 | while (readl(base + UART01x_FR) & UART01x_FR_TXFF) | 27 | while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF) |
28 | barrier(); | 28 | barrier(); |
29 | 29 | ||
30 | writel(c, base + UART01x_DR); | 30 | writel_relaxed(c, base + UART01x_DR); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void flush(void) | 33 | static inline void flush(void) |
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h index 09e9372aea21..8c8b24d07046 100644 --- a/arch/arm/plat-spear/include/plat/vmalloc.h +++ b/arch/arm/plat-spear/include/plat/vmalloc.h | |||
@@ -14,6 +14,6 @@ | |||
14 | #ifndef __PLAT_VMALLOC_H | 14 | #ifndef __PLAT_VMALLOC_H |
15 | #define __PLAT_VMALLOC_H | 15 | #define __PLAT_VMALLOC_H |
16 | 16 | ||
17 | #define VMALLOC_END 0xF0000000 | 17 | #define VMALLOC_END 0xF0000000UL |
18 | 18 | ||
19 | #endif /* __PLAT_VMALLOC_H */ | 19 | #endif /* __PLAT_VMALLOC_H */ |
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 0851eb1e919e..2751b3a8a66f 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void) | |||
133 | unsigned long output_addr; | 133 | unsigned long output_addr; |
134 | unsigned char *output; | 134 | unsigned char *output; |
135 | 135 | ||
136 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | 136 | output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; |
137 | check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); | ||
137 | memset(&_bss, 0, &_ebss - &_bss); | 138 | memset(&_bss, 0, &_ebss - &_bss); |
138 | free_mem_ptr = (unsigned long)&_end; | 139 | free_mem_ptr = (unsigned long)&_end; |
139 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | 140 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
140 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); | 141 | output = (unsigned char *) output_addr; |
141 | 142 | ||
142 | #ifdef CONFIG_BLK_DEV_INITRD | 143 | #ifdef CONFIG_BLK_DEV_INITRD |
143 | /* | 144 | /* |
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 76daea117181..5c5ba10384c2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -36,14 +36,19 @@ | |||
36 | 36 | ||
37 | static inline int atomic_read(const atomic_t *v) | 37 | static inline int atomic_read(const atomic_t *v) |
38 | { | 38 | { |
39 | barrier(); | 39 | int c; |
40 | return v->counter; | 40 | |
41 | asm volatile( | ||
42 | " l %0,%1\n" | ||
43 | : "=d" (c) : "Q" (v->counter)); | ||
44 | return c; | ||
41 | } | 45 | } |
42 | 46 | ||
43 | static inline void atomic_set(atomic_t *v, int i) | 47 | static inline void atomic_set(atomic_t *v, int i) |
44 | { | 48 | { |
45 | v->counter = i; | 49 | asm volatile( |
46 | barrier(); | 50 | " st %1,%0\n" |
51 | : "=Q" (v->counter) : "d" (i)); | ||
47 | } | 52 | } |
48 | 53 | ||
49 | static inline int atomic_add_return(int i, atomic_t *v) | 54 | static inline int atomic_add_return(int i, atomic_t *v) |
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
128 | 133 | ||
129 | static inline long long atomic64_read(const atomic64_t *v) | 134 | static inline long long atomic64_read(const atomic64_t *v) |
130 | { | 135 | { |
131 | barrier(); | 136 | long long c; |
132 | return v->counter; | 137 | |
138 | asm volatile( | ||
139 | " lg %0,%1\n" | ||
140 | : "=d" (c) : "Q" (v->counter)); | ||
141 | return c; | ||
133 | } | 142 | } |
134 | 143 | ||
135 | static inline void atomic64_set(atomic64_t *v, long long i) | 144 | static inline void atomic64_set(atomic64_t *v, long long i) |
136 | { | 145 | { |
137 | v->counter = i; | 146 | asm volatile( |
138 | barrier(); | 147 | " stg %1,%0\n" |
148 | : "=Q" (v->counter) : "d" (i)); | ||
139 | } | 149 | } |
140 | 150 | ||
141 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | 151 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 24aafa68b643..2a30d5ac0667 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #define L1_CACHE_BYTES 256 | 14 | #define L1_CACHE_BYTES 256 |
15 | #define L1_CACHE_SHIFT 8 | 15 | #define L1_CACHE_SHIFT 8 |
16 | #define NET_SKB_PAD 32 | ||
16 | 17 | ||
17 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | 18 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
18 | 19 | ||
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index a2f5c61f924e..843e4faf6a50 100644 --- a/arch/sparc/include/asm/pcr.h +++ b/arch/sparc/include/asm/pcr.h | |||
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz) | |||
43 | 43 | ||
44 | extern u64 pcr_enable; | 44 | extern u64 pcr_enable; |
45 | 45 | ||
46 | extern int pcr_arch_init(void); | ||
47 | |||
46 | #endif /* __PCR_H */ | 48 | #endif /* __PCR_H */ |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c6..72509d0e34be 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, | |||
255 | static int iommu_alloc_ctx(struct iommu *iommu) | 255 | static int iommu_alloc_ctx(struct iommu *iommu) |
256 | { | 256 | { |
257 | int lowest = iommu->ctx_lowest_free; | 257 | int lowest = iommu->ctx_lowest_free; |
258 | int sz = IOMMU_NUM_CTXS - lowest; | 258 | int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); |
259 | int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); | ||
260 | 259 | ||
261 | if (unlikely(n == sz)) { | 260 | if (unlikely(n == IOMMU_NUM_CTXS)) { |
262 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); | 261 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); |
263 | if (unlikely(n == lowest)) { | 262 | if (unlikely(n == lowest)) { |
264 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); | 263 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); |
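The fix above hinges on the prototype of find_next_zero_bit(): its second argument is the total size of the bitmap in bits, not the number of bits remaining after the offset. A small illustration, with a made-up 64-entry bitmap standing in for iommu->ctx_bitmap:

	#include <linux/bitmap.h>

	#define NCTX 64

	static DECLARE_BITMAP(ctx_map, NCTX);

	static int find_free_ctx(int lowest)
	{
		int n = find_next_zero_bit(ctx_map, NCTX, lowest);		/* correct */
		/* int n = find_next_zero_bit(ctx_map, NCTX - lowest, lowest);	   wrong  */

		return (n == NCTX) ? -1 : n;	/* NCTX means no zero bit was found */
	}

With the shortened size, the search (and the "not found" test against that shortened size) stops well before the end of the bitmap once lowest grows, which is what the patch corrects.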
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index ae96cf52a955..7c2ced612b8f 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -167,5 +167,3 @@ out_unregister: | |||
167 | unregister_perf_hsvc(); | 167 | unregister_perf_hsvc(); |
168 | return err; | 168 | return err; |
169 | } | 169 | } |
170 | |||
171 | early_initcall(pcr_arch_init); | ||
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f47040..555a76d1f4a1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mdesc.h> | 49 | #include <asm/mdesc.h> |
50 | #include <asm/ldc.h> | 50 | #include <asm/ldc.h> |
51 | #include <asm/hypervisor.h> | 51 | #include <asm/hypervisor.h> |
52 | #include <asm/pcr.h> | ||
52 | 53 | ||
53 | #include "cpumap.h" | 54 | #include "cpumap.h" |
54 | 55 | ||
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu) | |||
1358 | 1359 | ||
1359 | void __init smp_cpus_done(unsigned int max_cpus) | 1360 | void __init smp_cpus_done(unsigned int max_cpus) |
1360 | { | 1361 | { |
1362 | pcr_arch_init(); | ||
1361 | } | 1363 | } |
1362 | 1364 | ||
1363 | void smp_send_reschedule(int cpu) | 1365 | void smp_send_reschedule(int cpu) |
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7e..8f096e84a937 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S | |||
@@ -24,9 +24,9 @@ retl_efault: | |||
24 | .globl __do_int_store | 24 | .globl __do_int_store |
25 | __do_int_store: | 25 | __do_int_store: |
26 | ld [%o2], %g1 | 26 | ld [%o2], %g1 |
27 | cmp %1, 2 | 27 | cmp %o1, 2 |
28 | be 2f | 28 | be 2f |
29 | cmp %1, 4 | 29 | cmp %o1, 4 |
30 | be 1f | 30 | be 1f |
31 | srl %g1, 24, %g2 | 31 | srl %g1, 24, %g2 |
32 | srl %g1, 16, %g7 | 32 | srl %g1, 16, %g7 |
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 764b3eb7b604..48d00e72ce15 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/bitops.h> | 13 | #include <linux/bitmap.h> |
14 | 14 | ||
15 | #include <asm/bitext.h> | 15 | #include <asm/bitext.h> |
16 | 16 | ||
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align) | |||
80 | while (test_bit(offset + i, t->map) == 0) { | 80 | while (test_bit(offset + i, t->map) == 0) { |
81 | i++; | 81 | i++; |
82 | if (i == len) { | 82 | if (i == len) { |
83 | for (i = 0; i < len; i++) | 83 | bitmap_set(t->map, offset, len); |
84 | __set_bit(offset + i, t->map); | ||
85 | if (offset == t->first_free) | 84 | if (offset == t->first_free) |
86 | t->first_free = find_next_zero_bit | 85 | t->first_free = find_next_zero_bit |
87 | (t->map, t->size, | 86 | (t->map, t->size, |
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 73fb1c4f4cd4..25ef1a4556e6 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc) | |||
866 | } | 866 | } |
867 | 867 | ||
868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); | 868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); |
869 | if (!skb && net_ratelimit()) { | 869 | if (!skb) { |
870 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | 870 | if (net_ratelimit()) |
871 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | ||
871 | return -ENOMEM; | 872 | return -ENOMEM; |
872 | } | 873 | } |
873 | header = (void *)skb_put(skb, sizeof(*header)); | 874 | header = (void *)skb_put(skb, sizeof(*header)); |
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a126e614601f..333c21289d97 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -39,6 +39,8 @@ static struct usb_device_id ath3k_table[] = { | |||
39 | /* Atheros AR3011 with sflash firmware*/ | 39 | /* Atheros AR3011 with sflash firmware*/ |
40 | { USB_DEVICE(0x0CF3, 0x3002) }, | 40 | { USB_DEVICE(0x0CF3, 0x3002) }, |
41 | 41 | ||
42 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
43 | { USB_DEVICE(0x03F0, 0x311D) }, | ||
42 | { } /* Terminating entry */ | 44 | { } /* Terminating entry */ |
43 | }; | 45 | }; |
44 | 46 | ||
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1da773f899a2..4cefa91e6c34 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -102,6 +102,9 @@ static struct usb_device_id blacklist_table[] = { | |||
102 | /* Atheros 3011 with sflash firmware */ | 102 | /* Atheros 3011 with sflash firmware */ |
103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, | 103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
104 | 104 | ||
105 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
106 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | ||
107 | |||
105 | /* Broadcom BCM2035 */ | 108 | /* Broadcom BCM2035 */ |
106 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, | 109 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, |
107 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, | 110 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index faf5a2c65926..36e0fa161c2b 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -577,11 +577,9 @@ duration: | |||
577 | if (rc) | 577 | if (rc) |
578 | return; | 578 | return; |
579 | 579 | ||
580 | if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || | 580 | if (be32_to_cpu(tpm_cmd.header.out.return_code) |
581 | be32_to_cpu(tpm_cmd.header.out.length) | 581 | != 3 * sizeof(u32)) |
582 | != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) | ||
583 | return; | 582 | return; |
584 | |||
585 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; | 583 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; |
586 | chip->vendor.duration[TPM_SHORT] = | 584 | chip->vendor.duration[TPM_SHORT] = |
587 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); | 585 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); |
@@ -941,18 +939,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev, | |||
941 | } | 939 | } |
942 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); | 940 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); |
943 | 941 | ||
944 | ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr, | ||
945 | char *buf) | ||
946 | { | ||
947 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
948 | |||
949 | return sprintf(buf, "%d %d %d\n", | ||
950 | jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), | ||
951 | jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), | ||
952 | jiffies_to_usecs(chip->vendor.duration[TPM_LONG])); | ||
953 | } | ||
954 | EXPORT_SYMBOL_GPL(tpm_show_timeouts); | ||
955 | |||
956 | ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, | 942 | ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, |
957 | const char *buf, size_t count) | 943 | const char *buf, size_t count) |
958 | { | 944 | { |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index d84ff772c26f..72ddb031b69a 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr, | |||
56 | char *); | 56 | char *); |
57 | extern ssize_t tpm_show_temp_deactivated(struct device *, | 57 | extern ssize_t tpm_show_temp_deactivated(struct device *, |
58 | struct device_attribute *attr, char *); | 58 | struct device_attribute *attr, char *); |
59 | extern ssize_t tpm_show_timeouts(struct device *, | ||
60 | struct device_attribute *attr, char *); | ||
61 | 59 | ||
62 | struct tpm_chip; | 60 | struct tpm_chip; |
63 | 61 | ||
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 0d1d38e5f266..dd21df55689d 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, | |||
376 | NULL); | 376 | NULL); |
377 | static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); | 377 | static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); |
378 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); | 378 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); |
379 | static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); | ||
380 | 379 | ||
381 | static struct attribute *tis_attrs[] = { | 380 | static struct attribute *tis_attrs[] = { |
382 | &dev_attr_pubek.attr, | 381 | &dev_attr_pubek.attr, |
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = { | |||
386 | &dev_attr_owned.attr, | 385 | &dev_attr_owned.attr, |
387 | &dev_attr_temp_deactivated.attr, | 386 | &dev_attr_temp_deactivated.attr, |
388 | &dev_attr_caps.attr, | 387 | &dev_attr_caps.attr, |
389 | &dev_attr_cancel.attr, | 388 | &dev_attr_cancel.attr, NULL, |
390 | &dev_attr_timeouts.attr, NULL, | ||
391 | }; | 389 | }; |
392 | 390 | ||
393 | static struct attribute_group tis_attr_grp = { | 391 | static struct attribute_group tis_attr_grp = { |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 49e5e99917e2..6bdab891c64e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
6228 | entry->tvconf.has_component_output = false; | 6228 | entry->tvconf.has_component_output = false; |
6229 | break; | 6229 | break; |
6230 | case OUTPUT_LVDS: | 6230 | case OUTPUT_LVDS: |
6231 | if ((conn & 0x00003f00) != 0x10) | 6231 | if ((conn & 0x00003f00) >> 8 != 0x10) |
6232 | entry->lvdsconf.use_straps_for_mode = true; | 6232 | entry->lvdsconf.use_straps_for_mode = true; |
6233 | entry->lvdsconf.use_power_scripts = true; | 6233 | entry->lvdsconf.use_power_scripts = true; |
6234 | break; | 6234 | break; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a7fae26f4654..d38a4d9f9b0b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -128,6 +128,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
128 | } | 128 | } |
129 | } | 129 | } |
130 | 130 | ||
131 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; | ||
131 | nouveau_bo_placement_set(nvbo, flags, 0); | 132 | nouveau_bo_placement_set(nvbo, flags, 0); |
132 | 133 | ||
133 | nvbo->channel = chan; | 134 | nvbo->channel = chan; |
@@ -166,17 +167,17 @@ static void | |||
166 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 167 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
167 | { | 168 | { |
168 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 169 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
170 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
169 | 171 | ||
170 | if (dev_priv->card_type == NV_10 && | 172 | if (dev_priv->card_type == NV_10 && |
171 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { | 173 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
174 | nvbo->bo.mem.num_pages < vram_pages / 2) { | ||
172 | /* | 175 | /* |
173 | * Make sure that the color and depth buffers are handled | 176 | * Make sure that the color and depth buffers are handled |
174 | * by independent memory controller units. Up to a 9x | 177 | * by independent memory controller units. Up to a 9x |
175 | * speed up when alpha-blending and depth-test are enabled | 178 | * speed up when alpha-blending and depth-test are enabled |
176 | * at the same time. | 179 | * at the same time. |
177 | */ | 180 | */ |
178 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
179 | |||
180 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { | 181 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { |
181 | nvbo->placement.fpfn = vram_pages / 2; | 182 | nvbo->placement.fpfn = vram_pages / 2; |
182 | nvbo->placement.lpfn = ~0; | 183 | nvbo->placement.lpfn = ~0; |
@@ -785,7 +786,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
785 | if (ret) | 786 | if (ret) |
786 | goto out; | 787 | goto out; |
787 | 788 | ||
788 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); | 789 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
789 | out: | 790 | out: |
790 | ttm_bo_mem_put(bo, &tmp_mem); | 791 | ttm_bo_mem_put(bo, &tmp_mem); |
791 | return ret; | 792 | return ret; |
@@ -811,11 +812,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
811 | if (ret) | 812 | if (ret) |
812 | return ret; | 813 | return ret; |
813 | 814 | ||
814 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); | 815 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
815 | if (ret) | 816 | if (ret) |
816 | goto out; | 817 | goto out; |
817 | 818 | ||
818 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); | 819 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); |
819 | if (ret) | 820 | if (ret) |
820 | goto out; | 821 | goto out; |
821 | 822 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a21e00076839..390d82c3c4b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector) | |||
507 | int high_w = 0, high_h = 0, high_v = 0; | 507 | int high_w = 0, high_h = 0, high_v = 0; |
508 | 508 | ||
509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { | 509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
510 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
510 | if (helper->mode_valid(connector, mode) != MODE_OK || | 511 | if (helper->mode_valid(connector, mode) != MODE_OK || |
511 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 512 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) |
512 | continue; | 513 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index f05c0cddfeca..4399e2f34db4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev) | |||
543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
544 | struct nouveau_pm_level *perflvl; | 544 | struct nouveau_pm_level *perflvl; |
545 | 545 | ||
546 | if (pm->cur == &pm->boot) | 546 | if (!pm->cur || pm->cur == &pm->boot) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | perflvl = pm->cur; | 549 | perflvl = pm->cur; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index ef23550407b5..c82db37d9f41 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
343 | bool duallink, dummy; | 343 | bool duallink, dummy; |
344 | 344 | ||
345 | nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> | 345 | nouveau_bios_parse_lvds_table(dev, output_mode->clock, |
346 | clock, &duallink, &dummy); | 346 | &duallink, &dummy); |
347 | if (duallink) | 347 | if (duallink) |
348 | regp->fp_control |= (8 << 28); | 348 | regp->fp_control |= (8 << 28); |
349 | } else | 349 | } else |
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
518 | return; | 518 | return; |
519 | 519 | ||
520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { | 520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { |
521 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
522 | |||
523 | /* when removing an output, crtc may not be set, but PANEL_OFF | 521 | /* when removing an output, crtc may not be set, but PANEL_OFF |
524 | * must still be run | 522 | * must still be run |
525 | */ | 523 | */ |
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
527 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); | 525 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); |
528 | 526 | ||
529 | if (mode == DRM_MODE_DPMS_ON) { | 527 | if (mode == DRM_MODE_DPMS_ON) { |
530 | if (!nv_connector->native_mode) { | ||
531 | NV_ERROR(dev, "Not turning on LVDS without native mode\n"); | ||
532 | return; | ||
533 | } | ||
534 | call_lvds_script(dev, nv_encoder->dcb, head, | 528 | call_lvds_script(dev, nv_encoder->dcb, head, |
535 | LVDS_PANEL_ON, nv_connector->native_mode->clock); | 529 | LVDS_PANEL_ON, nv_encoder->mode.clock); |
536 | } else | 530 | } else |
537 | /* pxclk of 0 is fine for PANEL_OFF, and for a | 531 | /* pxclk of 0 is fine for PANEL_OFF, and for a |
538 | * disconnected LVDS encoder there is no native_mode | 532 | * disconnected LVDS encoder there is no native_mode |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 8870d72388c8..18d30c2c1aa6 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
212 | 212 | ||
213 | switch (dev_priv->chipset) { | 213 | switch (dev_priv->chipset) { |
214 | case 0x40: | ||
215 | case 0x41: /* guess */ | ||
216 | case 0x42: | ||
217 | case 0x43: | ||
218 | case 0x45: /* guess */ | ||
219 | case 0x4e: | ||
220 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
221 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
222 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
223 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
224 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
225 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
226 | break; | ||
214 | case 0x44: | 227 | case 0x44: |
215 | case 0x4a: | 228 | case 0x4a: |
216 | case 0x4e: | ||
217 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | 229 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
218 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | 230 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
219 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | 231 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
220 | break; | 232 | break; |
221 | |||
222 | case 0x46: | 233 | case 0x46: |
223 | case 0x47: | 234 | case 0x47: |
224 | case 0x49: | 235 | case 0x49: |
225 | case 0x4b: | 236 | case 0x4b: |
237 | case 0x4c: | ||
238 | case 0x67: | ||
239 | default: | ||
226 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); | 240 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); |
227 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); | 241 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); |
228 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); | 242 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); |
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
230 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | 244 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
231 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | 245 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
232 | break; | 246 | break; |
233 | |||
234 | default: | ||
235 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
236 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
237 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
238 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
239 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
240 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
241 | break; | ||
242 | } | 247 | } |
243 | } | 248 | } |
244 | 249 | ||
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev) | |||
396 | break; | 401 | break; |
397 | default: | 402 | default: |
398 | switch (dev_priv->chipset) { | 403 | switch (dev_priv->chipset) { |
399 | case 0x46: | 404 | case 0x41: |
400 | case 0x47: | 405 | case 0x42: |
401 | case 0x49: | 406 | case 0x43: |
402 | case 0x4b: | 407 | case 0x45: |
403 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | 408 | case 0x4e: |
404 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | 409 | case 0x44: |
405 | break; | 410 | case 0x4a: |
406 | default: | ||
407 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); | 411 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); |
408 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); | 412 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); |
409 | break; | 413 | break; |
414 | default: | ||
415 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
416 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
417 | break; | ||
410 | } | 418 | } |
411 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); | 419 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); |
412 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); | 420 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 095bc507fb16..a4e5e53e0a62 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
557 | 557 | ||
558 | /* use recommended ref_div for ss */ | 558 | /* use recommended ref_div for ss */ |
559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
560 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
561 | if (ss_enabled) { | 560 | if (ss_enabled) { |
562 | if (ss->refdiv) { | 561 | if (ss->refdiv) { |
562 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
563 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 563 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
564 | pll->reference_div = ss->refdiv; | 564 | pll->reference_div = ss->refdiv; |
565 | if (ASIC_IS_AVIVO(rdev)) | 565 | if (ASIC_IS_AVIVO(rdev)) |
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
662 | index, (uint32_t *)&args); | 662 | index, (uint32_t *)&args); |
663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
664 | if (args.v3.sOutput.ucRefDiv) { | 664 | if (args.v3.sOutput.ucRefDiv) { |
665 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
665 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 666 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
666 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 667 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
667 | } | 668 | } |
668 | if (args.v3.sOutput.ucPostDiv) { | 669 | if (args.v3.sOutput.ucPostDiv) { |
670 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
669 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 671 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
670 | pll->post_div = args.v3.sOutput.ucPostDiv; | 672 | pll->post_div = args.v3.sOutput.ucPostDiv; |
671 | } | 673 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 768c60ee4ab6..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
911 | break; | 911 | break; |
912 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
913 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
914 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
915 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
922 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
923 | break; | 924 | break; |
924 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
925 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
926 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
927 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 773e484f1646..297bc9a7d6e6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP | |||
238 | will be called k8temp. | 238 | will be called k8temp. |
239 | 239 | ||
240 | config SENSORS_K10TEMP | 240 | config SENSORS_K10TEMP |
241 | tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" | 241 | tristate "AMD Family 10h/11h/12h/14h temperature sensor" |
242 | depends on X86 && PCI | 242 | depends on X86 && PCI |
243 | help | 243 | help |
244 | If you say yes here you get support for the temperature | 244 | If you say yes here you get support for the temperature |
245 | sensor(s) inside your CPU. Supported are later revisions of | 245 | sensor(s) inside your CPU. Supported are later revisions of |
246 | the AMD Family 10h and all revisions of the AMD Family 11h | 246 | the AMD Family 10h and all revisions of the AMD Family 11h, |
247 | microarchitectures. | 247 | 12h (Llano), and 14h (Brazos) microarchitectures. |
248 | 248 | ||
249 | This driver can also be built as a module. If so, the module | 249 | This driver can also be built as a module. If so, the module |
250 | will be called k10temp. | 250 | will be called k10temp. |
@@ -455,13 +455,14 @@ config SENSORS_JZ4740 | |||
455 | called jz4740-hwmon. | 455 | called jz4740-hwmon. |
456 | 456 | ||
457 | config SENSORS_JC42 | 457 | config SENSORS_JC42 |
458 | tristate "JEDEC JC42.4 compliant temperature sensors" | 458 | tristate "JEDEC JC42.4 compliant memory module temperature sensors" |
459 | depends on I2C | 459 | depends on I2C |
460 | help | 460 | help |
461 | If you say yes here you get support for Jedec JC42.4 compliant | 461 | If you say yes here, you get support for JEDEC JC42.4 compliant |
462 | temperature sensors. Support will include, but not be limited to, | 462 | temperature sensors, which are used on many DDR3 memory modules for |
463 | ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, | 463 | mobile devices and servers. Support will include, but not be limited |
464 | MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. | 464 | to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, |
465 | MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. | ||
465 | 466 | ||
466 | This driver can also be built as a module. If so, the module | 467 | This driver can also be built as a module. If so, the module |
467 | will be called jc42. | 468 | will be called jc42. |
@@ -574,7 +575,7 @@ config SENSORS_LM85 | |||
574 | help | 575 | help |
575 | If you say yes here you get support for National Semiconductor LM85 | 576 | If you say yes here you get support for National Semiconductor LM85 |
576 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, | 577 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, |
577 | EMC6D101 and EMC6D102. | 578 | EMC6D101, EMC6D102, and EMC6D103. |
578 | 579 | ||
579 | This driver can also be built as a module. If so, the module | 580 | This driver can also be built as a module. If so, the module |
580 | will be called lm85. | 581 | will be called lm85. |
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 340fc78c8dde..934991237061 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c | |||
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = { | |||
53 | 53 | ||
54 | /* Configuration register defines */ | 54 | /* Configuration register defines */ |
55 | #define JC42_CFG_CRIT_ONLY (1 << 2) | 55 | #define JC42_CFG_CRIT_ONLY (1 << 2) |
56 | #define JC42_CFG_TCRIT_LOCK (1 << 6) | ||
57 | #define JC42_CFG_EVENT_LOCK (1 << 7) | ||
56 | #define JC42_CFG_SHUTDOWN (1 << 8) | 58 | #define JC42_CFG_SHUTDOWN (1 << 8) |
57 | #define JC42_CFG_HYST_SHIFT 9 | 59 | #define JC42_CFG_HYST_SHIFT 9 |
58 | #define JC42_CFG_HYST_MASK 0x03 | 60 | #define JC42_CFG_HYST_MASK 0x03 |
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, | |||
332 | { | 334 | { |
333 | struct i2c_client *client = to_i2c_client(dev); | 335 | struct i2c_client *client = to_i2c_client(dev); |
334 | struct jc42_data *data = i2c_get_clientdata(client); | 336 | struct jc42_data *data = i2c_get_clientdata(client); |
335 | long val; | 337 | unsigned long val; |
336 | int diff, hyst; | 338 | int diff, hyst; |
337 | int err; | 339 | int err; |
338 | int ret = count; | 340 | int ret = count; |
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev, | |||
380 | 382 | ||
381 | static DEVICE_ATTR(temp1_input, S_IRUGO, | 383 | static DEVICE_ATTR(temp1_input, S_IRUGO, |
382 | show_temp_input, NULL); | 384 | show_temp_input, NULL); |
383 | static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, | 385 | static DEVICE_ATTR(temp1_crit, S_IRUGO, |
384 | show_temp_crit, set_temp_crit); | 386 | show_temp_crit, set_temp_crit); |
385 | static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, | 387 | static DEVICE_ATTR(temp1_min, S_IRUGO, |
386 | show_temp_min, set_temp_min); | 388 | show_temp_min, set_temp_min); |
387 | static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, | 389 | static DEVICE_ATTR(temp1_max, S_IRUGO, |
388 | show_temp_max, set_temp_max); | 390 | show_temp_max, set_temp_max); |
389 | 391 | ||
390 | static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, | 392 | static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, |
391 | show_temp_crit_hyst, set_temp_crit_hyst); | 393 | show_temp_crit_hyst, set_temp_crit_hyst); |
392 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, | 394 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, |
393 | show_temp_max_hyst, NULL); | 395 | show_temp_max_hyst, NULL); |
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = { | |||
412 | NULL | 414 | NULL |
413 | }; | 415 | }; |
414 | 416 | ||
417 | static mode_t jc42_attribute_mode(struct kobject *kobj, | ||
418 | struct attribute *attr, int index) | ||
419 | { | ||
420 | struct device *dev = container_of(kobj, struct device, kobj); | ||
421 | struct i2c_client *client = to_i2c_client(dev); | ||
422 | struct jc42_data *data = i2c_get_clientdata(client); | ||
423 | unsigned int config = data->config; | ||
424 | bool readonly; | ||
425 | |||
426 | if (attr == &dev_attr_temp1_crit.attr) | ||
427 | readonly = config & JC42_CFG_TCRIT_LOCK; | ||
428 | else if (attr == &dev_attr_temp1_min.attr || | ||
429 | attr == &dev_attr_temp1_max.attr) | ||
430 | readonly = config & JC42_CFG_EVENT_LOCK; | ||
431 | else if (attr == &dev_attr_temp1_crit_hyst.attr) | ||
432 | readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK); | ||
433 | else | ||
434 | readonly = true; | ||
435 | |||
436 | return S_IRUGO | (readonly ? 0 : S_IWUSR); | ||
437 | } | ||
438 | |||
415 | static const struct attribute_group jc42_group = { | 439 | static const struct attribute_group jc42_group = { |
416 | .attrs = jc42_attributes, | 440 | .attrs = jc42_attributes, |
441 | .is_visible = jc42_attribute_mode, | ||
417 | }; | 442 | }; |
418 | 443 | ||
419 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 444 | /* Return 0 if detection is successful, -ENODEV otherwise */ |
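The jc42.c hunk above is where the new lock handling happens: instead of hard-coding S_IWUSR on the limit attributes, the driver registers an is_visible callback on its attribute group and computes each attribute's mode from the TCRIT/EVENT lock bits cached at probe time. A minimal sketch of the same sysfs pattern for a hypothetical driver (foo_data, foo_attributes, FOO_CFG_LOCK and dev_attr_limit are illustrative names, not taken from the patch):

    /* Return the sysfs mode for each attribute in the group; called once
     * per attribute when the group is created. */
    static mode_t foo_attr_mode(struct kobject *kobj,
                                struct attribute *attr, int index)
    {
        struct device *dev = container_of(kobj, struct device, kobj);
        struct foo_data *data = dev_get_drvdata(dev);

        if (attr == &dev_attr_limit.attr && (data->config & FOO_CFG_LOCK))
            return S_IRUGO;                 /* locked: read-only */
        return S_IRUGO | S_IWUSR;
    }

    static const struct attribute_group foo_group = {
        .attrs      = foo_attributes,
        .is_visible = foo_attr_mode,
    };

Because the mode is evaluated when sysfs_create_group() runs, the lock bits must already be cached (here in data->config) before the group is registered.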
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index da5a2404cd3e..82bf65aa2968 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * k10temp.c - AMD Family 10h/11h processor hardware monitoring | 2 | * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> | 4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> |
5 | * | 5 | * |
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
27 | 27 | ||
28 | MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); | 28 | MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); |
29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); | 29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); |
30 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
31 | 31 | ||
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) | |||
208 | static const struct pci_device_id k10temp_id_table[] = { | 208 | static const struct pci_device_id k10temp_id_table[] = { |
209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, | 210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, |
211 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | ||
211 | {} | 212 | {} |
212 | }; | 213 | }; |
213 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); | 214 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
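The k10temp change is a pure device-table addition: the Family 12h/14h parts use a different PCI device ID for the northbridge function that carries the thermal registers, so a third entry is appended and MODULE_DEVICE_TABLE() keeps module autoloading working for it. As a sketch of how such a table is wired into a driver (names other than the PCI macros are illustrative, and example_probe/example_remove are assumed to exist with the usual pci_driver prototypes):

    static const struct pci_device_id example_ids[] = {
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },  /* newly added function */
        { }                                 /* zero entry terminates the table */
    };
    MODULE_DEVICE_TABLE(pci, example_ids);  /* exported for udev/modprobe matching */

    static struct pci_driver example_driver = {
        .name     = "example",
        .id_table = example_ids,
        .probe    = example_probe,          /* called once per matching PCI function */
        .remove   = __devexit_p(example_remove),
    };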
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 1e229847f37a..d2cc28660816 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c | |||
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; | |||
41 | enum chips { | 41 | enum chips { |
42 | any_chip, lm85b, lm85c, | 42 | any_chip, lm85b, lm85c, |
43 | adm1027, adt7463, adt7468, | 43 | adm1027, adt7463, adt7468, |
44 | emc6d100, emc6d102 | 44 | emc6d100, emc6d102, emc6d103 |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* The LM85 registers */ | 47 | /* The LM85 registers */ |
@@ -90,6 +90,9 @@ enum chips { | |||
90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 | 90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 |
91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 | 91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 |
92 | #define LM85_VERSTEP_EMC6D102 0x65 | 92 | #define LM85_VERSTEP_EMC6D102 0x65 |
93 | #define LM85_VERSTEP_EMC6D103_A0 0x68 | ||
94 | #define LM85_VERSTEP_EMC6D103_A1 0x69 | ||
95 | #define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */ | ||
93 | 96 | ||
94 | #define LM85_REG_CONFIG 0x40 | 97 | #define LM85_REG_CONFIG 0x40 |
95 | 98 | ||
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = { | |||
348 | { "emc6d100", emc6d100 }, | 351 | { "emc6d100", emc6d100 }, |
349 | { "emc6d101", emc6d100 }, | 352 | { "emc6d101", emc6d100 }, |
350 | { "emc6d102", emc6d102 }, | 353 | { "emc6d102", emc6d102 }, |
354 | { "emc6d103", emc6d103 }, | ||
351 | { } | 355 | { } |
352 | }; | 356 | }; |
353 | MODULE_DEVICE_TABLE(i2c, lm85_id); | 357 | MODULE_DEVICE_TABLE(i2c, lm85_id); |
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) | |||
1250 | case LM85_VERSTEP_EMC6D102: | 1254 | case LM85_VERSTEP_EMC6D102: |
1251 | type_name = "emc6d102"; | 1255 | type_name = "emc6d102"; |
1252 | break; | 1256 | break; |
1257 | case LM85_VERSTEP_EMC6D103_A0: | ||
1258 | case LM85_VERSTEP_EMC6D103_A1: | ||
1259 | type_name = "emc6d103"; | ||
1260 | break; | ||
1261 | /* | ||
1262 | * Registers apparently missing in EMC6D103S/EMC6D103:A2 | ||
1263 | * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 | ||
1264 | * (according to the data sheets), but used unconditionally | ||
1265 | * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. | ||
1266 | * So skip EMC6D103S for now. | ||
1267 | case LM85_VERSTEP_EMC6D103S: | ||
1268 | type_name = "emc6d103s"; | ||
1269 | break; | ||
1270 | */ | ||
1253 | } | 1271 | } |
1254 | } else { | 1272 | } else { |
1255 | dev_dbg(&adapter->dev, | 1273 | dev_dbg(&adapter->dev, |
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client, | |||
1283 | case adt7468: | 1301 | case adt7468: |
1284 | case emc6d100: | 1302 | case emc6d100: |
1285 | case emc6d102: | 1303 | case emc6d102: |
1304 | case emc6d103: | ||
1286 | data->freq_map = adm1027_freq_map; | 1305 | data->freq_map = adm1027_freq_map; |
1287 | break; | 1306 | break; |
1288 | default: | 1307 | default: |
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) | |||
1468 | /* More alarm bits */ | 1487 | /* More alarm bits */ |
1469 | data->alarms |= lm85_read_value(client, | 1488 | data->alarms |= lm85_read_value(client, |
1470 | EMC6D100_REG_ALARM3) << 16; | 1489 | EMC6D100_REG_ALARM3) << 16; |
1471 | } else if (data->type == emc6d102) { | 1490 | } else if (data->type == emc6d102 || data->type == emc6d103) { |
1472 | /* Have to read LSB bits after the MSB ones because | 1491 | /* Have to read LSB bits after the MSB ones because |
1473 | the reading of the MSB bits has frozen the | 1492 | the reading of the MSB bits has frozen the |
1474 | LSBs (backward from the ADM1027). | 1493 | LSBs (backward from the ADM1027). |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b606fd64022..08c194861af5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2610 | netif_carrier_on(nesvnic->netdev); | 2610 | netif_carrier_on(nesvnic->netdev); |
2611 | 2611 | ||
2612 | spin_lock(&nesvnic->port_ibevent_lock); | 2612 | spin_lock(&nesvnic->port_ibevent_lock); |
2613 | if (nesdev->iw_status == 0) { | 2613 | if (nesvnic->of_device_registered) { |
2614 | nesdev->iw_status = 1; | 2614 | if (nesdev->iw_status == 0) { |
2615 | nes_port_ibevent(nesvnic); | 2615 | nesdev->iw_status = 1; |
2616 | nes_port_ibevent(nesvnic); | ||
2617 | } | ||
2616 | } | 2618 | } |
2617 | spin_unlock(&nesvnic->port_ibevent_lock); | 2619 | spin_unlock(&nesvnic->port_ibevent_lock); |
2618 | } | 2620 | } |
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2642 | netif_carrier_off(nesvnic->netdev); | 2644 | netif_carrier_off(nesvnic->netdev); |
2643 | 2645 | ||
2644 | spin_lock(&nesvnic->port_ibevent_lock); | 2646 | spin_lock(&nesvnic->port_ibevent_lock); |
2645 | if (nesdev->iw_status == 1) { | 2647 | if (nesvnic->of_device_registered) { |
2646 | nesdev->iw_status = 0; | 2648 | if (nesdev->iw_status == 1) { |
2647 | nes_port_ibevent(nesvnic); | 2649 | nesdev->iw_status = 0; |
2650 | nes_port_ibevent(nesvnic); | ||
2651 | } | ||
2648 | } | 2652 | } |
2649 | spin_unlock(&nesvnic->port_ibevent_lock); | 2653 | spin_unlock(&nesvnic->port_ibevent_lock); |
2650 | } | 2654 | } |
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2703 | netif_carrier_on(nesvnic->netdev); | 2707 | netif_carrier_on(nesvnic->netdev); |
2704 | 2708 | ||
2705 | spin_lock(&nesvnic->port_ibevent_lock); | 2709 | spin_lock(&nesvnic->port_ibevent_lock); |
2706 | if (nesdev->iw_status == 0) { | 2710 | if (nesvnic->of_device_registered) { |
2707 | nesdev->iw_status = 1; | 2711 | if (nesdev->iw_status == 0) { |
2708 | nes_port_ibevent(nesvnic); | 2712 | nesdev->iw_status = 1; |
2713 | nes_port_ibevent(nesvnic); | ||
2714 | } | ||
2709 | } | 2715 | } |
2710 | spin_unlock(&nesvnic->port_ibevent_lock); | 2716 | spin_unlock(&nesvnic->port_ibevent_lock); |
2711 | } | 2717 | } |
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2723 | netif_carrier_off(nesvnic->netdev); | 2729 | netif_carrier_off(nesvnic->netdev); |
2724 | 2730 | ||
2725 | spin_lock(&nesvnic->port_ibevent_lock); | 2731 | spin_lock(&nesvnic->port_ibevent_lock); |
2726 | if (nesdev->iw_status == 1) { | 2732 | if (nesvnic->of_device_registered) { |
2727 | nesdev->iw_status = 0; | 2733 | if (nesdev->iw_status == 1) { |
2728 | nes_port_ibevent(nesvnic); | 2734 | nesdev->iw_status = 0; |
2735 | nes_port_ibevent(nesvnic); | ||
2736 | } | ||
2729 | } | 2737 | } |
2730 | spin_unlock(&nesvnic->port_ibevent_lock); | 2738 | spin_unlock(&nesvnic->port_ibevent_lock); |
2731 | } | 2739 | } |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 8245237b67ce..eca0c41f1226 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1005 | * there are still requests that haven't been acked. | 1005 | * there are still requests that haven't been acked. |
1006 | */ | 1006 | */ |
1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && | 1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && |
1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) | 1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && |
1009 | (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1009 | start_timer(qp); | 1010 | start_timer(qp); |
1010 | 1011 | ||
1011 | while (qp->s_last != qp->s_acked) { | 1012 | while (qp->s_last != qp->s_acked) { |
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1439 | } | 1440 | } |
1440 | 1441 | ||
1441 | spin_lock_irqsave(&qp->s_lock, flags); | 1442 | spin_lock_irqsave(&qp->s_lock, flags); |
1443 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1444 | goto ack_done; | ||
1442 | 1445 | ||
1443 | /* Ignore invalid responses. */ | 1446 | /* Ignore invalid responses. */ |
1444 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | 1447 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) |
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 0858791978d8..cfff0c41d298 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c | |||
@@ -1247,10 +1247,10 @@ static void | |||
1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | 1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) |
1248 | { | 1248 | { |
1249 | struct PStack *st = fi->userdata; | 1249 | struct PStack *st = fi->userdata; |
1250 | struct sk_buff *skb, *oskb; | 1250 | struct sk_buff *skb; |
1251 | struct Layer2 *l2 = &st->l2; | 1251 | struct Layer2 *l2 = &st->l2; |
1252 | u_char header[MAX_HEADER_LEN]; | 1252 | u_char header[MAX_HEADER_LEN]; |
1253 | int i; | 1253 | int i, hdr_space_needed; |
1254 | int unsigned p1; | 1254 | int unsigned p1; |
1255 | u_long flags; | 1255 | u_long flags; |
1256 | 1256 | ||
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1261 | if (!skb) | 1261 | if (!skb) |
1262 | return; | 1262 | return; |
1263 | 1263 | ||
1264 | hdr_space_needed = l2headersize(l2, 0); | ||
1265 | if (hdr_space_needed > skb_headroom(skb)) { | ||
1266 | struct sk_buff *orig_skb = skb; | ||
1267 | |||
1268 | skb = skb_realloc_headroom(skb, hdr_space_needed); | ||
1269 | if (!skb) { | ||
1270 | dev_kfree_skb(orig_skb); | ||
1271 | return; | ||
1272 | } | ||
1273 | } | ||
1264 | spin_lock_irqsave(&l2->lock, flags); | 1274 | spin_lock_irqsave(&l2->lock, flags); |
1265 | if(test_bit(FLG_MOD128, &l2->flag)) | 1275 | if(test_bit(FLG_MOD128, &l2->flag)) |
1266 | p1 = (l2->vs - l2->va) % 128; | 1276 | p1 = (l2->vs - l2->va) % 128; |
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1285 | l2->vs = (l2->vs + 1) % 8; | 1295 | l2->vs = (l2->vs + 1) % 8; |
1286 | } | 1296 | } |
1287 | spin_unlock_irqrestore(&l2->lock, flags); | 1297 | spin_unlock_irqrestore(&l2->lock, flags); |
1288 | p1 = skb->data - skb->head; | 1298 | memcpy(skb_push(skb, i), header, i); |
1289 | if (p1 >= i) | ||
1290 | memcpy(skb_push(skb, i), header, i); | ||
1291 | else { | ||
1292 | printk(KERN_WARNING | ||
1293 | "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); | ||
1294 | oskb = skb; | ||
1295 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); | ||
1296 | memcpy(skb_put(skb, i), header, i); | ||
1297 | skb_copy_from_linear_data(oskb, | ||
1298 | skb_put(skb, oskb->len), oskb->len); | ||
1299 | dev_kfree_skb(oskb); | ||
1300 | } | ||
1301 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); | 1299 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); |
1302 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); | 1300 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); |
1303 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { | 1301 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { |
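The isdnl2.c rewrite moves the headroom handling in front of the window/state updates: the dequeued skb is checked once with skb_headroom(), reallocated via skb_realloc_headroom() if the layer-2 header would not fit, and only then do the sequence-number bookkeeping and skb_push() run, so the old fallback that rebuilt the frame afterwards can go away. A generic, leak-free sketch of that headroom pattern (not the driver's exact control flow):

    #include <linux/skbuff.h>

    /* Make sure 'skb' has room for hdr_len bytes in front of the data;
     * returns a usable skb or NULL (the original is consumed either way). */
    static struct sk_buff *ensure_headroom(struct sk_buff *skb, int hdr_len)
    {
        struct sk_buff *nskb;

        if (hdr_len <= skb_headroom(skb))
            return skb;                     /* enough room already */

        nskb = skb_realloc_headroom(skb, hdr_len);  /* allocates a copy */
        dev_kfree_skb(skb);                 /* original no longer needed */
        return nskb;                        /* NULL on allocation failure */
    }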
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index e9a3eab7b0cf..8c1d85e27be4 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -621,7 +621,7 @@ static int __init memstick_init(void) | |||
621 | { | 621 | { |
622 | int rc; | 622 | int rc; |
623 | 623 | ||
624 | workqueue = create_freezeable_workqueue("kmemstick"); | 624 | workqueue = create_freezable_workqueue("kmemstick"); |
625 | if (!workqueue) | 625 | if (!workqueue) |
626 | return -ENOMEM; | 626 | return -ENOMEM; |
627 | 627 | ||
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 5f6852dff40b..44d4475a09dd 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c | |||
@@ -329,7 +329,7 @@ static int __init tifm_init(void) | |||
329 | { | 329 | { |
330 | int rc; | 330 | int rc; |
331 | 331 | ||
332 | workqueue = create_freezeable_workqueue("tifm"); | 332 | workqueue = create_freezable_workqueue("tifm"); |
333 | if (!workqueue) | 333 | if (!workqueue) |
334 | return -ENOMEM; | 334 | return -ENOMEM; |
335 | 335 | ||
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 4d2ea8e80140..6df5a55da110 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void) | |||
785 | if (x86_hyper != &x86_hyper_vmware) | 785 | if (x86_hyper != &x86_hyper_vmware) |
786 | return -ENODEV; | 786 | return -ENODEV; |
787 | 787 | ||
788 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | 788 | vmballoon_wq = create_freezable_workqueue("vmmemctl"); |
789 | if (!vmballoon_wq) { | 789 | if (!vmballoon_wq) { |
790 | pr_err("failed to create workqueue\n"); | 790 | pr_err("failed to create workqueue\n"); |
791 | return -ENOMEM; | 791 | return -ENOMEM; |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index d9d7efbc77cc..6322d1fb5d62 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
930 | 930 | ||
931 | init_completion(&dev->dma_done); | 931 | init_completion(&dev->dma_done); |
932 | 932 | ||
933 | dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); | 933 | dev->card_workqueue = create_freezable_workqueue(DRV_NAME); |
934 | 934 | ||
935 | if (!dev->card_workqueue) | 935 | if (!dev->card_workqueue) |
936 | goto error9; | 936 | goto error9; |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 67822cf6c025..ac0d6a8613b5 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = { | |||
1258 | static __init int sm_module_init(void) | 1258 | static __init int sm_module_init(void) |
1259 | { | 1259 | { |
1260 | int error = 0; | 1260 | int error = 0; |
1261 | cache_flush_workqueue = create_freezeable_workqueue("smflush"); | 1261 | cache_flush_workqueue = create_freezable_workqueue("smflush"); |
1262 | 1262 | ||
1263 | if (IS_ERR(cache_flush_workqueue)) | 1263 | if (IS_ERR(cache_flush_workqueue)) |
1264 | return PTR_ERR(cache_flush_workqueue); | 1264 | return PTR_ERR(cache_flush_workqueue); |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) | |||
940 | goto open_unlock; | 940 | goto open_unlock; |
941 | } | 941 | } |
942 | 942 | ||
943 | priv->wq = create_freezeable_workqueue("mcp251x_wq"); | 943 | priv->wq = create_freezable_workqueue("mcp251x_wq"); |
944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
946 | 946 | ||
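The one-line changes in memstick, tifm, vmw_balloon, r852, sm_ftl and mcp251x above are all part of the same tree-wide rename: the misspelled create_freezeable_workqueue() helper becomes create_freezable_workqueue(); the semantics (a workqueue whose work items are held off while tasks are frozen for suspend/hibernation) are unchanged. A minimal usage sketch with illustrative names:

    #include <linux/workqueue.h>

    static struct workqueue_struct *example_wq;

    static void example_work_fn(struct work_struct *work)
    {
        /* runs in process context, never while tasks are frozen */
    }
    static DECLARE_WORK(example_work, example_work_fn);

    static int __init example_init(void)
    {
        example_wq = create_freezable_workqueue("example");
        if (!example_wq)
            return -ENOMEM;
        queue_work(example_wq, &example_work);
        return 0;
    }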
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd90..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig | |||
@@ -18,7 +18,7 @@ config CAN_SOFTING | |||
18 | config CAN_SOFTING_CS | 18 | config CAN_SOFTING_CS |
19 | tristate "Softing Gmbh CAN pcmcia cards" | 19 | tristate "Softing Gmbh CAN pcmcia cards" |
20 | depends on PCMCIA | 20 | depends on PCMCIA |
21 | select CAN_SOFTING | 21 | depends on CAN_SOFTING |
22 | ---help--- | 22 | ---help--- |
23 | Support for PCMCIA cards from Softing Gmbh & some cards | 23 | Support for PCMCIA cards from Softing Gmbh & some cards |
24 | from Vector Gmbh. | 24 | from Vector Gmbh. |
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2040 | { | 2040 | { |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | BUG_ON(adapter->debugfs_root == NULL); | 2043 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2044 | 2044 | ||
2045 | /* | 2045 | /* |
2046 | * Debugfs support is best effort. | 2046 | * Debugfs support is best effort. |
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2061 | */ | 2061 | */ |
2062 | static void cleanup_debugfs(struct adapter *adapter) | 2062 | static void cleanup_debugfs(struct adapter *adapter) |
2063 | { | 2063 | { |
2064 | BUG_ON(adapter->debugfs_root == NULL); | 2064 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2065 | 2065 | ||
2066 | /* | 2066 | /* |
2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove | 2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove |
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2489 | struct net_device *netdev; | 2489 | struct net_device *netdev; |
2490 | 2490 | ||
2491 | /* | 2491 | /* |
2492 | * Vet our module parameters. | ||
2493 | */ | ||
2494 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2495 | dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" | ||
2496 | " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, | ||
2497 | MSI_MSI); | ||
2498 | err = -EINVAL; | ||
2499 | goto err_out; | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * Print our driver banner the first time we're called to initialize a | 2492 | * Print our driver banner the first time we're called to initialize a |
2504 | * device. | 2493 | * device. |
2505 | */ | 2494 | */ |
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2711 | /* | 2700 | /* |
2712 | * Set up our debugfs entries. | 2701 | * Set up our debugfs entries. |
2713 | */ | 2702 | */ |
2714 | if (cxgb4vf_debugfs_root) { | 2703 | if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { |
2715 | adapter->debugfs_root = | 2704 | adapter->debugfs_root = |
2716 | debugfs_create_dir(pci_name(pdev), | 2705 | debugfs_create_dir(pci_name(pdev), |
2717 | cxgb4vf_debugfs_root); | 2706 | cxgb4vf_debugfs_root); |
2718 | if (adapter->debugfs_root == NULL) | 2707 | if (IS_ERR_OR_NULL(adapter->debugfs_root)) |
2719 | dev_warn(&pdev->dev, "could not create debugfs" | 2708 | dev_warn(&pdev->dev, "could not create debugfs" |
2720 | " directory"); | 2709 | " directory"); |
2721 | else | 2710 | else |
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2770 | */ | 2759 | */ |
2771 | 2760 | ||
2772 | err_free_debugfs: | 2761 | err_free_debugfs: |
2773 | if (adapter->debugfs_root) { | 2762 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2774 | cleanup_debugfs(adapter); | 2763 | cleanup_debugfs(adapter); |
2775 | debugfs_remove_recursive(adapter->debugfs_root); | 2764 | debugfs_remove_recursive(adapter->debugfs_root); |
2776 | } | 2765 | } |
@@ -2802,7 +2791,6 @@ err_release_regions: | |||
2802 | err_disable_device: | 2791 | err_disable_device: |
2803 | pci_disable_device(pdev); | 2792 | pci_disable_device(pdev); |
2804 | 2793 | ||
2805 | err_out: | ||
2806 | return err; | 2794 | return err; |
2807 | } | 2795 | } |
2808 | 2796 | ||
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2840 | /* | 2828 | /* |
2841 | * Tear down our debugfs entries. | 2829 | * Tear down our debugfs entries. |
2842 | */ | 2830 | */ |
2843 | if (adapter->debugfs_root) { | 2831 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2844 | cleanup_debugfs(adapter); | 2832 | cleanup_debugfs(adapter); |
2845 | debugfs_remove_recursive(adapter->debugfs_root); | 2833 | debugfs_remove_recursive(adapter->debugfs_root); |
2846 | } | 2834 | } |
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2874 | } | 2862 | } |
2875 | 2863 | ||
2876 | /* | 2864 | /* |
2865 | * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt | ||
2866 | * delivery. | ||
2867 | */ | ||
2868 | static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) | ||
2869 | { | ||
2870 | struct adapter *adapter; | ||
2871 | int pidx; | ||
2872 | |||
2873 | adapter = pci_get_drvdata(pdev); | ||
2874 | if (!adapter) | ||
2875 | return; | ||
2876 | |||
2877 | /* | ||
2878 | * Disable all Virtual Interfaces. This will shut down the | ||
2879 | * delivery of all ingress packets into the chip for these | ||
2880 | * Virtual Interfaces. | ||
2881 | */ | ||
2882 | for_each_port(adapter, pidx) { | ||
2883 | struct net_device *netdev; | ||
2884 | struct port_info *pi; | ||
2885 | |||
2886 | if (!test_bit(pidx, &adapter->registered_device_map)) | ||
2887 | continue; | ||
2888 | |||
2889 | netdev = adapter->port[pidx]; | ||
2890 | if (!netdev) | ||
2891 | continue; | ||
2892 | |||
2893 | pi = netdev_priv(netdev); | ||
2894 | t4vf_enable_vi(adapter, pi->viid, false, false); | ||
2895 | } | ||
2896 | |||
2897 | /* | ||
2898 | * Free up all Queues which will prevent further DMA and | ||
2899 | * Interrupts allowing various internal pathways to drain. | ||
2900 | */ | ||
2901 | t4vf_free_sge_resources(adapter); | ||
2902 | } | ||
2903 | |||
2904 | /* | ||
2877 | * PCI Device registration data structures. | 2905 | * PCI Device registration data structures. |
2878 | */ | 2906 | */ |
2879 | #define CH_DEVICE(devid, idx) \ | 2907 | #define CH_DEVICE(devid, idx) \ |
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { | |||
2906 | .id_table = cxgb4vf_pci_tbl, | 2934 | .id_table = cxgb4vf_pci_tbl, |
2907 | .probe = cxgb4vf_pci_probe, | 2935 | .probe = cxgb4vf_pci_probe, |
2908 | .remove = __devexit_p(cxgb4vf_pci_remove), | 2936 | .remove = __devexit_p(cxgb4vf_pci_remove), |
2937 | .shutdown = __devexit_p(cxgb4vf_pci_shutdown), | ||
2909 | }; | 2938 | }; |
2910 | 2939 | ||
2911 | /* | 2940 | /* |
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) | |||
2915 | { | 2944 | { |
2916 | int ret; | 2945 | int ret; |
2917 | 2946 | ||
2947 | /* | ||
2948 | * Vet our module parameters. | ||
2949 | */ | ||
2950 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2951 | printk(KERN_WARNING KBUILD_MODNAME | ||
2952 | ": bad module parameter msi=%d; must be %d" | ||
2953 | " (MSI-X or MSI) or %d (MSI)\n", | ||
2954 | msi, MSI_MSIX, MSI_MSI); | ||
2955 | return -EINVAL; | ||
2956 | } | ||
2957 | |||
2918 | /* Debugfs support is optional, just warn if this fails */ | 2958 | /* Debugfs support is optional, just warn if this fails */ |
2919 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 2959 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
2920 | if (!cxgb4vf_debugfs_root) | 2960 | if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2921 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" | 2961 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" |
2922 | " debugfs entry, continuing\n"); | 2962 | " debugfs entry, continuing\n"); |
2923 | 2963 | ||
2924 | ret = pci_register_driver(&cxgb4vf_driver); | 2964 | ret = pci_register_driver(&cxgb4vf_driver); |
2925 | if (ret < 0) | 2965 | if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2926 | debugfs_remove(cxgb4vf_debugfs_root); | 2966 | debugfs_remove(cxgb4vf_debugfs_root); |
2927 | return ret; | 2967 | return ret; |
2928 | } | 2968 | } |
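Three independent fixes are folded into the cxgb4vf_main.c diff: debugfs results are now tested with IS_ERR_OR_NULL() because a kernel built without CONFIG_DEBUG_FS hands back an error pointer rather than NULL, the msi module-parameter check moves from per-device probe to module init so a bad value rejects the module load itself, and a PCI .shutdown hook quiesces the virtual interfaces and frees the SGE queues. The debugfs handling is the generally reusable piece; a minimal sketch:

    #include <linux/module.h>
    #include <linux/debugfs.h>
    #include <linux/err.h>

    static struct dentry *example_debugfs_root;

    static int __init example_module_init(void)
    {
        /* Best effort: with CONFIG_DEBUG_FS=n this returns an ERR_PTR,
         * so a plain NULL check is not enough. */
        example_debugfs_root = debugfs_create_dir("example", NULL);
        if (IS_ERR_OR_NULL(example_debugfs_root))
            pr_warn("example: debugfs unavailable, continuing\n");
        return 0;
    }

    static void __exit example_module_exit(void)
    {
        if (!IS_ERR_OR_NULL(example_debugfs_root))
            debugfs_remove_recursive(example_debugfs_root);
    }

    module_init(example_module_init);
    module_exit(example_module_exit);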
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
171 | delay_idx = 0; | 171 | delay_idx = 0; |
172 | ms = delay[0]; | 172 | ms = delay[0]; |
173 | 173 | ||
174 | for (i = 0; i < 500; i += ms) { | 174 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { |
175 | if (sleep_ok) { | 175 | if (sleep_ok) { |
176 | ms = delay[delay_idx]; | 176 | ms = delay[delay_idx]; |
177 | if (delay_idx < ARRAY_SIZE(delay) - 1) | 177 | if (delay_idx < ARRAY_SIZE(delay) - 1) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3065870cf2a7..3fa110ddb041 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
937 | u16 phy_status, phy_1000t_status, phy_ext_status; | 937 | u16 phy_status, phy_1000t_status, phy_ext_status; |
938 | u16 pci_status; | 938 | u16 pci_status; |
939 | 939 | ||
940 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
941 | return; | ||
942 | |||
940 | e1e_rphy(hw, PHY_STATUS, &phy_status); | 943 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
941 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | 944 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
942 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | 945 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) | |||
1506 | struct e1000_adapter *adapter = container_of(work, | 1509 | struct e1000_adapter *adapter = container_of(work, |
1507 | struct e1000_adapter, downshift_task); | 1510 | struct e1000_adapter, downshift_task); |
1508 | 1511 | ||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1509 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | 1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); |
1510 | } | 1516 | } |
1511 | 1517 | ||
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
3338 | return 0; | 3344 | return 0; |
3339 | } | 3345 | } |
3340 | 3346 | ||
3347 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3348 | { | ||
3349 | struct e1000_hw *hw = &adapter->hw; | ||
3350 | |||
3351 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3352 | return; | ||
3353 | |||
3354 | /* flush pending descriptor writebacks to memory */ | ||
3355 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3356 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3357 | |||
3358 | /* execute the writes immediately */ | ||
3359 | e1e_flush(); | ||
3360 | } | ||
3361 | |||
3341 | void e1000e_down(struct e1000_adapter *adapter) | 3362 | void e1000e_down(struct e1000_adapter *adapter) |
3342 | { | 3363 | { |
3343 | struct net_device *netdev = adapter->netdev; | 3364 | struct net_device *netdev = adapter->netdev; |
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3377 | 3398 | ||
3378 | if (!pci_channel_offline(adapter->pdev)) | 3399 | if (!pci_channel_offline(adapter->pdev)) |
3379 | e1000e_reset(adapter); | 3400 | e1000e_reset(adapter); |
3401 | |||
3402 | e1000e_flush_descriptors(adapter); | ||
3403 | |||
3380 | e1000_clean_tx_ring(adapter); | 3404 | e1000_clean_tx_ring(adapter); |
3381 | e1000_clean_rx_ring(adapter); | 3405 | e1000_clean_rx_ring(adapter); |
3382 | 3406 | ||
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3765 | { | 3789 | { |
3766 | struct e1000_adapter *adapter = container_of(work, | 3790 | struct e1000_adapter *adapter = container_of(work, |
3767 | struct e1000_adapter, update_phy_task); | 3791 | struct e1000_adapter, update_phy_task); |
3792 | |||
3793 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3794 | return; | ||
3795 | |||
3768 | e1000_get_phy_info(&adapter->hw); | 3796 | e1000_get_phy_info(&adapter->hw); |
3769 | } | 3797 | } |
3770 | 3798 | ||
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3775 | static void e1000_update_phy_info(unsigned long data) | 3803 | static void e1000_update_phy_info(unsigned long data) |
3776 | { | 3804 | { |
3777 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 3805 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
3806 | |||
3807 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3808 | return; | ||
3809 | |||
3778 | schedule_work(&adapter->update_phy_task); | 3810 | schedule_work(&adapter->update_phy_task); |
3779 | } | 3811 | } |
3780 | 3812 | ||
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4149 | u32 link, tctl; | 4181 | u32 link, tctl; |
4150 | int tx_pending = 0; | 4182 | int tx_pending = 0; |
4151 | 4183 | ||
4184 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4185 | return; | ||
4186 | |||
4152 | link = e1000e_has_link(adapter); | 4187 | link = e1000e_has_link(adapter); |
4153 | if ((netif_carrier_ok(netdev)) && link) { | 4188 | if ((netif_carrier_ok(netdev)) && link) { |
4154 | /* Cancel scheduled suspend requests. */ | 4189 | /* Cancel scheduled suspend requests. */ |
@@ -4337,19 +4372,12 @@ link_up: | |||
4337 | else | 4372 | else |
4338 | ew32(ICS, E1000_ICS_RXDMT0); | 4373 | ew32(ICS, E1000_ICS_RXDMT0); |
4339 | 4374 | ||
4375 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4376 | e1000e_flush_descriptors(adapter); | ||
4377 | |||
4340 | /* Force detection of hung controller every watchdog period */ | 4378 | /* Force detection of hung controller every watchdog period */ |
4341 | adapter->detect_tx_hung = 1; | 4379 | adapter->detect_tx_hung = 1; |
4342 | 4380 | ||
4343 | /* flush partial descriptors to memory before detecting Tx hang */ | ||
4344 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
4345 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
4346 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
4347 | /* | ||
4348 | * no need to flush the writes because the timeout code does | ||
4349 | * an er32 first thing | ||
4350 | */ | ||
4351 | } | ||
4352 | |||
4353 | /* | 4381 | /* |
4354 | * With 82571 controllers, LAA may be overwritten due to controller | 4382 | * With 82571 controllers, LAA may be overwritten due to controller |
4355 | * reset from the other port. Set the appropriate LAA in RAR[0] | 4383 | * reset from the other port. Set the appropriate LAA in RAR[0] |
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) | |||
4887 | struct e1000_adapter *adapter; | 4915 | struct e1000_adapter *adapter; |
4888 | adapter = container_of(work, struct e1000_adapter, reset_task); | 4916 | adapter = container_of(work, struct e1000_adapter, reset_task); |
4889 | 4917 | ||
4918 | /* don't run the task if already down */ | ||
4919 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4920 | return; | ||
4921 | |||
4890 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | 4922 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && |
4891 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | 4923 | (adapter->flags & FLAG_RX_RESTART_NOW))) { |
4892 | e1000e_dump(adapter); | 4924 | e1000e_dump(adapter); |
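The e1000e hunks share one theme: every piece of deferred work (the hang-check, downshift workaround, PHY update, watchdog and reset tasks) now returns immediately if the adapter is already marked down, which avoids touching hardware or freed resources while the interface is being torn down; the DMA-burst descriptor flush is also pulled into e1000e_flush_descriptors() and called from the down path so no write-backs land in freed rings. A generic sketch of the early-exit guard (struct example_adapter and __EXAMPLE_DOWN are illustrative stand-ins for the driver's own state):

    static void example_task(struct work_struct *work)
    {
        struct example_adapter *adapter =
            container_of(work, struct example_adapter, example_work);

        /* The teardown path sets the DOWN bit before tearing down rings,
         * so already-queued work must not touch the hardware anymore. */
        if (test_bit(__EXAMPLE_DOWN, &adapter->state))
            return;

        /* ... register access is safe past this point ... */
    }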
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5645 | goto out_error; | 5645 | goto out_error; |
5646 | } | 5646 | } |
5647 | 5647 | ||
5648 | netif_carrier_off(dev); | ||
5649 | |||
5648 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", | 5650 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", |
5649 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); | 5651 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
5650 | 5652 | ||
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 8753980668c7..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
159 | struct scatterlist *sg; | 159 | struct scatterlist *sg; |
160 | unsigned int i, j, dmacount; | 160 | unsigned int i, j, dmacount; |
161 | unsigned int len; | 161 | unsigned int len; |
162 | static const unsigned int bufflen = 4096; | 162 | static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
163 | unsigned int firstoff = 0; | 163 | unsigned int firstoff = 0; |
164 | unsigned int lastsize; | 164 | unsigned int lastsize; |
165 | unsigned int thisoff = 0; | 165 | unsigned int thisoff = 0; |
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
254 | /* only the last buffer may have non-full bufflen */ | 254 | /* only the last buffer may have non-full bufflen */ |
255 | lastsize = thisoff + thislen; | 255 | lastsize = thisoff + thislen; |
256 | 256 | ||
257 | /* | ||
258 | * lastsize can not be buffer len. | ||
259 | * If it is then adding another buffer with lastsize = 1. | ||
260 | */ | ||
261 | if (lastsize == bufflen) { | ||
262 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
263 | e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " | ||
264 | "not enough user buffers. We need an extra " | ||
265 | "buffer because lastsize is bufflen.\n", | ||
266 | xid, i, j, dmacount, (u64)addr); | ||
267 | goto out_noddp_free; | ||
268 | } | ||
269 | |||
270 | ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); | ||
271 | j++; | ||
272 | lastsize = 1; | ||
273 | } | ||
274 | |||
257 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 275 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
258 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 276 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
259 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 277 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
532 | e_err(drv, "failed to allocated FCoE DDP pool\n"); | 550 | e_err(drv, "failed to allocated FCoE DDP pool\n"); |
533 | 551 | ||
534 | spin_lock_init(&fcoe->lock); | 552 | spin_lock_init(&fcoe->lock); |
553 | |||
554 | /* Extra buffer to be shared by all DDPs for HW work around */ | ||
555 | fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | ||
556 | if (fcoe->extra_ddp_buffer == NULL) { | ||
557 | e_err(drv, "failed to allocated extra DDP buffer\n"); | ||
558 | goto out_extra_ddp_buffer_alloc; | ||
559 | } | ||
560 | |||
561 | fcoe->extra_ddp_buffer_dma = | ||
562 | dma_map_single(&adapter->pdev->dev, | ||
563 | fcoe->extra_ddp_buffer, | ||
564 | IXGBE_FCBUFF_MIN, | ||
565 | DMA_FROM_DEVICE); | ||
566 | if (dma_mapping_error(&adapter->pdev->dev, | ||
567 | fcoe->extra_ddp_buffer_dma)) { | ||
568 | e_err(drv, "failed to map extra DDP buffer\n"); | ||
569 | goto out_extra_ddp_buffer_dma; | ||
570 | } | ||
535 | } | 571 | } |
536 | 572 | ||
537 | /* Enable L2 eth type filter for FCoE */ | 573 | /* Enable L2 eth type filter for FCoE */ |
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
581 | } | 617 | } |
582 | } | 618 | } |
583 | #endif | 619 | #endif |
620 | |||
621 | return; | ||
622 | |||
623 | out_extra_ddp_buffer_dma: | ||
624 | kfree(fcoe->extra_ddp_buffer); | ||
625 | out_extra_ddp_buffer_alloc: | ||
626 | pci_pool_destroy(fcoe->pool); | ||
627 | fcoe->pool = NULL; | ||
584 | } | 628 | } |
585 | 629 | ||
586 | /** | 630 | /** |
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) | |||
600 | if (fcoe->pool) { | 644 | if (fcoe->pool) { |
601 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) | 645 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
602 | ixgbe_fcoe_ddp_put(adapter->netdev, i); | 646 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
647 | dma_unmap_single(&adapter->pdev->dev, | ||
648 | fcoe->extra_ddp_buffer_dma, | ||
649 | IXGBE_FCBUFF_MIN, | ||
650 | DMA_FROM_DEVICE); | ||
651 | kfree(fcoe->extra_ddp_buffer); | ||
603 | pci_pool_destroy(fcoe->pool); | 652 | pci_pool_destroy(fcoe->pool); |
604 | fcoe->pool = NULL; | 653 | fcoe->pool = NULL; |
605 | } | 654 | } |
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h | |||
@@ -70,6 +70,8 @@ struct ixgbe_fcoe { | |||
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct pci_pool *pool; | 71 | struct pci_pool *pool; |
72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; | 72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
73 | unsigned char *extra_ddp_buffer; | ||
74 | dma_addr_t extra_ddp_buffer_dma; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #endif /* _IXGBE_FCOE_H */ | 77 | #endif /* _IXGBE_FCOE_H */ |
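The ixgbe FCoE changes allocate one spare DDP buffer at setup time, DMA-map it, and append it to a request's user descriptor list whenever the final scatter element would exactly fill a buffer, working around a hardware corner case; teardown unmaps and frees it again. The streaming-DMA part of that is a standard pattern; a minimal sketch (BUF_LEN and the names are illustrative, and 'dev' is the PCI function's struct device):

    #include <linux/dma-mapping.h>
    #include <linux/slab.h>

    #define BUF_LEN 4096

    static void *buf;
    static dma_addr_t buf_dma;

    static int example_map_buffer(struct device *dev)
    {
        buf = kmalloc(BUF_LEN, GFP_KERNEL);
        if (!buf)
            return -ENOMEM;

        buf_dma = dma_map_single(dev, buf, BUF_LEN, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, buf_dma)) {
            kfree(buf);                     /* unwind in reverse order */
            return -EIO;
        }
        return 0;
    }

    static void example_unmap_buffer(struct device *dev)
    {
        dma_unmap_single(dev, buf_dma, BUF_LEN, DMA_FROM_DEVICE);
        kfree(buf);
    }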
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fbae703b46d7..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |||
3728 | * We need to try and force an autonegotiation | 3728 | * We need to try and force an autonegotiation |
3729 | * session, then bring up link. | 3729 | * session, then bring up link. |
3730 | */ | 3730 | */ |
3731 | hw->mac.ops.setup_sfp(hw); | 3731 | if (hw->mac.ops.setup_sfp) |
3732 | hw->mac.ops.setup_sfp(hw); | ||
3732 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 3733 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
3733 | schedule_work(&adapter->multispeed_fiber_task); | 3734 | schedule_work(&adapter->multispeed_fiber_task); |
3734 | } else { | 3735 | } else { |
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5968 | unregister_netdev(adapter->netdev); | 5969 | unregister_netdev(adapter->netdev); |
5969 | return; | 5970 | return; |
5970 | } | 5971 | } |
5971 | hw->mac.ops.setup_sfp(hw); | 5972 | if (hw->mac.ops.setup_sfp) |
5973 | hw->mac.ops.setup_sfp(hw); | ||
5972 | 5974 | ||
5973 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 5975 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
5974 | /* This will also work for DA Twinax connections */ | 5976 | /* This will also work for DA Twinax connections */ |
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -73,7 +73,7 @@ struct pch_gbe_regs { | |||
73 | struct pch_gbe_regs_mac_adr mac_adr[16]; | 73 | struct pch_gbe_regs_mac_adr mac_adr[16]; |
74 | u32 ADDR_MASK; | 74 | u32 ADDR_MASK; |
75 | u32 MIIM; | 75 | u32 MIIM; |
76 | u32 reserve2; | 76 | u32 MAC_ADDR_LOAD; |
77 | u32 RGMII_ST; | 77 | u32 RGMII_ST; |
78 | u32 RGMII_CTRL; | 78 | u32 RGMII_CTRL; |
79 | u32 reserve3[3]; | 79 | u32 reserve3[3]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 4c9a7d4f3fca..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
29 | #define PCH_GBE_SHORT_PKT 64 | 29 | #define PCH_GBE_SHORT_PKT 64 |
30 | #define DSC_INIT16 0xC000 | 30 | #define DSC_INIT16 0xC000 |
31 | #define PCH_GBE_DMA_ALIGN 0 | 31 | #define PCH_GBE_DMA_ALIGN 0 |
32 | #define PCH_GBE_DMA_PADDING 2 | ||
32 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
33 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
34 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | |||
88 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); | 89 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); |
89 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, | 90 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, |
90 | int data); | 91 | int data); |
92 | |||
93 | inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) | ||
94 | { | ||
95 | iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); | ||
96 | } | ||
97 | |||
91 | /** | 98 | /** |
92 | * pch_gbe_mac_read_mac_addr - Read MAC address | 99 | * pch_gbe_mac_read_mac_addr - Read MAC address |
93 | * @hw: Pointer to the HW structure | 100 | * @hw: Pointer to the HW structure |
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1365 | struct pch_gbe_buffer *buffer_info; | 1372 | struct pch_gbe_buffer *buffer_info; |
1366 | struct pch_gbe_rx_desc *rx_desc; | 1373 | struct pch_gbe_rx_desc *rx_desc; |
1367 | u32 length; | 1374 | u32 length; |
1368 | unsigned char tmp_packet[ETH_HLEN]; | ||
1369 | unsigned int i; | 1375 | unsigned int i; |
1370 | unsigned int cleaned_count = 0; | 1376 | unsigned int cleaned_count = 0; |
1371 | bool cleaned = false; | 1377 | bool cleaned = false; |
1372 | struct sk_buff *skb; | 1378 | struct sk_buff *skb, *new_skb; |
1373 | u8 dma_status; | 1379 | u8 dma_status; |
1374 | u16 gbec_status; | 1380 | u16 gbec_status; |
1375 | u32 tcp_ip_status; | 1381 | u32 tcp_ip_status; |
1376 | u8 skb_copy_flag = 0; | ||
1377 | u8 skb_padding_flag = 0; | ||
1378 | 1382 | ||
1379 | i = rx_ring->next_to_clean; | 1383 | i = rx_ring->next_to_clean; |
1380 | 1384 | ||
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1418 | pr_err("Receive CRC Error\n"); | 1422 | pr_err("Receive CRC Error\n"); |
1419 | } else { | 1423 | } else { |
1420 | /* get receive length */ | 1424 | /* get receive length */ |
1421 | /* length convert[-3], padding[-2] */ | 1425 | /* length convert[-3] */ |
1422 | length = (rx_desc->rx_words_eob) - 3 - 2; | 1426 | length = (rx_desc->rx_words_eob) - 3; |
1423 | 1427 | ||
1424 | /* Decide the data conversion method */ | 1428 | /* Decide the data conversion method */ |
1425 | if (!adapter->rx_csum) { | 1429 | if (!adapter->rx_csum) { |
1426 | /* [Header:14][payload] */ | 1430 | /* [Header:14][payload] */ |
1427 | skb_padding_flag = 0; | 1431 | if (NET_IP_ALIGN) { |
1428 | skb_copy_flag = 1; | 1432 | /* Because alignment differs, |
1433 | * the new_skb is newly allocated, | ||
1434 | * and data is copied to new_skb.*/ | ||
1435 | new_skb = netdev_alloc_skb(netdev, | ||
1436 | length + NET_IP_ALIGN); | ||
1437 | if (!new_skb) { | ||
1438 | /* dorrop error */ | ||
1439 | pr_err("New skb allocation " | ||
1440 | "Error\n"); | ||
1441 | goto dorrop; | ||
1442 | } | ||
1443 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1444 | memcpy(new_skb->data, skb->data, | ||
1445 | length); | ||
1446 | skb = new_skb; | ||
1447 | } else { | ||
1448 | /* DMA buffer is used as SKB as it is.*/ | ||
1449 | buffer_info->skb = NULL; | ||
1450 | } | ||
1429 | } else { | 1451 | } else { |
1430 | /* [Header:14][padding:2][payload] */ | 1452 | /* [Header:14][padding:2][payload] */ |
1431 | skb_padding_flag = 1; | 1453 | /* The length includes padding length */ |
1432 | if (length < copybreak) | 1454 | length = length - PCH_GBE_DMA_PADDING; |
1433 | skb_copy_flag = 1; | 1455 | if ((length < copybreak) || |
1434 | else | 1456 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { |
1435 | skb_copy_flag = 0; | 1457 | /* Because alignment differs, |
1436 | } | 1458 | * the new_skb is newly allocated, |
1437 | 1459 | * and data is copied to new_skb. | |
1438 | /* Data conversion */ | 1460 | * Padding data is deleted |
1439 | if (skb_copy_flag) { /* recycle skb */ | 1461 | * at the time of a copy.*/ |
1440 | struct sk_buff *new_skb; | 1462 | new_skb = netdev_alloc_skb(netdev, |
1441 | new_skb = | 1463 | length + NET_IP_ALIGN); |
1442 | netdev_alloc_skb(netdev, | 1464 | if (!new_skb) { |
1443 | length + NET_IP_ALIGN); | 1465 | /* dorrop error */ |
1444 | if (new_skb) { | 1466 | pr_err("New skb allocation " |
1445 | if (!skb_padding_flag) { | 1467 | "Error\n"); |
1446 | skb_reserve(new_skb, | 1468 | goto dorrop; |
1447 | NET_IP_ALIGN); | ||
1448 | } | 1469 | } |
1470 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1449 | memcpy(new_skb->data, skb->data, | 1471 | memcpy(new_skb->data, skb->data, |
1450 | length); | 1472 | ETH_HLEN); |
1451 | /* save the skb | 1473 | memcpy(&new_skb->data[ETH_HLEN], |
1452 | * in buffer_info as good */ | 1474 | &skb->data[ETH_HLEN + |
1475 | PCH_GBE_DMA_PADDING], | ||
1476 | length - ETH_HLEN); | ||
1453 | skb = new_skb; | 1477 | skb = new_skb; |
1454 | } else if (!skb_padding_flag) { | 1478 | } else { |
1455 | /* dorrop error */ | 1479 | /* Padding data is deleted |
1456 | pr_err("New skb allocation Error\n"); | 1480 | * by moving header data.*/ |
1457 | goto dorrop; | 1481 | memmove(&skb->data[PCH_GBE_DMA_PADDING], |
1482 | &skb->data[0], ETH_HLEN); | ||
1483 | skb_reserve(skb, NET_IP_ALIGN); | ||
1484 | buffer_info->skb = NULL; | ||
1458 | } | 1485 | } |
1459 | } else { | ||
1460 | buffer_info->skb = NULL; | ||
1461 | } | 1486 | } |
1462 | if (skb_padding_flag) { | 1487 | /* The length includes FCS length */ |
1463 | memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); | 1488 | length = length - ETH_FCS_LEN; |
1464 | memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], | ||
1465 | ETH_HLEN); | ||
1466 | skb_reserve(skb, NET_IP_ALIGN); | ||
1467 | |||
1468 | } | ||
1469 | |||
1470 | /* update status of driver */ | 1489 | /* update status of driver */ |
1471 | adapter->stats.rx_bytes += length; | 1490 | adapter->stats.rx_bytes += length; |
1472 | adapter->stats.rx_packets++; | 1491 | adapter->stats.rx_packets++; |
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, | |||
2318 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 2337 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; |
2319 | pch_gbe_set_ethtool_ops(netdev); | 2338 | pch_gbe_set_ethtool_ops(netdev); |
2320 | 2339 | ||
2340 | pch_gbe_mac_load_mac_addr(&adapter->hw); | ||
2321 | pch_gbe_mac_reset_hw(&adapter->hw); | 2341 | pch_gbe_mac_reset_hw(&adapter->hw); |
2322 | 2342 | ||
2323 | /* setup the private structure */ | 2343 | /* setup the private structure */ |
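
The pch_gbe receive rework above has two parts: a new helper asks the hardware to reload its MAC address (MAC_ADDR_LOAD) before the MAC reset in probe, and the RX clean path now handles the 2-byte padding the DMA engine inserts after the Ethernet header when checksum offload is enabled, copying into a freshly aligned skb only for small frames (copybreak) or when NET_IP_ALIGN differs from the hardware padding. Below is a minimal stand-alone sketch of the in-place padding removal, not the driver code itself; the constants mirror the patch.

/* Stand-alone sketch: the 14-byte Ethernet header is moved forward over
 * the 2 padding bytes, so the payload that follows needs no copy.  The
 * driver then does skb_reserve(skb, NET_IP_ALIGN) to point at the
 * relocated header. */
#include <stdio.h>
#include <string.h>

#define ETH_HLEN        14
#define DMA_PADDING     2       /* PCH_GBE_DMA_PADDING in the driver */

static void strip_rx_padding(unsigned char *buf)
{
        /* [header:14][padding:2][payload] -> [..2..][header:14][payload] */
        memmove(buf + DMA_PADDING, buf, ETH_HLEN);
}

int main(void)
{
        unsigned char frame[64] = "HHHHHHHHHHHHHHPPpayload...";

        strip_rx_padding(frame);
        printf("%.14s\n", (char *)(frame + DMA_PADDING)); /* the header */
        return 0;
}

Moving only the 14-byte header is what lets the large-frame case hand the DMA buffer up without copying the payload at all.
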
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c610..469ab0b7ce31 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -3190,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | if (pci_dev_run_wake(pdev)) | 3190 | if (pci_dev_run_wake(pdev)) |
3191 | pm_runtime_put_noidle(&pdev->dev); | 3191 | pm_runtime_put_noidle(&pdev->dev); |
3192 | 3192 | ||
3193 | netif_carrier_off(dev); | ||
3194 | |||
3193 | out: | 3195 | out: |
3194 | return rc; | 3196 | return rc; |
3195 | 3197 | ||
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1560 | 1560 | ||
1561 | priv->hw = device; | 1561 | priv->hw = device; |
1562 | 1562 | ||
1563 | if (device_can_wakeup(priv->device)) | 1563 | if (device_can_wakeup(priv->device)) { |
1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | 1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ |
1565 | enable_irq_wake(dev->irq); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
1567 | } | 1569 | } |
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11159 | break; /* We have no PHY */ | 11159 | break; /* We have no PHY */ |
11160 | 11160 | ||
11161 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11161 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11162 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11163 | !netif_running(dev))) | ||
11162 | return -EAGAIN; | 11164 | return -EAGAIN; |
11163 | 11165 | ||
11164 | spin_lock_bh(&tp->lock); | 11166 | spin_lock_bh(&tp->lock); |
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11174 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11176 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11175 | break; /* We have no PHY */ | 11177 | break; /* We have no PHY */ |
11176 | 11178 | ||
11177 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11179 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11180 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11181 | !netif_running(dev))) | ||
11178 | return -EAGAIN; | 11182 | return -EAGAIN; |
11179 | 11183 | ||
11180 | spin_lock_bh(&tp->lock); | 11184 | spin_lock_bh(&tp->lock); |
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2628,15 +2628,15 @@ exit: | |||
2628 | 2628 | ||
2629 | static void hso_free_tiomget(struct hso_serial *serial) | 2629 | static void hso_free_tiomget(struct hso_serial *serial) |
2630 | { | 2630 | { |
2631 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 2631 | struct hso_tiocmget *tiocmget; |
2632 | if (!serial) | ||
2633 | return; | ||
2634 | tiocmget = serial->tiocmget; | ||
2632 | if (tiocmget) { | 2635 | if (tiocmget) { |
2633 | if (tiocmget->urb) { | 2636 | usb_free_urb(tiocmget->urb); |
2634 | usb_free_urb(tiocmget->urb); | 2637 | tiocmget->urb = NULL; |
2635 | tiocmget->urb = NULL; | ||
2636 | } | ||
2637 | serial->tiocmget = NULL; | 2638 | serial->tiocmget = NULL; |
2638 | kfree(tiocmget); | 2639 | kfree(tiocmget); |
2639 | |||
2640 | } | 2640 | } |
2641 | } | 2641 | } |
2642 | 2642 | ||
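
In hso_free_tiomget() above, the old code fetched serial->tiocmget in the initializer, before any check of serial was possible; the fix tests serial first, assigns the pointer afterwards, and drops the inner urb test because usb_free_urb() already accepts NULL. A stand-alone sketch of the same ordering, with made-up names:

/* Not the hso code: never dereference a pointer before its NULL check,
 * and let the free helpers tolerate NULL so the body stays flat. */
#include <stdlib.h>

struct resource { void *buf; };
struct owner    { struct resource *res; };

static void owner_free_resource(struct owner *o)
{
        struct resource *res;

        if (!o)                 /* check first ... */
                return;
        res = o->res;           /* ... dereference second */
        if (res) {
                free(res->buf); /* free(NULL) is a no-op, like usb_free_urb(NULL) */
                o->res = NULL;
                free(res);
        }
}
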
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -931,8 +931,10 @@ fail_halt: | |||
931 | if (urb != NULL) { | 931 | if (urb != NULL) { |
932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); | 932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); |
933 | status = usb_autopm_get_interface(dev->intf); | 933 | status = usb_autopm_get_interface(dev->intf); |
934 | if (status < 0) | 934 | if (status < 0) { |
935 | usb_free_urb(urb); | ||
935 | goto fail_lowmem; | 936 | goto fail_lowmem; |
937 | } | ||
936 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) | 938 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) |
937 | resched = 0; | 939 | resched = 0; |
938 | usb_autopm_put_interface(dev->intf); | 940 | usb_autopm_put_interface(dev->intf); |
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, | |||
402 | } | 402 | } |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /** | ||
406 | * iwl3945_good_plcp_health - checks for plcp error. | ||
407 | * | ||
408 | * When the plcp error is exceeding the thresholds, reset the radio | ||
409 | * to improve the throughput. | ||
410 | */ | ||
411 | static bool iwl3945_good_plcp_health(struct iwl_priv *priv, | ||
412 | struct iwl_rx_packet *pkt) | ||
413 | { | ||
414 | bool rc = true; | ||
415 | struct iwl3945_notif_statistics current_stat; | ||
416 | int combined_plcp_delta; | ||
417 | unsigned int plcp_msec; | ||
418 | unsigned long plcp_received_jiffies; | ||
419 | |||
420 | if (priv->cfg->base_params->plcp_delta_threshold == | ||
421 | IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { | ||
422 | IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | memcpy(¤t_stat, pkt->u.raw, sizeof(struct | ||
426 | iwl3945_notif_statistics)); | ||
427 | /* | ||
428 | * check for plcp_err and trigger radio reset if it exceeds | ||
429 | * the plcp error threshold plcp_delta. | ||
430 | */ | ||
431 | plcp_received_jiffies = jiffies; | ||
432 | plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - | ||
433 | (long) priv->plcp_jiffies); | ||
434 | priv->plcp_jiffies = plcp_received_jiffies; | ||
435 | /* | ||
436 | * check to make sure plcp_msec is not 0 to prevent division | ||
437 | * by zero. | ||
438 | */ | ||
439 | if (plcp_msec) { | ||
440 | combined_plcp_delta = | ||
441 | (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - | ||
442 | le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); | ||
443 | |||
444 | if ((combined_plcp_delta > 0) && | ||
445 | ((combined_plcp_delta * 100) / plcp_msec) > | ||
446 | priv->cfg->base_params->plcp_delta_threshold) { | ||
447 | /* | ||
448 | * if plcp_err exceed the threshold, the following | ||
449 | * data is printed in csv format: | ||
450 | * Text: plcp_err exceeded %d, | ||
451 | * Received ofdm.plcp_err, | ||
452 | * Current ofdm.plcp_err, | ||
453 | * combined_plcp_delta, | ||
454 | * plcp_msec | ||
455 | */ | ||
456 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " | ||
457 | "%u, %d, %u mSecs\n", | ||
458 | priv->cfg->base_params->plcp_delta_threshold, | ||
459 | le32_to_cpu(current_stat.rx.ofdm.plcp_err), | ||
460 | combined_plcp_delta, plcp_msec); | ||
461 | /* | ||
462 | * Reset the RF radio due to the high plcp | ||
463 | * error rate | ||
464 | */ | ||
465 | rc = false; | ||
466 | } | ||
467 | } | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, | 405 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, |
472 | struct iwl_rx_mem_buffer *rxb) | 406 | struct iwl_rx_mem_buffer *rxb) |
473 | { | 407 | { |
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { | |||
2734 | .isr_ops = { | 2668 | .isr_ops = { |
2735 | .isr = iwl_isr_legacy, | 2669 | .isr = iwl_isr_legacy, |
2736 | }, | 2670 | }, |
2737 | .check_plcp_health = iwl3945_good_plcp_health, | ||
2738 | 2671 | ||
2739 | .debugfs_ops = { | 2672 | .debugfs_ops = { |
2740 | .rx_stats_read = iwl3945_ucode_rx_stats_read, | 2673 | .rx_stats_read = iwl3945_ucode_rx_stats_read, |
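
The block removed above was the 3945's PLCP health check: it scaled the growth in OFDM PLCP error counters to a 100 ms window and asked for a radio reset when that rate exceeded plcp_delta_threshold; with .check_plcp_health gone from iwl3945_lib the core no longer runs the check for this hardware. The stand-alone arithmetic below (made-up numbers, not driver code) shows what the deleted test computed.

/* Reworking of the deleted check's arithmetic: the delta in PLCP errors,
 * scaled to a 100 ms window, is compared against the threshold. */
#include <stdbool.h>
#include <stdio.h>

static bool plcp_health_ok(unsigned int prev_err, unsigned int cur_err,
                           unsigned int elapsed_msec, unsigned int threshold)
{
        int delta = (int)(cur_err - prev_err);

        if (!elapsed_msec)              /* avoid division by zero */
                return true;
        return !(delta > 0 &&
                 (delta * 100) / (int)elapsed_msec > (int)threshold);
}

int main(void)
{
        /* 250 new errors over 500 ms is 50 per 100 ms; with a threshold
         * of 40 the radio is unhealthy, so this prints 0. */
        printf("%d\n", plcp_health_ok(1000, 1250, 500, 40));
        return 0;
}
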
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cdd97192dc69..4941cade319f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -97,6 +97,18 @@ config RTC_INTF_DEV | |||
97 | 97 | ||
98 | If unsure, say Y. | 98 | If unsure, say Y. |
99 | 99 | ||
100 | config RTC_INTF_DEV_UIE_EMUL | ||
101 | bool "RTC UIE emulation on dev interface" | ||
102 | depends on RTC_INTF_DEV | ||
103 | help | ||
104 | Provides an emulation for RTC_UIE if the underlying rtc chip | ||
105 | driver does not expose RTC_UIE ioctls. Those requests generate | ||
106 | once-per-second update interrupts, used for synchronization. | ||
107 | |||
108 | The emulation code will read the time from the hardware | ||
109 | clock several times per second; please enable this option | ||
110 | only if you know that you really need it. | ||
111 | |||
100 | config RTC_DRV_TEST | 112 | config RTC_DRV_TEST |
101 | tristate "Test driver/device" | 113 | tristate "Test driver/device" |
102 | help | 114 | help |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a0c01967244d..cb2f0728fd70 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | if (err) | 211 | if (err) |
212 | return err; | 212 | /* nothing */; |
213 | 213 | else if (!rtc->ops) | |
214 | if (!rtc->ops) | ||
215 | err = -ENODEV; | 214 | err = -ENODEV; |
216 | else if (!rtc->ops->alarm_irq_enable) | 215 | else if (!rtc->ops->alarm_irq_enable) |
217 | err = -EINVAL; | 216 | err = -EINVAL; |
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
229 | if (err) | 228 | if (err) |
230 | return err; | 229 | return err; |
231 | 230 | ||
231 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
232 | if (enabled == 0 && rtc->uie_irq_active) { | ||
233 | mutex_unlock(&rtc->ops_lock); | ||
234 | return rtc_dev_update_irq_enable_emul(rtc, 0); | ||
235 | } | ||
236 | #endif | ||
232 | /* make sure we're changing state */ | 237 | /* make sure we're changing state */ |
233 | if (rtc->uie_rtctimer.enabled == enabled) | 238 | if (rtc->uie_rtctimer.enabled == enabled) |
234 | goto out; | 239 | goto out; |
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
248 | 253 | ||
249 | out: | 254 | out: |
250 | mutex_unlock(&rtc->ops_lock); | 255 | mutex_unlock(&rtc->ops_lock); |
256 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
257 | /* | ||
258 | * Enable emulation if the driver did not provide | ||
259 | * the update_irq_enable function pointer or if it returned | ||
260 | * -EINVAL to signal that it has been configured without | ||
261 | * interrupts or that they are not available at the moment. | ||
262 | */ | ||
263 | if (err == -EINVAL) | ||
264 | err = rtc_dev_update_irq_enable_emul(rtc, enabled); | ||
265 | #endif | ||
251 | return err; | 266 | return err; |
252 | 267 | ||
253 | } | 268 | } |
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable); | |||
263 | * | 278 | * |
264 | * Triggers the registered irq_task function callback. | 279 | * Triggers the registered irq_task function callback. |
265 | */ | 280 | */ |
266 | static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) | 281 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) |
267 | { | 282 | { |
268 | unsigned long flags; | 283 | unsigned long flags; |
269 | 284 | ||
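
With the hunks above, rtc_update_irq_enable() gains two paths into the dev-interface emulation: a disable request is routed straight to rtc_dev_update_irq_enable_emul() while the emulated interrupt is active (after dropping ops_lock, since the emulation manages its own state), and an -EINVAL on enable, meaning the driver has no usable update interrupt, falls through to the emulation. A stand-alone sketch of that fallback shape, with made-up function names:

/* Not kernel code: try the device's native operation first and switch to
 * a software emulation only when it reports -EINVAL. */
#include <errno.h>
#include <stdio.h>

static int native_update_irq(int enable)
{
        (void)enable;
        return -EINVAL;                 /* pretend the chip has no UIE */
}

static int emulated_update_irq(int enable)
{
        printf("emulation %s\n", enable ? "on" : "off");
        return 0;
}

static int update_irq_enable(int enable)
{
        int err = native_update_irq(enable);

        if (err == -EINVAL)             /* same condition as the patch */
                err = emulated_update_irq(enable);
        return err;
}

int main(void)
{
        return update_irq_enable(1);
}
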
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 37c3cc1b3dd5..d0e06edb14c5 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
46 | return err; | 46 | return err; |
47 | } | 47 | } |
48 | 48 | ||
49 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
50 | /* | ||
51 | * Routine to poll RTC seconds field for change as often as possible, | ||
52 | * after first RTC_UIE use timer to reduce polling | ||
53 | */ | ||
54 | static void rtc_uie_task(struct work_struct *work) | ||
55 | { | ||
56 | struct rtc_device *rtc = | ||
57 | container_of(work, struct rtc_device, uie_task); | ||
58 | struct rtc_time tm; | ||
59 | int num = 0; | ||
60 | int err; | ||
61 | |||
62 | err = rtc_read_time(rtc, &tm); | ||
63 | |||
64 | spin_lock_irq(&rtc->irq_lock); | ||
65 | if (rtc->stop_uie_polling || err) { | ||
66 | rtc->uie_task_active = 0; | ||
67 | } else if (rtc->oldsecs != tm.tm_sec) { | ||
68 | num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; | ||
69 | rtc->oldsecs = tm.tm_sec; | ||
70 | rtc->uie_timer.expires = jiffies + HZ - (HZ/10); | ||
71 | rtc->uie_timer_active = 1; | ||
72 | rtc->uie_task_active = 0; | ||
73 | add_timer(&rtc->uie_timer); | ||
74 | } else if (schedule_work(&rtc->uie_task) == 0) { | ||
75 | rtc->uie_task_active = 0; | ||
76 | } | ||
77 | spin_unlock_irq(&rtc->irq_lock); | ||
78 | if (num) | ||
79 | rtc_handle_legacy_irq(rtc, num, RTC_UF); | ||
80 | } | ||
81 | static void rtc_uie_timer(unsigned long data) | ||
82 | { | ||
83 | struct rtc_device *rtc = (struct rtc_device *)data; | ||
84 | unsigned long flags; | ||
85 | |||
86 | spin_lock_irqsave(&rtc->irq_lock, flags); | ||
87 | rtc->uie_timer_active = 0; | ||
88 | rtc->uie_task_active = 1; | ||
89 | if ((schedule_work(&rtc->uie_task) == 0)) | ||
90 | rtc->uie_task_active = 0; | ||
91 | spin_unlock_irqrestore(&rtc->irq_lock, flags); | ||
92 | } | ||
93 | |||
94 | static int clear_uie(struct rtc_device *rtc) | ||
95 | { | ||
96 | spin_lock_irq(&rtc->irq_lock); | ||
97 | if (rtc->uie_irq_active) { | ||
98 | rtc->stop_uie_polling = 1; | ||
99 | if (rtc->uie_timer_active) { | ||
100 | spin_unlock_irq(&rtc->irq_lock); | ||
101 | del_timer_sync(&rtc->uie_timer); | ||
102 | spin_lock_irq(&rtc->irq_lock); | ||
103 | rtc->uie_timer_active = 0; | ||
104 | } | ||
105 | if (rtc->uie_task_active) { | ||
106 | spin_unlock_irq(&rtc->irq_lock); | ||
107 | flush_scheduled_work(); | ||
108 | spin_lock_irq(&rtc->irq_lock); | ||
109 | } | ||
110 | rtc->uie_irq_active = 0; | ||
111 | } | ||
112 | spin_unlock_irq(&rtc->irq_lock); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int set_uie(struct rtc_device *rtc) | ||
117 | { | ||
118 | struct rtc_time tm; | ||
119 | int err; | ||
120 | |||
121 | err = rtc_read_time(rtc, &tm); | ||
122 | if (err) | ||
123 | return err; | ||
124 | spin_lock_irq(&rtc->irq_lock); | ||
125 | if (!rtc->uie_irq_active) { | ||
126 | rtc->uie_irq_active = 1; | ||
127 | rtc->stop_uie_polling = 0; | ||
128 | rtc->oldsecs = tm.tm_sec; | ||
129 | rtc->uie_task_active = 1; | ||
130 | if (schedule_work(&rtc->uie_task) == 0) | ||
131 | rtc->uie_task_active = 0; | ||
132 | } | ||
133 | rtc->irq_data = 0; | ||
134 | spin_unlock_irq(&rtc->irq_lock); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) | ||
139 | { | ||
140 | if (enabled) | ||
141 | return set_uie(rtc); | ||
142 | else | ||
143 | return clear_uie(rtc); | ||
144 | } | ||
145 | EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); | ||
146 | |||
147 | #endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ | ||
49 | 148 | ||
50 | static ssize_t | 149 | static ssize_t |
51 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 150 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
@@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc) | |||
387 | 486 | ||
388 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); | 487 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); |
389 | 488 | ||
489 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
490 | INIT_WORK(&rtc->uie_task, rtc_uie_task); | ||
491 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); | ||
492 | #endif | ||
493 | |||
390 | cdev_init(&rtc->char_dev, &rtc_dev_fops); | 494 | cdev_init(&rtc->char_dev, &rtc_dev_fops); |
391 | rtc->char_dev.owner = rtc->owner; | 495 | rtc->char_dev.owner = rtc->owner; |
392 | } | 496 | } |
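
The block added above implements UIE emulation for drivers without a real update interrupt: rtc_uie_task() polls the seconds field, rearms a timer slightly short of one second (HZ - HZ/10) once a change is seen, and reports the elapsed updates through rtc_handle_legacy_irq(). The wrap-around count it relies on is worth spelling out; a stand-alone check (plain C, not the kernel code):

/* (tm_sec + 60 - oldsecs) % 60: the number of once-per-second updates
 * that elapsed between two polls of the seconds field, modulo a minute. */
#include <stdio.h>

static int missed_updates(int oldsecs, int newsecs)
{
        return (newsecs + 60 - oldsecs) % 60;
}

int main(void)
{
        printf("%d\n", missed_updates(58, 1));  /* wrapped past :00 -> 3 */
        printf("%d\n", missed_updates(12, 13)); /* normal case      -> 1 */
        return 0;
}
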
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 318672d05563..a9fe23d5bd0f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline; | |||
72 | static struct ccw_device_id dasd_eckd_ids[] = { | 72 | static struct ccw_device_id dasd_eckd_ids[] = { |
73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, | 73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, |
74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, | 74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, |
75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, | 75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, |
76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, | 76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, |
77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, | 77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, |
78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, | 78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, |
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c index 351d8a375b57..19752b09e155 100644 --- a/drivers/spi/pxa2xx_spi_pci.c +++ b/drivers/spi/pxa2xx_spi_pci.c | |||
@@ -7,10 +7,9 @@ | |||
7 | #include <linux/of_device.h> | 7 | #include <linux/of_device.h> |
8 | #include <linux/spi/pxa2xx_spi.h> | 8 | #include <linux/spi/pxa2xx_spi.h> |
9 | 9 | ||
10 | struct awesome_struct { | 10 | struct ce4100_info { |
11 | struct ssp_device ssp; | 11 | struct ssp_device ssp; |
12 | struct platform_device spi_pdev; | 12 | struct platform_device *spi_pdev; |
13 | struct pxa2xx_spi_master spi_pdata; | ||
14 | }; | 13 | }; |
15 | 14 | ||
16 | static DEFINE_MUTEX(ssp_lock); | 15 | static DEFINE_MUTEX(ssp_lock); |
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp) | |||
51 | } | 50 | } |
52 | EXPORT_SYMBOL_GPL(pxa_ssp_free); | 51 | EXPORT_SYMBOL_GPL(pxa_ssp_free); |
53 | 52 | ||
54 | static void plat_dev_release(struct device *dev) | ||
55 | { | ||
56 | struct awesome_struct *as = container_of(dev, | ||
57 | struct awesome_struct, spi_pdev.dev); | ||
58 | |||
59 | of_device_node_put(&as->spi_pdev.dev); | ||
60 | } | ||
61 | |||
62 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, | 53 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, |
63 | const struct pci_device_id *ent) | 54 | const struct pci_device_id *ent) |
64 | { | 55 | { |
65 | int ret; | 56 | int ret; |
66 | resource_size_t phys_beg; | 57 | resource_size_t phys_beg; |
67 | resource_size_t phys_len; | 58 | resource_size_t phys_len; |
68 | struct awesome_struct *spi_info; | 59 | struct ce4100_info *spi_info; |
69 | struct platform_device *pdev; | 60 | struct platform_device *pdev; |
70 | struct pxa2xx_spi_master *spi_pdata; | 61 | struct pxa2xx_spi_master spi_pdata; |
71 | struct ssp_device *ssp; | 62 | struct ssp_device *ssp; |
72 | 63 | ||
73 | ret = pci_enable_device(dev); | 64 | ret = pci_enable_device(dev); |
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
84 | return ret; | 75 | return ret; |
85 | } | 76 | } |
86 | 77 | ||
78 | pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); | ||
87 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); | 79 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); |
88 | if (!spi_info) { | 80 | if (!pdev || !spi_info ) { |
89 | ret = -ENOMEM; | 81 | ret = -ENOMEM; |
90 | goto err_kz; | 82 | goto err_nomem; |
91 | } | 83 | } |
92 | ssp = &spi_info->ssp; | 84 | memset(&spi_pdata, 0, sizeof(spi_pdata)); |
93 | pdev = &spi_info->spi_pdev; | 85 | spi_pdata.num_chipselect = dev->devfn; |
94 | spi_pdata = &spi_info->spi_pdata; | ||
95 | 86 | ||
96 | pdev->name = "pxa2xx-spi"; | 87 | ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); |
97 | pdev->id = dev->devfn; | 88 | if (ret) |
98 | pdev->dev.parent = &dev->dev; | 89 | goto err_nomem; |
99 | pdev->dev.platform_data = &spi_info->spi_pdata; | ||
100 | 90 | ||
91 | pdev->dev.parent = &dev->dev; | ||
101 | #ifdef CONFIG_OF | 92 | #ifdef CONFIG_OF |
102 | pdev->dev.of_node = dev->dev.of_node; | 93 | pdev->dev.of_node = dev->dev.of_node; |
103 | #endif | 94 | #endif |
104 | pdev->dev.release = plat_dev_release; | 95 | ssp = &spi_info->ssp; |
105 | |||
106 | spi_pdata->num_chipselect = dev->devfn; | ||
107 | |||
108 | ssp->phys_base = pci_resource_start(dev, 0); | 96 | ssp->phys_base = pci_resource_start(dev, 0); |
109 | ssp->mmio_base = ioremap(phys_beg, phys_len); | 97 | ssp->mmio_base = ioremap(phys_beg, phys_len); |
110 | if (!ssp->mmio_base) { | 98 | if (!ssp->mmio_base) { |
111 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); | 99 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); |
112 | ret = -EIO; | 100 | ret = -EIO; |
113 | goto err_remap; | 101 | goto err_nomem; |
114 | } | 102 | } |
115 | ssp->irq = dev->irq; | 103 | ssp->irq = dev->irq; |
116 | ssp->port_id = pdev->id; | 104 | ssp->port_id = pdev->id; |
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
122 | 110 | ||
123 | pci_set_drvdata(dev, spi_info); | 111 | pci_set_drvdata(dev, spi_info); |
124 | 112 | ||
125 | ret = platform_device_register(pdev); | 113 | ret = platform_device_add(pdev); |
126 | if (ret) | 114 | if (ret) |
127 | goto err_dev_add; | 115 | goto err_dev_add; |
128 | 116 | ||
@@ -135,27 +123,21 @@ err_dev_add: | |||
135 | mutex_unlock(&ssp_lock); | 123 | mutex_unlock(&ssp_lock); |
136 | iounmap(ssp->mmio_base); | 124 | iounmap(ssp->mmio_base); |
137 | 125 | ||
138 | err_remap: | 126 | err_nomem: |
139 | kfree(spi_info); | ||
140 | |||
141 | err_kz: | ||
142 | release_mem_region(phys_beg, phys_len); | 127 | release_mem_region(phys_beg, phys_len); |
143 | 128 | platform_device_put(pdev); | |
129 | kfree(spi_info); | ||
144 | return ret; | 130 | return ret; |
145 | } | 131 | } |
146 | 132 | ||
147 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) | 133 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) |
148 | { | 134 | { |
149 | struct awesome_struct *spi_info; | 135 | struct ce4100_info *spi_info; |
150 | struct platform_device *pdev; | ||
151 | struct ssp_device *ssp; | 136 | struct ssp_device *ssp; |
152 | 137 | ||
153 | spi_info = pci_get_drvdata(dev); | 138 | spi_info = pci_get_drvdata(dev); |
154 | |||
155 | ssp = &spi_info->ssp; | 139 | ssp = &spi_info->ssp; |
156 | pdev = &spi_info->spi_pdev; | 140 | platform_device_unregister(spi_info->spi_pdev); |
157 | |||
158 | platform_device_unregister(pdev); | ||
159 | 141 | ||
160 | iounmap(ssp->mmio_base); | 142 | iounmap(ssp->mmio_base); |
161 | release_mem_region(pci_resource_start(dev, 0), | 143 | release_mem_region(pci_resource_start(dev, 0), |
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev) | |||
171 | } | 153 | } |
172 | 154 | ||
173 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { | 155 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { |
174 | |||
175 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, | 156 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, |
176 | { }, | 157 | { }, |
177 | }; | 158 | }; |
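
The CE4100 glue above stops embedding a struct platform_device inside its own kzalloc()'d wrapper, which defeated the driver core's reference counting and required a hand-rolled release callback, and instead lets the core allocate the device and copy the platform data. A kernel-style sketch of that lifecycle follows; it only builds inside a kernel tree, and the "demo" names are invented.

/* Sketch of the allocation pattern the patch switches to: the device is
 * allocated by the core, platform data is copied with
 * platform_device_add_data(), and the error path drops the reference
 * with platform_device_put() instead of kfree(). */
#include <linux/platform_device.h>
#include <linux/errno.h>

static int demo_register_child(struct device *parent)
{
        struct platform_device *pdev;
        int some_pdata = 42;            /* stand-in for pxa2xx_spi_master */
        int ret;

        pdev = platform_device_alloc("demo-child", 0);
        if (!pdev)
                return -ENOMEM;

        ret = platform_device_add_data(pdev, &some_pdata, sizeof(some_pdata));
        if (ret)
                goto err_put;

        pdev->dev.parent = parent;

        ret = platform_device_add(pdev);
        if (ret)
                goto err_put;
        return 0;

err_put:
        platform_device_put(pdev);      /* safe before platform_device_add() */
        return ret;
}

Once platform_device_add() has succeeded, teardown goes through platform_device_unregister(), which is what the reworked ce4100_spi_remove() calls.
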
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8d..7b951adac54b 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c | |||
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) | |||
601 | s->rts = 0; | 601 | s->rts = 0; |
602 | 602 | ||
603 | sprintf(b, "max3100-%d", s->minor); | 603 | sprintf(b, "max3100-%d", s->minor); |
604 | s->workqueue = create_freezeable_workqueue(b); | 604 | s->workqueue = create_freezable_workqueue(b); |
605 | if (!s->workqueue) { | 605 | if (!s->workqueue) { |
606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); | 606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); |
607 | return -EBUSY; | 607 | return -EBUSY; |
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf708..750b4f627315 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c | |||
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) | |||
833 | struct max3107_port *s = container_of(port, struct max3107_port, port); | 833 | struct max3107_port *s = container_of(port, struct max3107_port, port); |
834 | 834 | ||
835 | /* Initialize work queue */ | 835 | /* Initialize work queue */ |
836 | s->workqueue = create_freezeable_workqueue("max3107"); | 836 | s->workqueue = create_freezable_workqueue("max3107"); |
837 | if (!s->workqueue) { | 837 | if (!s->workqueue) { |
838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); | 838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); |
839 | return -EBUSY; | 839 | return -EBUSY; |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index db8c4c4ac880..24177272bcb8 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; | |||
37 | #ifdef CONFIG_PM_SLEEP | 37 | #ifdef CONFIG_PM_SLEEP |
38 | static int xen_hvm_suspend(void *data) | 38 | static int xen_hvm_suspend(void *data) |
39 | { | 39 | { |
40 | int err; | ||
40 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; | 41 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; |
41 | int *cancelled = data; | 42 | int *cancelled = data; |
42 | 43 | ||
43 | BUG_ON(!irqs_disabled()); | 44 | BUG_ON(!irqs_disabled()); |
44 | 45 | ||
46 | err = sysdev_suspend(PMSG_SUSPEND); | ||
47 | if (err) { | ||
48 | printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", | ||
49 | err); | ||
50 | return err; | ||
51 | } | ||
52 | |||
45 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); | 53 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); |
46 | 54 | ||
47 | xen_hvm_post_suspend(*cancelled); | 55 | xen_hvm_post_suspend(*cancelled); |
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data) | |||
53 | xen_timer_resume(); | 61 | xen_timer_resume(); |
54 | } | 62 | } |
55 | 63 | ||
64 | sysdev_resume(); | ||
65 | |||
56 | return 0; | 66 | return 0; |
57 | } | 67 | } |
58 | 68 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..4fb8a3431531 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1215,12 +1215,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) | |||
1215 | 1215 | ||
1216 | res = __blkdev_get(bdev, mode, 0); | 1216 | res = __blkdev_get(bdev, mode, 0); |
1217 | 1217 | ||
1218 | /* __blkdev_get() may alter read only status, check it afterwards */ | ||
1219 | if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1220 | __blkdev_put(bdev, mode, 0); | ||
1221 | res = -EACCES; | ||
1222 | } | ||
1223 | |||
1224 | if (whole) { | 1218 | if (whole) { |
1225 | /* finish claiming */ | 1219 | /* finish claiming */ |
1226 | mutex_lock(&bdev->bd_mutex); | 1220 | mutex_lock(&bdev->bd_mutex); |
@@ -1298,6 +1292,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, | |||
1298 | if (err) | 1292 | if (err) |
1299 | return ERR_PTR(err); | 1293 | return ERR_PTR(err); |
1300 | 1294 | ||
1295 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1296 | blkdev_put(bdev, mode); | ||
1297 | return ERR_PTR(-EACCES); | ||
1298 | } | ||
1299 | |||
1301 | return bdev; | 1300 | return bdev; |
1302 | } | 1301 | } |
1303 | EXPORT_SYMBOL(blkdev_get_by_path); | 1302 | EXPORT_SYMBOL(blkdev_get_by_path); |
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e6..7cd9a5a68d59 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) | |||
1779 | #endif | 1779 | #endif |
1780 | 1780 | ||
1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | | 1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | |
1782 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | 1782 | WQ_HIGHPRI | WQ_FREEZABLE, 0); |
1783 | if (IS_ERR(glock_workqueue)) | 1783 | if (IS_ERR(glock_workqueue)) |
1784 | return PTR_ERR(glock_workqueue); | 1784 | return PTR_ERR(glock_workqueue); |
1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", | 1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", |
1786 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, | 1786 | WQ_MEM_RECLAIM | WQ_FREEZABLE, |
1787 | 0); | 1787 | 0); |
1788 | if (IS_ERR(gfs2_delete_workqueue)) { | 1788 | if (IS_ERR(gfs2_delete_workqueue)) { |
1789 | destroy_workqueue(glock_workqueue); | 1789 | destroy_workqueue(glock_workqueue); |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17e..85ba027d1c4d 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void) | |||
144 | 144 | ||
145 | error = -ENOMEM; | 145 | error = -ENOMEM; |
146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", | 146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", |
147 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); | 147 | WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); |
148 | if (!gfs_recovery_wq) | 148 | if (!gfs_recovery_wq) |
149 | goto fail_wq; | 149 | goto fail_wq; |
150 | 150 | ||
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc9..1275b8655070 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
317 | READ_BUF(dummy32); | 317 | READ_BUF(dummy32); |
318 | len += (XDR_QUADLEN(dummy32) << 2); | 318 | len += (XDR_QUADLEN(dummy32) << 2); |
319 | READMEM(buf, dummy32); | 319 | READMEM(buf, dummy32); |
320 | if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) | 320 | if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) |
321 | goto out_nfserr; | 321 | return status; |
322 | iattr->ia_valid |= ATTR_UID; | 322 | iattr->ia_valid |= ATTR_UID; |
323 | } | 323 | } |
324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { | 324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { |
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
328 | READ_BUF(dummy32); | 328 | READ_BUF(dummy32); |
329 | len += (XDR_QUADLEN(dummy32) << 2); | 329 | len += (XDR_QUADLEN(dummy32) << 2); |
330 | READMEM(buf, dummy32); | 330 | READMEM(buf, dummy32); |
331 | if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) | 331 | if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) |
332 | goto out_nfserr; | 332 | return status; |
333 | iattr->ia_valid |= ATTR_GID; | 333 | iattr->ia_valid |= ATTR_GID; |
334 | } | 334 | } |
335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { | 335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { |
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee79..11f688bd76c5 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c | |||
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) | |||
29 | 29 | ||
30 | int mac_partition(struct parsed_partitions *state) | 30 | int mac_partition(struct parsed_partitions *state) |
31 | { | 31 | { |
32 | int slot = 1; | ||
33 | Sector sect; | 32 | Sector sect; |
34 | unsigned char *data; | 33 | unsigned char *data; |
35 | int blk, blocks_in_map; | 34 | int slot, blocks_in_map; |
36 | unsigned secsize; | 35 | unsigned secsize; |
37 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
38 | int found_root = 0; | 37 | int found_root = 0; |
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) | |||
59 | put_dev_sector(sect); | 58 | put_dev_sector(sect); |
60 | return 0; /* not a MacOS disk */ | 59 | return 0; /* not a MacOS disk */ |
61 | } | 60 | } |
62 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
63 | blocks_in_map = be32_to_cpu(part->map_count); | 61 | blocks_in_map = be32_to_cpu(part->map_count); |
64 | for (blk = 1; blk <= blocks_in_map; ++blk) { | 62 | if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { |
65 | int pos = blk * secsize; | 63 | put_dev_sector(sect); |
64 | return 0; | ||
65 | } | ||
66 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
67 | for (slot = 1; slot <= blocks_in_map; ++slot) { | ||
68 | int pos = slot * secsize; | ||
66 | put_dev_sector(sect); | 69 | put_dev_sector(sect); |
67 | data = read_part_sector(state, pos/512, §); | 70 | data = read_part_sector(state, pos/512, §); |
68 | if (!data) | 71 | if (!data) |
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) | |||
113 | } | 116 | } |
114 | 117 | ||
115 | if (goodness > found_root_goodness) { | 118 | if (goodness > found_root_goodness) { |
116 | found_root = blk; | 119 | found_root = slot; |
117 | found_root_goodness = goodness; | 120 | found_root_goodness = goodness; |
118 | } | 121 | } |
119 | } | 122 | } |
120 | #endif /* CONFIG_PPC_PMAC */ | 123 | #endif /* CONFIG_PPC_PMAC */ |
121 | |||
122 | ++slot; | ||
123 | } | 124 | } |
124 | #ifdef CONFIG_PPC_PMAC | 125 | #ifdef CONFIG_PPC_PMAC |
125 | if (found_root_goodness) | 126 | if (found_root_goodness) |
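
Besides folding the redundant blk counter into slot, the mac partition parser above now range-checks map_count, a value read straight from the on-disk partition map, before using it as a loop bound, so a corrupted or hostile map can no longer drive an effectively unbounded series of sector reads. A stand-alone illustration of the pattern (DISK_MAX_PARTS mirrors the kernel limit):

/* A count taken from untrusted on-disk data is validated before it is
 * used as a loop bound. */
#include <stdio.h>
#include <stdint.h>

#define DISK_MAX_PARTS 256

static int parse_map(int32_t blocks_in_map)
{
        if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS)
                return 0;               /* reject corrupt or hostile maps */
        for (int slot = 1; slot <= blocks_in_map; ++slot)
                ;                       /* ... read and register slot ... */
        return 1;
}

int main(void)
{
        printf("%d %d\n", parse_map(12), parse_map(0x7fffffff));
        return 0;
}
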
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index da7e52b099f3..1effc8b56b4e 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -109,7 +109,7 @@ static inline void freezer_count(void) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Check if the task should be counted as freezeable by the freezer | 112 | * Check if the task should be counted as freezable by the freezer |
113 | */ | 113 | */ |
114 | static inline int freezer_should_skip(struct task_struct *p) | 114 | static inline int freezer_should_skip(struct task_struct *p) |
115 | { | 115 | { |
diff --git a/include/linux/list.h b/include/linux/list.h index 9a5f8a71810c..3a54266a1e85 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) | |||
96 | * in an undefined state. | 96 | * in an undefined state. |
97 | */ | 97 | */ |
98 | #ifndef CONFIG_DEBUG_LIST | 98 | #ifndef CONFIG_DEBUG_LIST |
99 | static inline void __list_del_entry(struct list_head *entry) | ||
100 | { | ||
101 | __list_del(entry->prev, entry->next); | ||
102 | } | ||
103 | |||
99 | static inline void list_del(struct list_head *entry) | 104 | static inline void list_del(struct list_head *entry) |
100 | { | 105 | { |
101 | __list_del(entry->prev, entry->next); | 106 | __list_del(entry->prev, entry->next); |
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry) | |||
103 | entry->prev = LIST_POISON2; | 108 | entry->prev = LIST_POISON2; |
104 | } | 109 | } |
105 | #else | 110 | #else |
111 | extern void __list_del_entry(struct list_head *entry); | ||
106 | extern void list_del(struct list_head *entry); | 112 | extern void list_del(struct list_head *entry); |
107 | #endif | 113 | #endif |
108 | 114 | ||
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old, | |||
135 | */ | 141 | */ |
136 | static inline void list_del_init(struct list_head *entry) | 142 | static inline void list_del_init(struct list_head *entry) |
137 | { | 143 | { |
138 | __list_del(entry->prev, entry->next); | 144 | __list_del_entry(entry); |
139 | INIT_LIST_HEAD(entry); | 145 | INIT_LIST_HEAD(entry); |
140 | } | 146 | } |
141 | 147 | ||
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry) | |||
146 | */ | 152 | */ |
147 | static inline void list_move(struct list_head *list, struct list_head *head) | 153 | static inline void list_move(struct list_head *list, struct list_head *head) |
148 | { | 154 | { |
149 | __list_del(list->prev, list->next); | 155 | __list_del_entry(list); |
150 | list_add(list, head); | 156 | list_add(list, head); |
151 | } | 157 | } |
152 | 158 | ||
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head) | |||
158 | static inline void list_move_tail(struct list_head *list, | 164 | static inline void list_move_tail(struct list_head *list, |
159 | struct list_head *head) | 165 | struct list_head *head) |
160 | { | 166 | { |
161 | __list_del(list->prev, list->next); | 167 | __list_del_entry(list); |
162 | list_add_tail(list, head); | 168 | list_add_tail(list, head); |
163 | } | 169 | } |
164 | 170 | ||
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index a0b639f8e805..89c3e5182991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -203,6 +203,18 @@ struct rtc_device | |||
203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ | 203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ |
204 | int pie_enabled; | 204 | int pie_enabled; |
205 | struct work_struct irqwork; | 205 | struct work_struct irqwork; |
206 | |||
207 | |||
208 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
209 | struct work_struct uie_task; | ||
210 | struct timer_list uie_timer; | ||
211 | /* Those fields are protected by rtc->irq_lock */ | ||
212 | unsigned int oldsecs; | ||
213 | unsigned int uie_irq_active:1; | ||
214 | unsigned int stop_uie_polling:1; | ||
215 | unsigned int uie_task_active:1; | ||
216 | unsigned int uie_timer_active:1; | ||
217 | #endif | ||
206 | }; | 218 | }; |
207 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) | 219 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) |
208 | 220 | ||
@@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, | |||
235 | struct rtc_task *task, int freq); | 247 | struct rtc_task *task, int freq); |
236 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 248 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
237 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 249 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
250 | extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, | ||
251 | unsigned int enabled); | ||
238 | 252 | ||
253 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); | ||
239 | void rtc_aie_update_irq(void *private); | 254 | void rtc_aie_update_irq(void *private); |
240 | void rtc_uie_update_irq(void *private); | 255 | void rtc_uie_update_irq(void *private); |
241 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); | 256 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34e..777d8a5ed06b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | 1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ |
1749 | 1749 | ||
1750 | /* | 1750 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1ac11586a2f5..f7998a3bf020 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
250 | enum { | 250 | enum { |
251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
253 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | 253 | WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ |
254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ | 254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
255 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 255 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ | 256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ |
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | |||
318 | /** | 318 | /** |
319 | * alloc_ordered_workqueue - allocate an ordered workqueue | 319 | * alloc_ordered_workqueue - allocate an ordered workqueue |
320 | * @name: name of the workqueue | 320 | * @name: name of the workqueue |
321 | * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) | 321 | * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) |
322 | * | 322 | * |
323 | * Allocate an ordered workqueue. An ordered workqueue executes at | 323 | * Allocate an ordered workqueue. An ordered workqueue executes at |
324 | * most one work item at any given time in the queued order. They are | 324 | * most one work item at any given time in the queued order. They are |
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) | |||
335 | 335 | ||
336 | #define create_workqueue(name) \ | 336 | #define create_workqueue(name) \ |
337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) | 337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) |
338 | #define create_freezeable_workqueue(name) \ | 338 | #define create_freezable_workqueue(name) \ |
339 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 339 | alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
340 | #define create_singlethread_workqueue(name) \ | 340 | #define create_singlethread_workqueue(name) \ |
341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
342 | 342 | ||
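
The WQ_FREEZEABLE / create_freezeable_workqueue spellings are renamed to WQ_FREEZABLE / create_freezable_workqueue throughout this series; behaviour is unchanged, but no compatibility alias is kept, so any out-of-tree caller has to follow the rename. A kernel-style sketch of the new spelling in use (builds only in a kernel tree):

#include <linux/workqueue.h>
#include <linux/errno.h>

static struct workqueue_struct *demo_wq;

static int demo_init(void)
{
        /* formerly: create_freezeable_workqueue("demo") */
        demo_wq = create_freezable_workqueue("demo");
        return demo_wq ? 0 : -ENOMEM;
}
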
diff --git a/kernel/power/main.c b/kernel/power/main.c index 7b5db6a8561e..701853042c28 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq); | |||
326 | 326 | ||
327 | static int __init pm_start_workqueue(void) | 327 | static int __init pm_start_workqueue(void) |
328 | { | 328 | { |
329 | pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); | 329 | pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); |
330 | 330 | ||
331 | return pm_wq ? 0 : -ENOMEM; | 331 | return pm_wq ? 0 : -ENOMEM; |
332 | } | 332 | } |
diff --git a/kernel/power/process.c b/kernel/power/process.c index d6d2a10320e0..0cf3a27a6c9d 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | #define TIMEOUT (20 * HZ) | 23 | #define TIMEOUT (20 * HZ) |
24 | 24 | ||
25 | static inline int freezeable(struct task_struct * p) | 25 | static inline int freezable(struct task_struct * p) |
26 | { | 26 | { |
27 | if ((p == current) || | 27 | if ((p == current) || |
28 | (p->flags & PF_NOFREEZE) || | 28 | (p->flags & PF_NOFREEZE) || |
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
53 | todo = 0; | 53 | todo = 0; |
54 | read_lock(&tasklist_lock); | 54 | read_lock(&tasklist_lock); |
55 | do_each_thread(g, p) { | 55 | do_each_thread(g, p) { |
56 | if (frozen(p) || !freezeable(p)) | 56 | if (frozen(p) || !freezable(p)) |
57 | continue; | 57 | continue; |
58 | 58 | ||
59 | if (!freeze_task(p, sig_only)) | 59 | if (!freeze_task(p, sig_only)) |
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only) | |||
167 | 167 | ||
168 | read_lock(&tasklist_lock); | 168 | read_lock(&tasklist_lock); |
169 | do_each_thread(g, p) { | 169 | do_each_thread(g, p) { |
170 | if (!freezeable(p)) | 170 | if (!freezable(p)) |
171 | continue; | 171 | continue; |
172 | 172 | ||
173 | if (nosig_only && should_send_signal(p)) | 173 | if (nosig_only && should_send_signal(p)) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0dac75ea4456..64db648ff911 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1519,11 +1519,8 @@ static int | |||
1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | 1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, |
1520 | unsigned int nr_pages, unsigned int nr_highmem) | 1520 | unsigned int nr_pages, unsigned int nr_highmem) |
1521 | { | 1521 | { |
1522 | int error = 0; | ||
1523 | |||
1524 | if (nr_highmem > 0) { | 1522 | if (nr_highmem > 0) { |
1525 | error = get_highmem_buffer(PG_ANY); | 1523 | if (get_highmem_buffer(PG_ANY)) |
1526 | if (error) | ||
1527 | goto err_out; | 1524 | goto err_out; |
1528 | if (nr_highmem > alloc_highmem) { | 1525 | if (nr_highmem > alloc_highmem) { |
1529 | nr_highmem -= alloc_highmem; | 1526 | nr_highmem -= alloc_highmem; |
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | |||
1546 | 1543 | ||
1547 | err_out: | 1544 | err_out: |
1548 | swsusp_free(); | 1545 | swsusp_free(); |
1549 | return error; | 1546 | return -ENOMEM; |
1550 | } | 1547 | } |
1551 | 1548 | ||
1552 | asmlinkage int swsusp_save(void) | 1549 | asmlinkage int swsusp_save(void) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa6819..ee6578b578ad 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -79,7 +79,9 @@ enum { | |||
79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ | 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ |
80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ | 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ |
81 | 81 | ||
82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ | 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, |
83 | /* call for help after 10ms | ||
84 | (min two ticks) */ | ||
83 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ | 85 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ |
84 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ | 86 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ |
85 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ | 87 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ |
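
The MAYDAY_INITIAL_TIMEOUT change just above guards against coarse tick rates: with HZ=100 the old HZ / 100 came to a single jiffy, and a timer armed one jiffy out can expire on the very next tick, i.e. almost immediately, summoning the rescuer before allocation has had a real chance. The new expression keeps a minimum of two ticks; a stand-alone check of the arithmetic:

/* HZ / 100 collapses to one tick on low-HZ configurations; the ternary
 * clamps the mayday timeout to at least two ticks. */
#include <stdio.h>

static long mayday_initial_timeout(long hz)
{
        return hz / 100 >= 2 ? hz / 100 : 2;
}

int main(void)
{
        long hz[] = { 100, 250, 300, 1000 };

        for (int i = 0; i < 4; i++)
                printf("HZ=%-4ld -> %ld ticks\n", hz[i],
                       mayday_initial_timeout(hz[i]));
        return 0;
}
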
@@ -2047,6 +2049,15 @@ repeat: | |||
2047 | move_linked_works(work, scheduled, &n); | 2049 | move_linked_works(work, scheduled, &n); |
2048 | 2050 | ||
2049 | process_scheduled_works(rescuer); | 2051 | process_scheduled_works(rescuer); |
2052 | |||
2053 | /* | ||
2054 | * Leave this gcwq. If keep_working() is %true, notify a | ||
2055 | * regular worker; otherwise, we end up with 0 concurrency | ||
2056 | * and stalling the execution. | ||
2057 | */ | ||
2058 | if (keep_working(gcwq)) | ||
2059 | wake_up_worker(gcwq); | ||
2060 | |||
2050 | spin_unlock_irq(&gcwq->lock); | 2061 | spin_unlock_irq(&gcwq->lock); |
2051 | } | 2062 | } |
2052 | 2063 | ||
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2956 | */ | 2967 | */ |
2957 | spin_lock(&workqueue_lock); | 2968 | spin_lock(&workqueue_lock); |
2958 | 2969 | ||
2959 | if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
2960 | for_each_cwq_cpu(cpu, wq) | 2971 | for_each_cwq_cpu(cpu, wq) |
2961 | get_cwq(cpu, wq)->max_active = 0; | 2972 | get_cwq(cpu, wq)->max_active = 0; |
2962 | 2973 | ||
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
3068 | 3079 | ||
3069 | spin_lock_irq(&gcwq->lock); | 3080 | spin_lock_irq(&gcwq->lock); |
3070 | 3081 | ||
3071 | if (!(wq->flags & WQ_FREEZEABLE) || | 3082 | if (!(wq->flags & WQ_FREEZABLE) || |
3072 | !(gcwq->flags & GCWQ_FREEZING)) | 3083 | !(gcwq->flags & GCWQ_FREEZING)) |
3073 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
3074 | 3085 | ||
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3318 | * want to get it over with ASAP - spam rescuers, wake up as | 3329 | * want to get it over with ASAP - spam rescuers, wake up as |
3319 | * many idlers as necessary and create new ones till the | 3330 | * many idlers as necessary and create new ones till the |
3320 | * worklist is empty. Note that if the gcwq is frozen, there | 3331 | * worklist is empty. Note that if the gcwq is frozen, there |
3321 | * may be frozen works in freezeable cwqs. Don't declare | 3332 | * may be frozen works in freezable cwqs. Don't declare |
3322 | * completion while frozen. | 3333 | * completion while frozen. |
3323 | */ | 3334 | */ |
3324 | while (gcwq->nr_workers != gcwq->nr_idle || | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || |
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
3576 | /** | 3587 | /** |
3577 | * freeze_workqueues_begin - begin freezing workqueues | 3588 | * freeze_workqueues_begin - begin freezing workqueues |
3578 | * | 3589 | * |
3579 | * Start freezing workqueues. After this function returns, all | 3590 | * Start freezing workqueues. After this function returns, all freezable |
3580 | * freezeable workqueues will queue new works to their frozen_works | 3591 | * workqueues will queue new works to their frozen_works list instead of |
3581 | * list instead of gcwq->worklist. | 3592 | * gcwq->worklist. |
3582 | * | 3593 | * |
3583 | * CONTEXT: | 3594 | * CONTEXT: |
3584 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. |
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) | |||
3604 | list_for_each_entry(wq, &workqueues, list) { | 3615 | list_for_each_entry(wq, &workqueues, list) { |
3605 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3606 | 3617 | ||
3607 | if (cwq && wq->flags & WQ_FREEZEABLE) | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) |
3608 | cwq->max_active = 0; | 3619 | cwq->max_active = 0; |
3609 | } | 3620 | } |
3610 | 3621 | ||
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) | |||
3615 | } | 3626 | } |
3616 | 3627 | ||
3617 | /** | 3628 | /** |
3618 | * freeze_workqueues_busy - are freezeable workqueues still busy? | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? |
3619 | * | 3630 | * |
3620 | * Check whether freezing is complete. This function must be called | 3631 | * Check whether freezing is complete. This function must be called |
3621 | * between freeze_workqueues_begin() and thaw_workqueues(). | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). |
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) | |||
3624 | * Grabs and releases workqueue_lock. | 3635 | * Grabs and releases workqueue_lock. |
3625 | * | 3636 | * |
3626 | * RETURNS: | 3637 | * RETURNS: |
3627 | * %true if some freezeable workqueues are still busy. %false if | 3638 | * %true if some freezable workqueues are still busy. %false if freezing |
3628 | * freezing is complete. | 3639 | * is complete. |
3629 | */ | 3640 | */ |
3630 | bool freeze_workqueues_busy(void) | 3641 | bool freeze_workqueues_busy(void) |
3631 | { | 3642 | { |
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) | |||
3645 | list_for_each_entry(wq, &workqueues, list) { | 3656 | list_for_each_entry(wq, &workqueues, list) { |
3646 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3647 | 3658 | ||
3648 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3649 | continue; | 3660 | continue; |
3650 | 3661 | ||
3651 | BUG_ON(cwq->nr_active < 0); | 3662 | BUG_ON(cwq->nr_active < 0); |
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void) | |||
3690 | list_for_each_entry(wq, &workqueues, list) { | 3701 | list_for_each_entry(wq, &workqueues, list) { |
3691 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3692 | 3703 | ||
3693 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3694 | continue; | 3705 | continue; |
3695 | 3706 | ||
3696 | /* restore max_active and repopulate worklist */ | 3707 | /* restore max_active and repopulate worklist */ |
diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16ca..b8029a5583ff 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new, | |||
35 | } | 35 | } |
36 | EXPORT_SYMBOL(__list_add); | 36 | EXPORT_SYMBOL(__list_add); |
37 | 37 | ||
38 | void __list_del_entry(struct list_head *entry) | ||
39 | { | ||
40 | struct list_head *prev, *next; | ||
41 | |||
42 | prev = entry->prev; | ||
43 | next = entry->next; | ||
44 | |||
45 | if (WARN(next == LIST_POISON1, | ||
46 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", | ||
47 | entry, LIST_POISON1) || | ||
48 | WARN(prev == LIST_POISON2, | ||
49 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", | ||
50 | entry, LIST_POISON2) || | ||
51 | WARN(prev->next != entry, | ||
52 | "list_del corruption. prev->next should be %p, " | ||
53 | "but was %p\n", entry, prev->next) || | ||
54 | WARN(next->prev != entry, | ||
55 | "list_del corruption. next->prev should be %p, " | ||
56 | "but was %p\n", entry, next->prev)) | ||
57 | return; | ||
58 | |||
59 | __list_del(prev, next); | ||
60 | } | ||
61 | EXPORT_SYMBOL(__list_del_entry); | ||
62 | |||
38 | /** | 63 | /** |
39 | * list_del - deletes entry from list. | 64 | * list_del - deletes entry from list. |
40 | * @entry: the element to delete from the list. | 65 | * @entry: the element to delete from the list. |
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); | |||
43 | */ | 68 | */ |
44 | void list_del(struct list_head *entry) | 69 | void list_del(struct list_head *entry) |
45 | { | 70 | { |
46 | WARN(entry->next == LIST_POISON1, | 71 | __list_del_entry(entry); |
47 | "list_del corruption, next is LIST_POISON1 (%p)\n", | ||
48 | LIST_POISON1); | ||
49 | WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, | ||
50 | "list_del corruption, prev is LIST_POISON2 (%p)\n", | ||
51 | LIST_POISON2); | ||
52 | WARN(entry->prev->next != entry, | ||
53 | "list_del corruption. prev->next should be %p, " | ||
54 | "but was %p\n", entry, entry->prev->next); | ||
55 | WARN(entry->next->prev != entry, | ||
56 | "list_del corruption. next->prev should be %p, " | ||
57 | "but was %p\n", entry, entry->next->prev); | ||
58 | __list_del(entry->prev, entry->next); | ||
59 | entry->next = LIST_POISON1; | 72 | entry->next = LIST_POISON1; |
60 | entry->prev = LIST_POISON2; | 73 | entry->prev = LIST_POISON2; |
61 | } | 74 | } |
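The list_debug.c hunk factors the corruption checks out of list_del() into __list_del_entry(), which validates both neighbours and unlinks without poisoning, so other callers can reuse it. A minimal userspace sketch of the same check-then-unlink-then-poison pattern, assuming nothing beyond the C standard library; the struct and helper names are illustrative, not the kernel list API:

#include <stdio.h>

struct node { struct node *prev, *next; };

#define POISON1 ((struct node *)0x100)
#define POISON2 ((struct node *)0x200)

/* Validate that the entry's neighbours still point at it, then unlink it. */
static int node_del_checked(struct node *entry)
{
    struct node *prev = entry->prev, *next = entry->next;

    if (next == POISON1 || prev == POISON2) {
        fprintf(stderr, "double delete of %p\n", (void *)entry);
        return -1;
    }
    if (prev->next != entry || next->prev != entry) {
        fprintf(stderr, "list corruption around %p\n", (void *)entry);
        return -1;
    }
    prev->next = next;
    next->prev = prev;
    entry->next = POISON1;          /* poison so a second delete is caught */
    entry->prev = POISON2;
    return 0;
}

int main(void)
{
    struct node head = { &head, &head };
    struct node a;

    /* insert a right after head */
    a.prev = &head;
    a.next = head.next;
    head.next->prev = &a;
    head.next = &a;

    printf("first delete:  %d\n", node_del_checked(&a));   /* 0  */
    printf("second delete: %d\n", node_del_checked(&a));   /* -1 */
    return 0;
}

Deleting the same node twice shows why the poison values are written back: the second call is refused instead of corrupting the neighbours.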
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7550abb0c96a..675614e38e14 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason) | |||
859 | result = L2CAP_CR_SEC_BLOCK; | 859 | result = L2CAP_CR_SEC_BLOCK; |
860 | else | 860 | else |
861 | result = L2CAP_CR_BAD_PSM; | 861 | result = L2CAP_CR_BAD_PSM; |
862 | sk->sk_state = BT_DISCONN; | ||
862 | 863 | ||
863 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 864 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
864 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 865 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6f6d8e1b776f..88e4aa9cb1f9 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
80 | if (is_multicast_ether_addr(dest)) { | 80 | if (is_multicast_ether_addr(dest)) { |
81 | mdst = br_mdb_get(br, skb); | 81 | mdst = br_mdb_get(br, skb); |
82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { | 82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
83 | if ((mdst && !hlist_unhashed(&mdst->mglist)) || | 83 | if ((mdst && mdst->mglist) || |
84 | br_multicast_is_router(br)) | 84 | br_multicast_is_router(br)) |
85 | skb2 = skb; | 85 | skb2 = skb; |
86 | br_multicast_forward(mdst, skb, skb2); | 86 | br_multicast_forward(mdst, skb, skb2); |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f701a21acb34..09d5c0987925 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -232,8 +232,7 @@ static void br_multicast_group_expired(unsigned long data) | |||
232 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) | 232 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) |
233 | goto out; | 233 | goto out; |
234 | 234 | ||
235 | if (!hlist_unhashed(&mp->mglist)) | 235 | mp->mglist = false; |
236 | hlist_del_init(&mp->mglist); | ||
237 | 236 | ||
238 | if (mp->ports) | 237 | if (mp->ports) |
239 | goto out; | 238 | goto out; |
@@ -276,7 +275,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
276 | del_timer(&p->query_timer); | 275 | del_timer(&p->query_timer); |
277 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 276 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
278 | 277 | ||
279 | if (!mp->ports && hlist_unhashed(&mp->mglist) && | 278 | if (!mp->ports && !mp->mglist && |
280 | netif_running(br->dev)) | 279 | netif_running(br->dev)) |
281 | mod_timer(&mp->timer, jiffies); | 280 | mod_timer(&mp->timer, jiffies); |
282 | 281 | ||
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data) | |||
528 | struct net_bridge *br = mp->br; | 527 | struct net_bridge *br = mp->br; |
529 | 528 | ||
530 | spin_lock(&br->multicast_lock); | 529 | spin_lock(&br->multicast_lock); |
531 | if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || | 530 | if (!netif_running(br->dev) || !mp->mglist || |
532 | mp->queries_sent >= br->multicast_last_member_count) | 531 | mp->queries_sent >= br->multicast_last_member_count) |
533 | goto out; | 532 | goto out; |
534 | 533 | ||
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
719 | goto err; | 718 | goto err; |
720 | 719 | ||
721 | if (!port) { | 720 | if (!port) { |
722 | hlist_add_head(&mp->mglist, &br->mglist); | 721 | mp->mglist = true; |
723 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | 722 | mod_timer(&mp->timer, now + br->multicast_membership_interval); |
724 | goto out; | 723 | goto out; |
725 | } | 724 | } |
@@ -1165,7 +1164,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1165 | 1164 | ||
1166 | max_delay *= br->multicast_last_member_count; | 1165 | max_delay *= br->multicast_last_member_count; |
1167 | 1166 | ||
1168 | if (!hlist_unhashed(&mp->mglist) && | 1167 | if (mp->mglist && |
1169 | (timer_pending(&mp->timer) ? | 1168 | (timer_pending(&mp->timer) ? |
1170 | time_after(mp->timer.expires, now + max_delay) : | 1169 | time_after(mp->timer.expires, now + max_delay) : |
1171 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1170 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1177,7 +1176,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1177 | if (timer_pending(&p->timer) ? | 1176 | if (timer_pending(&p->timer) ? |
1178 | time_after(p->timer.expires, now + max_delay) : | 1177 | time_after(p->timer.expires, now + max_delay) : |
1179 | try_to_del_timer_sync(&p->timer) >= 0) | 1178 | try_to_del_timer_sync(&p->timer) >= 0) |
1180 | mod_timer(&mp->timer, now + max_delay); | 1179 | mod_timer(&p->timer, now + max_delay); |
1181 | } | 1180 | } |
1182 | 1181 | ||
1183 | out: | 1182 | out: |
@@ -1236,7 +1235,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1236 | goto out; | 1235 | goto out; |
1237 | 1236 | ||
1238 | max_delay *= br->multicast_last_member_count; | 1237 | max_delay *= br->multicast_last_member_count; |
1239 | if (!hlist_unhashed(&mp->mglist) && | 1238 | if (mp->mglist && |
1240 | (timer_pending(&mp->timer) ? | 1239 | (timer_pending(&mp->timer) ? |
1241 | time_after(mp->timer.expires, now + max_delay) : | 1240 | time_after(mp->timer.expires, now + max_delay) : |
1242 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1241 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1248,7 +1247,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1248 | if (timer_pending(&p->timer) ? | 1247 | if (timer_pending(&p->timer) ? |
1249 | time_after(p->timer.expires, now + max_delay) : | 1248 | time_after(p->timer.expires, now + max_delay) : |
1250 | try_to_del_timer_sync(&p->timer) >= 0) | 1249 | try_to_del_timer_sync(&p->timer) >= 0) |
1251 | mod_timer(&mp->timer, now + max_delay); | 1250 | mod_timer(&p->timer, now + max_delay); |
1252 | } | 1251 | } |
1253 | 1252 | ||
1254 | out: | 1253 | out: |
@@ -1283,7 +1282,7 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1283 | br->multicast_last_member_interval; | 1282 | br->multicast_last_member_interval; |
1284 | 1283 | ||
1285 | if (!port) { | 1284 | if (!port) { |
1286 | if (!hlist_unhashed(&mp->mglist) && | 1285 | if (mp->mglist && |
1287 | (timer_pending(&mp->timer) ? | 1286 | (timer_pending(&mp->timer) ? |
1288 | time_after(mp->timer.expires, time) : | 1287 | time_after(mp->timer.expires, time) : |
1289 | try_to_del_timer_sync(&mp->timer) >= 0)) { | 1288 | try_to_del_timer_sync(&mp->timer) >= 0)) { |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 84aac7734bfc..4e1b620b6be6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -84,13 +84,13 @@ struct net_bridge_port_group { | |||
84 | struct net_bridge_mdb_entry | 84 | struct net_bridge_mdb_entry |
85 | { | 85 | { |
86 | struct hlist_node hlist[2]; | 86 | struct hlist_node hlist[2]; |
87 | struct hlist_node mglist; | ||
88 | struct net_bridge *br; | 87 | struct net_bridge *br; |
89 | struct net_bridge_port_group __rcu *ports; | 88 | struct net_bridge_port_group __rcu *ports; |
90 | struct rcu_head rcu; | 89 | struct rcu_head rcu; |
91 | struct timer_list timer; | 90 | struct timer_list timer; |
92 | struct timer_list query_timer; | 91 | struct timer_list query_timer; |
93 | struct br_ip addr; | 92 | struct br_ip addr; |
93 | bool mglist; | ||
94 | u32 queries_sent; | 94 | u32 queries_sent; |
95 | }; | 95 | }; |
96 | 96 | ||
@@ -238,7 +238,6 @@ struct net_bridge | |||
238 | spinlock_t multicast_lock; | 238 | spinlock_t multicast_lock; |
239 | struct net_bridge_mdb_htable __rcu *mdb; | 239 | struct net_bridge_mdb_htable __rcu *mdb; |
240 | struct hlist_head router_list; | 240 | struct hlist_head router_list; |
241 | struct hlist_head mglist; | ||
242 | 241 | ||
243 | struct timer_list multicast_router_timer; | 242 | struct timer_list multicast_router_timer; |
244 | struct timer_list multicast_querier_timer; | 243 | struct timer_list multicast_querier_timer; |
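The bridge hunks replace both the per-bridge mglist head and the hlist_node in net_bridge_mdb_entry with a single bool, since the only question ever asked of that list was whether the host itself had joined the group; the same hunks also arm the port group's own timer (p->timer) in the group-specific query paths instead of the host entry's. A reduced sketch of the data-structure simplification, with invented names rather than the bridge types:

#include <stdbool.h>
#include <stdio.h>

/* Before: host membership was tracked by linking the entry on a per-bridge
 * hlist and testing hlist_unhashed().  Nothing ever traversed that list,
 * so a flag on the entry carries the same information with less state. */
struct mdb_entry {
    bool host_joined;        /* was: struct hlist_node mglist */
    int  port_refs;
};

static void host_join(struct mdb_entry *mp)
{
    mp->host_joined = true;
}

static bool entry_expired(struct mdb_entry *mp)
{
    mp->host_joined = false;         /* host-side membership times out */
    return mp->port_refs == 0;       /* free only if no ports remain */
}

int main(void)
{
    struct mdb_entry mp = { .host_joined = false, .port_refs = 1 };

    host_join(&mp);
    printf("host joined: %d\n", mp.host_joined);
    printf("can free on expiry: %d\n", entry_expired(&mp));
    return 0;
}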
diff --git a/net/core/dev.c b/net/core/dev.c index 8e726cb47ed7..8ae6631abcc2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1280,10 +1280,13 @@ static int __dev_close_many(struct list_head *head) | |||
1280 | 1280 | ||
1281 | static int __dev_close(struct net_device *dev) | 1281 | static int __dev_close(struct net_device *dev) |
1282 | { | 1282 | { |
1283 | int retval; | ||
1283 | LIST_HEAD(single); | 1284 | LIST_HEAD(single); |
1284 | 1285 | ||
1285 | list_add(&dev->unreg_list, &single); | 1286 | list_add(&dev->unreg_list, &single); |
1286 | return __dev_close_many(&single); | 1287 | retval = __dev_close_many(&single); |
1288 | list_del(&single); | ||
1289 | return retval; | ||
1287 | } | 1290 | } |
1288 | 1291 | ||
1289 | int dev_close_many(struct list_head *head) | 1292 | int dev_close_many(struct list_head *head) |
@@ -1325,7 +1328,7 @@ int dev_close(struct net_device *dev) | |||
1325 | 1328 | ||
1326 | list_add(&dev->unreg_list, &single); | 1329 | list_add(&dev->unreg_list, &single); |
1327 | dev_close_many(&single); | 1330 | dev_close_many(&single); |
1328 | 1331 | list_del(&single); | |
1329 | return 0; | 1332 | return 0; |
1330 | } | 1333 | } |
1331 | EXPORT_SYMBOL(dev_close); | 1334 | EXPORT_SYMBOL(dev_close); |
@@ -5063,6 +5066,7 @@ static void rollback_registered(struct net_device *dev) | |||
5063 | 5066 | ||
5064 | list_add(&dev->unreg_list, &single); | 5067 | list_add(&dev->unreg_list, &single); |
5065 | rollback_registered_many(&single); | 5068 | rollback_registered_many(&single); |
5069 | list_del(&single); | ||
5066 | } | 5070 | } |
5067 | 5071 | ||
5068 | unsigned long netdev_fix_features(unsigned long features, const char *name) | 5072 | unsigned long netdev_fix_features(unsigned long features, const char *name) |
@@ -6216,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) | |||
6216 | } | 6220 | } |
6217 | } | 6221 | } |
6218 | unregister_netdevice_many(&dev_kill_list); | 6222 | unregister_netdevice_many(&dev_kill_list); |
6223 | list_del(&dev_kill_list); | ||
6219 | rtnl_unlock(); | 6224 | rtnl_unlock(); |
6220 | } | 6225 | } |
6221 | 6226 | ||
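Each net/core/dev.c hunk follows the same pattern: a device is put on an on-stack LIST_HEAD, handed to a *_many() helper, and must then be unlinked again before the stack list head goes out of scope, otherwise dev->unreg_list would keep pointing into a dead stack frame. A small sketch of that discipline, assuming only the C standard library; the list and device types below are stand-ins, not the kernel's:

#include <stdio.h>
#include <stddef.h>

struct list { struct list *prev, *next; };

static void list_add(struct list *nw, struct list *head)
{
    nw->next = head->next;
    nw->prev = head;
    head->next->prev = nw;
    head->next = nw;
}

static void list_del(struct list *entry)
{
    entry->prev->next = entry->next;
    entry->next->prev = entry->prev;
    entry->prev = entry->next = entry;    /* leave it safely self-linked */
}

struct device { struct list unreg_list; int closed; };

/* Stand-in for __dev_close_many(): walks the list it is given. */
static void close_many(struct list *head)
{
    for (struct list *p = head->next; p != head; p = p->next) {
        struct device *dev = (struct device *)((char *)p -
                                 offsetof(struct device, unreg_list));
        dev->closed = 1;
    }
}

static void close_one(struct device *dev)
{
    struct list single = { &single, &single };   /* lives on this stack frame */

    list_add(&dev->unreg_list, &single);
    close_many(&single);
    list_del(&single);   /* the fix: unlink the stack head before it dies */
}

int main(void)
{
    struct device d;

    d.unreg_list.prev = d.unreg_list.next = &d.unreg_list;
    d.closed = 0;
    close_one(&d);
    printf("closed=%d, no dangling link=%d\n", d.closed,
           d.unreg_list.next == &d.unreg_list);
    return 0;
}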
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 6b03f561caec..d5074a567289 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, | |||
626 | dcb->cmd = DCB_CMD_GAPP; | 626 | dcb->cmd = DCB_CMD_GAPP; |
627 | 627 | ||
628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); | 628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); |
629 | if (!app_nest) | ||
630 | goto out_cancel; | ||
631 | |||
629 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); | 632 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); |
630 | if (ret) | 633 | if (ret) |
631 | goto out_cancel; | 634 | goto out_cancel; |
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp); | |||
1613 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | 1616 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) |
1614 | { | 1617 | { |
1615 | struct dcb_app_type *itr; | 1618 | struct dcb_app_type *itr; |
1619 | struct dcb_app_type event; | ||
1620 | |||
1621 | memcpy(&event.name, dev->name, sizeof(event.name)); | ||
1622 | memcpy(&event.app, new, sizeof(event.app)); | ||
1616 | 1623 | ||
1617 | spin_lock(&dcb_lock); | 1624 | spin_lock(&dcb_lock); |
1618 | /* Search for existing match and replace */ | 1625 | /* Search for existing match and replace */ |
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | |||
1644 | } | 1651 | } |
1645 | out: | 1652 | out: |
1646 | spin_unlock(&dcb_lock); | 1653 | spin_unlock(&dcb_lock); |
1647 | call_dcbevent_notifiers(DCB_APP_EVENT, new); | 1654 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1648 | return 0; | 1655 | return 0; |
1649 | } | 1656 | } |
1650 | EXPORT_SYMBOL(dcb_setapp); | 1657 | EXPORT_SYMBOL(dcb_setapp); |
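The dcbnl hunks do two things: dcbnl_getapp() now bails out if nla_nest_start() fails, and dcb_setapp() builds a local dcb_app_type event (device name plus app entry) and notifies with that instead of handing listeners the caller's pointer. A small sketch of the copy-before-notify idea, with invented structure and function names:

#include <stdio.h>
#include <string.h>

#define IFNAME_LEN 16

struct app_entry { int selector; int protocol; int priority; };
struct app_event { char ifname[IFNAME_LEN]; struct app_entry app; };

/* Stand-in for a notifier chain: receives a self-contained event. */
static void notify(const struct app_event *ev)
{
    printf("%s: selector=%d proto=0x%x prio=%d\n", ev->ifname,
           ev->app.selector, ev->app.protocol, ev->app.priority);
}

static int set_app(const char *ifname, const struct app_entry *new_app)
{
    struct app_event event;

    /* Build the event up front so listeners also get the interface name
     * and never hold a pointer into the caller's storage. */
    snprintf(event.ifname, sizeof(event.ifname), "%s", ifname);
    memcpy(&event.app, new_app, sizeof(event.app));

    /* ... update the table under a lock here ... */

    notify(&event);
    return 0;
}

int main(void)
{
    struct app_entry a = { .selector = 1, .protocol = 0x8906, .priority = 3 };

    return set_app("eth0", &a);
}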
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 748cb5b337bd..df4616fce929 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) | |||
1030 | return mtu >= 68; | 1030 | return mtu >= 68; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void inetdev_send_gratuitous_arp(struct net_device *dev, | ||
1034 | struct in_device *in_dev) | ||
1035 | |||
1036 | { | ||
1037 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1038 | |||
1039 | if (!ifa) | ||
1040 | return; | ||
1041 | |||
1042 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1043 | ifa->ifa_address, dev, | ||
1044 | ifa->ifa_address, NULL, | ||
1045 | dev->dev_addr, NULL); | ||
1046 | } | ||
1047 | |||
1033 | /* Called only under RTNL semaphore */ | 1048 | /* Called only under RTNL semaphore */ |
1034 | 1049 | ||
1035 | static int inetdev_event(struct notifier_block *this, unsigned long event, | 1050 | static int inetdev_event(struct notifier_block *this, unsigned long event, |
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1082 | } | 1097 | } |
1083 | ip_mc_up(in_dev); | 1098 | ip_mc_up(in_dev); |
1084 | /* fall through */ | 1099 | /* fall through */ |
1085 | case NETDEV_NOTIFY_PEERS: | ||
1086 | case NETDEV_CHANGEADDR: | 1100 | case NETDEV_CHANGEADDR: |
1101 | if (!IN_DEV_ARP_NOTIFY(in_dev)) | ||
1102 | break; | ||
1103 | /* fall through */ | ||
1104 | case NETDEV_NOTIFY_PEERS: | ||
1087 | /* Send gratuitous ARP to notify of link change */ | 1105 | /* Send gratuitous ARP to notify of link change */ |
1088 | if (IN_DEV_ARP_NOTIFY(in_dev)) { | 1106 | inetdev_send_gratuitous_arp(dev, in_dev); |
1089 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1090 | |||
1091 | if (ifa) | ||
1092 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1093 | ifa->ifa_address, dev, | ||
1094 | ifa->ifa_address, NULL, | ||
1095 | dev->dev_addr, NULL); | ||
1096 | } | ||
1097 | break; | 1107 | break; |
1098 | case NETDEV_DOWN: | 1108 | case NETDEV_DOWN: |
1099 | ip_mc_down(in_dev); | 1109 | ip_mc_down(in_dev); |
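The devinet.c hunk pulls the gratuitous ARP transmission into inetdev_send_gratuitous_arp() and reorders the switch so that NETDEV_CHANGEADDR still honours the arp_notify setting while NETDEV_NOTIFY_PEERS announces unconditionally. The gated fall-through reads like the sketch below, which uses hypothetical event names rather than the real notifier constants:

#include <stdio.h>
#include <stdbool.h>

enum event { EV_UP, EV_CHANGEADDR, EV_NOTIFY_PEERS, EV_DOWN };

static void send_announcement(const char *why)
{
    printf("gratuitous announcement (%s)\n", why);
}

/* notify_enabled plays the role of the arp_notify sysctl. */
static void handle_event(enum event ev, bool notify_enabled)
{
    switch (ev) {
    case EV_UP:
        printf("bring-up work\n");
        /* fall through */
    case EV_CHANGEADDR:
        if (!notify_enabled)
            break;               /* address changes are opt-in ... */
        /* fall through */
    case EV_NOTIFY_PEERS:
        send_announcement("link/address change");   /* ... peer notify is not */
        break;
    case EV_DOWN:
        printf("teardown work\n");
        break;
    }
}

int main(void)
{
    handle_event(EV_CHANGEADDR, false);    /* silent */
    handle_event(EV_CHANGEADDR, true);     /* announces */
    handle_event(EV_NOTIFY_PEERS, false);  /* announces regardless */
    return 0;
}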
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eb68a0e34e49..6613edfac28c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
775 | .fl4_dst = dst, | 775 | .fl4_dst = dst, |
776 | .fl4_src = tiph->saddr, | 776 | .fl4_src = tiph->saddr, |
777 | .fl4_tos = RT_TOS(tos), | 777 | .fl4_tos = RT_TOS(tos), |
778 | .proto = IPPROTO_GRE, | ||
778 | .fl_gre_key = tunnel->parms.o_key | 779 | .fl_gre_key = tunnel->parms.o_key |
779 | }; | 780 | }; |
780 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 781 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 788a3e74834e..6ed6603c2f6d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
2722 | .destroy = ipv4_dst_destroy, | 2722 | .destroy = ipv4_dst_destroy, |
2723 | .check = ipv4_blackhole_dst_check, | 2723 | .check = ipv4_blackhole_dst_check, |
2724 | .default_mtu = ipv4_blackhole_default_mtu, | 2724 | .default_mtu = ipv4_blackhole_default_mtu, |
2725 | .default_advmss = ipv4_default_advmss, | ||
2725 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2726 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
2726 | }; | 2727 | }; |
2727 | 2728 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1c29f95695de..a998db6e7895 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
128 | .destroy = ip6_dst_destroy, | 128 | .destroy = ip6_dst_destroy, |
129 | .check = ip6_dst_check, | 129 | .check = ip6_dst_check, |
130 | .default_mtu = ip6_blackhole_default_mtu, | 130 | .default_mtu = ip6_blackhole_default_mtu, |
131 | .default_advmss = ip6_default_advmss, | ||
131 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 132 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
132 | }; | 133 | }; |
133 | 134 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cf68700abffa..d036597aabbe 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1210 | switch (sdata->vif.type) { | 1210 | switch (sdata->vif.type) { |
1211 | case NL80211_IFTYPE_STATION: | 1211 | case NL80211_IFTYPE_STATION: |
1212 | changed |= BSS_CHANGED_ASSOC; | 1212 | changed |= BSS_CHANGED_ASSOC; |
1213 | mutex_lock(&sdata->u.mgd.mtx); | ||
1213 | ieee80211_bss_info_change_notify(sdata, changed); | 1214 | ieee80211_bss_info_change_notify(sdata, changed); |
1215 | mutex_unlock(&sdata->u.mgd.mtx); | ||
1214 | break; | 1216 | break; |
1215 | case NL80211_IFTYPE_ADHOC: | 1217 | case NL80211_IFTYPE_ADHOC: |
1216 | changed |= BSS_CHANGED_IBSS; | 1218 | changed |= BSS_CHANGED_IBSS; |
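The mac80211 hunk takes the managed-mode mutex around ieee80211_bss_info_change_notify() for station interfaces during reconfiguration, which suggests the callee expects that lock to be held. The idiom reduces to the pthread sketch below; the names are illustrative, not mac80211's:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mgd_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Callee that relies on the caller holding mgd_mtx while it runs. */
static void bss_info_change_notify(unsigned changed)
{
    printf("applying changes 0x%x under the lock\n", changed);
}

static void reconfig_station(unsigned changed)
{
    pthread_mutex_lock(&mgd_mtx);    /* take the lock the callee assumes */
    bss_info_change_notify(changed);
    pthread_mutex_unlock(&mgd_mtx);
}

int main(void)
{
    reconfig_station(0x1);
    return 0;
}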
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 32fcbe290c04..4aa614b8a96a 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
133 | 133 | ||
134 | /* Optimization: we don't need to hold module | 134 | /* Optimization: we don't need to hold module |
135 | reference here, since function can't sleep. --RR */ | 135 | reference here, since function can't sleep. --RR */ |
136 | repeat: | ||
136 | verdict = elem->hook(hook, skb, indev, outdev, okfn); | 137 | verdict = elem->hook(hook, skb, indev, outdev, okfn); |
137 | if (verdict != NF_ACCEPT) { | 138 | if (verdict != NF_ACCEPT) { |
138 | #ifdef CONFIG_NETFILTER_DEBUG | 139 | #ifdef CONFIG_NETFILTER_DEBUG |
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
145 | #endif | 146 | #endif |
146 | if (verdict != NF_REPEAT) | 147 | if (verdict != NF_REPEAT) |
147 | return verdict; | 148 | return verdict; |
148 | *i = (*i)->prev; | 149 | goto repeat; |
149 | } | 150 | } |
150 | } | 151 | } |
151 | return NF_ACCEPT; | 152 | return NF_ACCEPT; |
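The nf_iterate() hunk replaces the old trick of stepping the list iterator back (*i = (*i)->prev) with an explicit repeat label, so an NF_REPEAT verdict re-runs the same hook without depending on the shape of the caller's loop. A standalone sketch of that control flow, reduced to a single hook and made-up verdict values:

#include <stdio.h>

enum verdict { ACCEPT, DROP, REPEAT };

/* A hook that asks to be re-run twice before accepting. */
static enum verdict flaky_hook(int *state)
{
    return (*state)++ < 2 ? REPEAT : ACCEPT;
}

static enum verdict run_hook(void)
{
    int state = 0;
    enum verdict v;

repeat:
    v = flaky_hook(&state);
    if (v != ACCEPT) {
        if (v != REPEAT)
            return v;        /* DROP and friends terminate immediately */
        goto repeat;         /* re-invoke the same hook, not the previous one */
    }
    return ACCEPT;
}

int main(void)
{
    printf("verdict=%d\n", run_hook());   /* 0 (ACCEPT) after three calls */
    return 0;
}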
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8b3ef404c794..6459588befc3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1340 | default: | 1340 | default: |
1341 | BUG(); | 1341 | BUG(); |
1342 | } | 1342 | } |
1343 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1343 | xdst = dst_alloc(dst_ops); |
1344 | xfrm_policy_put_afinfo(afinfo); | 1344 | xfrm_policy_put_afinfo(afinfo); |
1345 | 1345 | ||
1346 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1346 | if (likely(xdst)) |
1347 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1348 | else | ||
1349 | xdst = ERR_PTR(-ENOBUFS); | ||
1347 | 1350 | ||
1348 | return xdst; | 1351 | return xdst; |
1349 | } | 1352 | } |
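The xfrm hunk fixes an ordering bug: the old code converted a failed dst_alloc() into ERR_PTR(-ENOBUFS) but then unconditionally wrote xdst->flo.ops through the result, i.e. through an error pointer on failure. The new code only initialises the object once the allocation is known to have succeeded. A userspace sketch of the corrected order, using plain malloc and an errno-style out-parameter instead of the kernel's ERR_PTR helpers:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct bundle { const char *ops; };

/* Allocate and initialise a bundle; on failure return NULL and set *err. */
static struct bundle *alloc_bundle(int *err)
{
    struct bundle *b = malloc(sizeof(*b));

    if (b)                      /* only touch the object if allocation worked */
        b->ops = "bundle-ops";
    else
        *err = -ENOBUFS;        /* report the failure instead of crashing */

    return b;
}

int main(void)
{
    int err = 0;
    struct bundle *b = alloc_bundle(&err);

    if (!b) {
        fprintf(stderr, "alloc failed: %d\n", err);
        return 1;
    }
    printf("ops=%s\n", b->ops);
    free(b);
    return 0;
}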
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 23f49f356e0f..16c0bdfbb164 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c | |||
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { | |||
1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) | 1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) |
1253 | { | 1253 | { |
1254 | stream_t *dma = &vortex->dma_adb[adbdma]; | 1254 | stream_t *dma = &vortex->dma_adb[adbdma]; |
1255 | int temp; | 1255 | int temp, page, delta; |
1256 | 1256 | ||
1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); | 1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); |
1258 | temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); | 1258 | page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; |
1259 | return temp; | 1259 | if (dma->nr_periods >= 4) |
1260 | delta = (page - dma->period_real) & 3; | ||
1261 | else { | ||
1262 | delta = (page - dma->period_real); | ||
1263 | if (delta < 0) | ||
1264 | delta += dma->nr_periods; | ||
1265 | } | ||
1266 | return (dma->period_virt + delta) * dma->period_bytes | ||
1267 | + (temp & (dma->period_bytes - 1)); | ||
1260 | } | 1268 | } |
1261 | 1269 | ||
1262 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) | 1270 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) |
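The au88x0 hunk recomputes the linear DMA position from the subbuffer ("page") field of the status register: it takes the distance from the last software-acknowledged period, wraps it by the number of periods, and only then adds the byte offset inside the current period, instead of trusting period_virt alone. The wrap arithmetic can be checked in isolation; the sketch below mirrors the driver's two branches with stand-in parameter names:

#include <stdio.h>

/* Distance, in periods, from the last acknowledged period to the one the
 * hardware reports, wrapping at nr_periods (mask for the >= 4 case,
 * explicit wrap otherwise -- mirroring the driver's two branches). */
static int period_delta(int hw_page, int last_page, int nr_periods)
{
    int delta;

    if (nr_periods >= 4)
        delta = (hw_page - last_page) & 3;
    else {
        delta = hw_page - last_page;
        if (delta < 0)
            delta += nr_periods;
    }
    return delta;
}

static int linear_pos(int period_virt, int hw_page, int last_page,
                      int nr_periods, int period_bytes, int byte_in_period)
{
    return (period_virt + period_delta(hw_page, last_page, nr_periods))
           * period_bytes + byte_in_period;
}

int main(void)
{
    /* 4 periods of 1024 bytes; hardware one period ahead, 100 bytes in */
    printf("%d\n", linear_pos(2, 3, 2, 4, 1024, 100));   /* 3172 */
    /* wrap-around: hardware moved from page 3 to page 0 */
    printf("%d\n", linear_pos(0, 0, 3, 4, 1024, 100));   /* 1124 */
    return 0;
}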
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0baffcdee8f9..fcedad9a5fef 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), | 2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), | 2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), |
2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), | 2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), |
2311 | SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), | ||
2311 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), | 2312 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), |
2312 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), | 2313 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
2313 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), | 2314 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fbe97d32140d..dd7c5c12225d 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3410,7 +3410,7 @@ static void cx_auto_parse_output(struct hda_codec *codec) | |||
3410 | } | 3410 | } |
3411 | } | 3411 | } |
3412 | spec->multiout.dac_nids = spec->private_dac_nids; | 3412 | spec->multiout.dac_nids = spec->private_dac_nids; |
3413 | spec->multiout.max_channels = nums * 2; | 3413 | spec->multiout.max_channels = spec->multiout.num_dacs * 2; |
3414 | 3414 | ||
3415 | if (cfg->hp_outs > 0) | 3415 | if (cfg->hp_outs > 0) |
3416 | spec->auto_mute = 1; | 3416 | spec->auto_mute = 1; |
@@ -3729,9 +3729,9 @@ static int cx_auto_init(struct hda_codec *codec) | |||
3729 | return 0; | 3729 | return 0; |
3730 | } | 3730 | } |
3731 | 3731 | ||
3732 | static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | 3732 | static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename, |
3733 | const char *dir, int cidx, | 3733 | const char *dir, int cidx, |
3734 | hda_nid_t nid, int hda_dir) | 3734 | hda_nid_t nid, int hda_dir, int amp_idx) |
3735 | { | 3735 | { |
3736 | static char name[32]; | 3736 | static char name[32]; |
3737 | static struct snd_kcontrol_new knew[] = { | 3737 | static struct snd_kcontrol_new knew[] = { |
@@ -3743,7 +3743,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3743 | 3743 | ||
3744 | for (i = 0; i < 2; i++) { | 3744 | for (i = 0; i < 2; i++) { |
3745 | struct snd_kcontrol *kctl; | 3745 | struct snd_kcontrol *kctl; |
3746 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); | 3746 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx, |
3747 | hda_dir); | ||
3747 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; | 3748 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; |
3748 | knew[i].index = cidx; | 3749 | knew[i].index = cidx; |
3749 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); | 3750 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); |
@@ -3759,6 +3760,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3759 | return 0; | 3760 | return 0; |
3760 | } | 3761 | } |
3761 | 3762 | ||
3763 | #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \ | ||
3764 | cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0) | ||
3765 | |||
3762 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ | 3766 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ |
3763 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) | 3767 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) |
3764 | 3768 | ||
@@ -3808,29 +3812,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec) | |||
3808 | struct conexant_spec *spec = codec->spec; | 3812 | struct conexant_spec *spec = codec->spec; |
3809 | struct auto_pin_cfg *cfg = &spec->autocfg; | 3813 | struct auto_pin_cfg *cfg = &spec->autocfg; |
3810 | static const char *prev_label; | 3814 | static const char *prev_label; |
3811 | int i, err, cidx; | 3815 | int i, err, cidx, conn_len; |
3816 | hda_nid_t conn[HDA_MAX_CONNECTIONS]; | ||
3817 | |||
3818 | int multi_adc_volume = 0; /* If the ADC nid has several input volumes */ | ||
3819 | int adc_nid = spec->adc_nids[0]; | ||
3820 | |||
3821 | conn_len = snd_hda_get_connections(codec, adc_nid, conn, | ||
3822 | HDA_MAX_CONNECTIONS); | ||
3823 | if (conn_len < 0) | ||
3824 | return conn_len; | ||
3825 | |||
3826 | multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1; | ||
3827 | if (!multi_adc_volume) { | ||
3828 | err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid, | ||
3829 | HDA_INPUT); | ||
3830 | if (err < 0) | ||
3831 | return err; | ||
3832 | } | ||
3812 | 3833 | ||
3813 | err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0], | ||
3814 | HDA_INPUT); | ||
3815 | if (err < 0) | ||
3816 | return err; | ||
3817 | prev_label = NULL; | 3834 | prev_label = NULL; |
3818 | cidx = 0; | 3835 | cidx = 0; |
3819 | for (i = 0; i < cfg->num_inputs; i++) { | 3836 | for (i = 0; i < cfg->num_inputs; i++) { |
3820 | hda_nid_t nid = cfg->inputs[i].pin; | 3837 | hda_nid_t nid = cfg->inputs[i].pin; |
3821 | const char *label; | 3838 | const char *label; |
3822 | if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) | 3839 | int j; |
3840 | int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP; | ||
3841 | if (!pin_amp && !multi_adc_volume) | ||
3823 | continue; | 3842 | continue; |
3843 | |||
3824 | label = hda_get_autocfg_input_label(codec, cfg, i); | 3844 | label = hda_get_autocfg_input_label(codec, cfg, i); |
3825 | if (label == prev_label) | 3845 | if (label == prev_label) |
3826 | cidx++; | 3846 | cidx++; |
3827 | else | 3847 | else |
3828 | cidx = 0; | 3848 | cidx = 0; |
3829 | prev_label = label; | 3849 | prev_label = label; |
3830 | err = cx_auto_add_volume(codec, label, " Capture", cidx, | 3850 | |
3831 | nid, HDA_INPUT); | 3851 | if (pin_amp) { |
3832 | if (err < 0) | 3852 | err = cx_auto_add_volume(codec, label, " Boost", cidx, |
3833 | return err; | 3853 | nid, HDA_INPUT); |
3854 | if (err < 0) | ||
3855 | return err; | ||
3856 | } | ||
3857 | |||
3858 | if (!multi_adc_volume) | ||
3859 | continue; | ||
3860 | for (j = 0; j < conn_len; j++) { | ||
3861 | if (conn[j] == nid) { | ||
3862 | err = cx_auto_add_volume_idx(codec, label, | ||
3863 | " Capture", cidx, adc_nid, HDA_INPUT, j); | ||
3864 | if (err < 0) | ||
3865 | return err; | ||
3866 | break; | ||
3867 | } | ||
3868 | } | ||
3834 | } | 3869 | } |
3835 | return 0; | 3870 | return 0; |
3836 | } | 3871 | } |
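The conexant hunk generalises cx_auto_add_volume() with an amp index so that, when one ADC has several input connections, each input pin gets a capture volume bound to its own index on the ADC rather than index 0; the index comes from locating the pin in the ADC's connection list. That lookup is the core of the change, reduced here to a sketch with invented types rather than the HDA API:

#include <stdio.h>

/* Find which input of the ADC a given pin is wired to; -1 if not connected. */
static int connection_index(const int *conn, int conn_len, int pin)
{
    for (int j = 0; j < conn_len; j++)
        if (conn[j] == pin)
            return j;
    return -1;
}

int main(void)
{
    int conn[] = { 0x19, 0x1a, 0x1b };     /* ADC input connections */
    int pins[] = { 0x1a, 0x1b, 0x1d };     /* configured input pins  */

    for (int i = 0; i < 3; i++) {
        int idx = connection_index(conn, 3, pins[i]);

        if (idx < 0)
            printf("pin 0x%x: not routed to this ADC\n", pins[i]);
        else
            printf("pin 0x%x: capture volume at amp index %d\n",
                   pins[i], idx);
    }
    return 0;
}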
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 68b97477577b..66eabafb1c24 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c | |||
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | dev->pcm->private_data = dev; | 787 | dev->pcm->private_data = dev; |
788 | strcpy(dev->pcm->name, dev->product_name); | 788 | strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); |
789 | 789 | ||
790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); | 790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); |
791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); | 791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); |
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c77fff2..a1a47088fd0c 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c | |||
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) | |||
136 | if (ret < 0) | 136 | if (ret < 0) |
137 | return ret; | 137 | return ret; |
138 | 138 | ||
139 | strcpy(rmidi->name, device->product_name); | 139 | strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); |
140 | 140 | ||
141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; | 141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; |
142 | rmidi->private_data = device; | 142 | rmidi->private_data = device; |
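Both caiaq hunks swap strcpy() for strlcpy() because product_name comes from the device and may not fit the fixed-size name buffers. strlcpy() is a kernel/BSD helper rather than ISO C; a portable equivalent of the bounded copy-and-terminate behaviour might look like the sketch below (the function name is an invention):

#include <stdio.h>
#include <string.h>

/* Copy at most size-1 bytes and always NUL-terminate, like strlcpy();
 * returns the length of src so callers can detect truncation. */
static size_t bounded_copy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t n = len < size - 1 ? len : size - 1;

        memcpy(dst, src, n);
        dst[n] = '\0';
    }
    return len;
}

int main(void)
{
    char name[16];
    const char *product = "Some Very Long USB Product Name";

    if (bounded_copy(name, product, sizeof(name)) >= sizeof(name))
        fprintf(stderr, "product name truncated\n");
    printf("pcm name: \"%s\"\n", name);
    return 0;
}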