177 files changed, 2018 insertions, 2619 deletions
diff --git a/Documentation/DocBook/filesystems.tmpl b/Documentation/DocBook/filesystems.tmpl
index 5e87ad58c0b5..f51f28531b8d 100644
--- a/Documentation/DocBook/filesystems.tmpl
+++ b/Documentation/DocBook/filesystems.tmpl
@@ -82,6 +82,11 @@
 </sect1>
 </chapter>
 
+<chapter id="fs_events">
+<title>Events based on file descriptors</title>
+!Efs/eventfd.c
+</chapter>
+
 <chapter id="sysfs">
 <title>The Filesystem for Exporting Kernel Objects</title>
 !Efs/sysfs/file.c
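The new DocBook chapter simply pulls in the kernel-doc comments from fs/eventfd.c. As a quick orientation (not part of the patch), a minimal user-space sketch of the eventfd counter semantics that those comments document:

/* Illustration only, not from the patch: basic eventfd(2) counter usage. */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/eventfd.h>

int main(void)
{
	int efd = eventfd(0, 0);	/* counter starts at 0 */
	uint64_t val = 3;

	if (efd < 0)
		return 1;
	write(efd, &val, sizeof(val));	/* adds 3 to the counter */
	read(efd, &val, sizeof(val));	/* returns 3 and resets the counter */
	printf("counter read back: %llu\n", (unsigned long long)val);
	close(efd);
	return 0;
}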
diff --git a/Documentation/hwmon/jc42 b/Documentation/hwmon/jc42
index 0e76ef12e4c6..a22ecf48f255 100644
--- a/Documentation/hwmon/jc42
+++ b/Documentation/hwmon/jc42
@@ -51,7 +51,8 @@ Supported chips:
 * JEDEC JC 42.4 compliant temperature sensor chips
   Prefix: 'jc42'
   Addresses scanned: I2C 0x18 - 0x1f
-  Datasheet: -
+  Datasheet:
+	http://www.jedec.org/sites/default/files/docs/4_01_04R19.pdf
 
 Author:
 	Guenter Roeck <guenter.roeck@ericsson.com>
@@ -60,7 +61,11 @@ Author:
 Description
 -----------
 
-This driver implements support for JEDEC JC 42.4 compliant temperature sensors.
+This driver implements support for JEDEC JC 42.4 compliant temperature sensors,
+which are used on many DDR3 memory modules for mobile devices and servers. Some
+systems use the sensor to prevent memory overheating by automatically throttling
+the memory controller.
+
 The driver auto-detects the chips listed above, but can be manually instantiated
 to support other JC 42.4 compliant chips.
 
@@ -81,15 +86,19 @@ limits. The chip supports only a single register to configure the hysteresis,
 which applies to all limits. This register can be written by writing into
 temp1_crit_hyst. Other hysteresis attributes are read-only.
 
+If the BIOS has configured the sensor for automatic temperature management, it
+is likely that it has locked the registers, i.e., that the temperature limits
+cannot be changed.
+
 Sysfs entries
 -------------
 
 temp1_input		Temperature (RO)
-temp1_min		Minimum temperature (RW)
-temp1_max		Maximum temperature (RW)
-temp1_crit		Critical high temperature (RW)
+temp1_min		Minimum temperature (RO or RW)
+temp1_max		Maximum temperature (RO or RW)
+temp1_crit		Critical high temperature (RO or RW)
 
-temp1_crit_hyst		Critical hysteresis temperature (RW)
+temp1_crit_hyst		Critical hysteresis temperature (RO or RW)
 temp1_max_hyst		Maximum hysteresis temperature (RO)
 
 temp1_min_alarm		Temperature low alarm
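The sysfs attributes above follow the standard hwmon convention (values in millidegrees Celsius). A hedged user-space sketch of reading one of them; the hwmon index and exact path (hwmon0 here) vary per system and are assumptions, not part of the patch:

/* Hypothetical example: read the jc42 temperature via the hwmon sysfs API. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	long millicelsius;

	if (!f)
		return 1;
	if (fscanf(f, "%ld", &millicelsius) == 1)
		printf("temp1_input: %ld millidegrees C\n", millicelsius);
	fclose(f);
	return 0;
}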
diff --git a/Documentation/hwmon/k10temp b/Documentation/hwmon/k10temp
index 6526eee525a6..d2b56a4fd1f5 100644
--- a/Documentation/hwmon/k10temp
+++ b/Documentation/hwmon/k10temp
@@ -9,6 +9,8 @@ Supported chips:
   Socket S1G3: Athlon II, Sempron, Turion II
 * AMD Family 11h processors:
   Socket S1G2: Athlon (X2), Sempron (X2), Turion X2 (Ultra)
+* AMD Family 12h processors: "Llano"
+* AMD Family 14h processors: "Brazos" (C/E/G-Series)
 
 Prefix: 'k10temp'
 Addresses scanned: PCI space
@@ -17,10 +19,14 @@ Supported chips:
   http://support.amd.com/us/Processor_TechDocs/31116.pdf
   BIOS and Kernel Developer's Guide (BKDG) for AMD Family 11h Processors:
   http://support.amd.com/us/Processor_TechDocs/41256.pdf
+  BIOS and Kernel Developer's Guide (BKDG) for AMD Family 14h Models 00h-0Fh Processors:
+  http://support.amd.com/us/Processor_TechDocs/43170.pdf
   Revision Guide for AMD Family 10h Processors:
   http://support.amd.com/us/Processor_TechDocs/41322.pdf
   Revision Guide for AMD Family 11h Processors:
   http://support.amd.com/us/Processor_TechDocs/41788.pdf
+  Revision Guide for AMD Family 14h Models 00h-0Fh Processors:
+  http://support.amd.com/us/Processor_TechDocs/47534.pdf
   AMD Family 11h Processor Power and Thermal Data Sheet for Notebooks:
   http://support.amd.com/us/Processor_TechDocs/43373.pdf
   AMD Family 10h Server and Workstation Processor Power and Thermal Data Sheet:
@@ -34,7 +40,7 @@ Description
 -----------
 
 This driver permits reading of the internal temperature sensor of AMD
-Family 10h and 11h processors.
+Family 10h/11h/12h/14h processors.
 
 All these processors have a sensor, but on those for Socket F or AM2+,
 the sensor may return inconsistent values (erratum 319). The driver
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 89835a4766a6..f4a04c0c7edc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -144,6 +144,11 @@ a fixed number of characters. This limit depends on the architecture
 and is between 256 and 4096 characters. It is defined in the file
 ./include/asm/setup.h as COMMAND_LINE_SIZE.
 
+Finally, the [KMG] suffix is commonly described after a number of kernel
+parameter values. These 'K', 'M', and 'G' letters represent the _binary_
+multipliers 'Kilo', 'Mega', and 'Giga', equalling 2^10, 2^20, and 2^30
+bytes respectively. Such letter suffixes can also be entirely omitted.
+
 
 	acpi=		[HW,ACPI,X86]
 			Advanced Configuration and Power Interface
@@ -545,16 +550,20 @@ and is between 256 and 4096 characters. It is defined in the file
 			Format:
 			<first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
 
-	crashkernel=nn[KMG]@ss[KMG]
-			[KNL] Reserve a chunk of physical memory to
-			hold a kernel to switch to with kexec on panic.
+	crashkernel=size[KMG][@offset[KMG]]
+			[KNL] Using kexec, Linux can switch to a 'crash kernel'
+			upon panic. This parameter reserves the physical
+			memory region [offset, offset + size] for that kernel
+			image. If '@offset' is omitted, then a suitable offset
+			is selected automatically. Check
+			Documentation/kdump/kdump.txt for further details.
 
 	crashkernel=range1:size1[,range2:size2,...][@offset]
 			[KNL] Same as above, but depends on the memory
 			in the running system. The syntax of range is
 			start-[end] where start and end are both
 			a memory unit (amount[KMG]). See also
-			Documentation/kdump/kdump.txt for a example.
+			Documentation/kdump/kdump.txt for an example.
 
 	cs89x0_dma=	[HW,NET]
 			Format: <dma>
@@ -1262,10 +1271,9 @@ and is between 256 and 4096 characters. It is defined in the file
 	6 (KERN_INFO)		informational
 	7 (KERN_DEBUG)		debug-level messages
 
-	log_buf_len=n	Sets the size of the printk ring buffer, in bytes.
-			Format: { n | nk | nM }
-			n must be a power of two. The default size
-			is set in the kernel config file.
+	log_buf_len=n[KMG]	Sets the size of the printk ring buffer,
+			in bytes. n must be a power of two. The default
+			size is set in the kernel config file.
 
 	logo.nologo	[FB] Disables display of the built-in Linux logo.
 			This may be used to provide more screen space for
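The new [KMG] wording applies to several parameters touched above (crashkernel=, log_buf_len=). A small sketch of the arithmetic it describes, illustration only; inside the kernel this kind of parsing is done by helpers such as memparse():

/* Sketch of the [KMG] binary multipliers; "64M" -> 64 * 2^20 = 67108864. */
#include <stdlib.h>

static unsigned long long parse_kmg(const char *s)
{
	char *end;
	unsigned long long val = strtoull(s, &end, 0);

	switch (*end) {
	case 'G': case 'g': val <<= 10; /* fall through */
	case 'M': case 'm': val <<= 10; /* fall through */
	case 'K': case 'k': val <<= 10; break;
	default: break;			/* suffix may be omitted entirely */
	}
	return val;
}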
diff --git a/Documentation/networking/Makefile b/Documentation/networking/Makefile
index 5aba7a33aeeb..24c308dd3fd1 100644
--- a/Documentation/networking/Makefile
+++ b/Documentation/networking/Makefile
@@ -4,6 +4,8 @@ obj- := dummy.o
 # List of programs to build
 hostprogs-y := ifenslave
 
+HOSTCFLAGS_ifenslave.o += -I$(objtree)/usr/include
+
 # Tell kbuild to always build the programs
 always := $(hostprogs-y)
 
diff --git a/Documentation/workqueue.txt b/Documentation/workqueue.txt
index 996a27d9b8db..01c513fac40e 100644
--- a/Documentation/workqueue.txt
+++ b/Documentation/workqueue.txt
@@ -190,9 +190,9 @@ resources, scheduled and executed.
 	* Long running CPU intensive workloads which can be better
 	  managed by the system scheduler.
 
-  WQ_FREEZEABLE
+  WQ_FREEZABLE
 
-	A freezeable wq participates in the freeze phase of the system
+	A freezable wq participates in the freeze phase of the system
 	suspend operations. Work items on the wq are drained and no
 	new work item starts execution until thawed.
 
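For reference, a minimal sketch of how a driver requests this behaviour with the renamed flag ("my_wq" and the init function are illustrative names, not from the patch):

/* Illustrative only: create a workqueue that participates in suspend freeze. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static int __init my_init(void)
{
	/* WQ_FREEZABLE: work items are drained during the freeze phase of
	 * system suspend and only resume execution after thawing. */
	my_wq = alloc_workqueue("my_wq", WQ_FREEZABLE, 0);
	if (!my_wq)
		return -ENOMEM;
	return 0;
}
module_init(my_init);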
diff --git a/MAINTAINERS b/MAINTAINERS
index 5dd6c751e6a6..6f99e1260db8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -885,7 +885,7 @@ S:	Supported
 
 ARM/QUALCOMM MSM MACHINE SUPPORT
 M:	David Brown <davidb@codeaurora.org>
-M:	Daniel Walker <dwalker@codeaurora.org>
+M:	Daniel Walker <dwalker@fifo99.com>
 M:	Bryan Huntsman <bryanh@codeaurora.org>
 L:	linux-arm-msm@vger.kernel.org
 F:	arch/arm/mach-msm/
@@ -2873,7 +2873,6 @@ M:	Guenter Roeck <guenter.roeck@ericsson.com>
 L:	lm-sensors@lm-sensors.org
 W:	http://www.lm-sensors.org/
 T:	quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
-T:	quilt kernel.org/pub/linux/kernel/people/groeck/linux-staging/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:	Maintained
 F:	Documentation/hwmon/
diff --git a/Makefile b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 38
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 26d45e5b636b..166efa2a19cd 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
 	  visible impact on the overall performance or power consumption of the
 	  processor.
 
+config ARM_ERRATA_751472
+	bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+	depends on CPU_V7 && SMP
+	help
+	  This option enables the workaround for the 751472 Cortex-A9 (prior
+	  to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+	  completion of a following broadcasted operation if the second
+	  operation is received by a CPU before the ICIALLUIS has completed,
+	  potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+	bool "ARM errata: cache sync operation may be faulty"
+	depends on CACHE_PL310
+	help
+	  This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+	  Under some condition the effect of cache sync operation on
+	  the store buffer still remains when the operation completes.
+	  This means that the store buffer is always asked to drain and
+	  this prevents it from merging any further writes. The workaround
+	  is to replace the normal offset of cache sync operation (0x730)
+	  by another offset targeting an unmapped PL310 register 0x740.
+	  This has the same effect as the cache sync operation: store buffer
+	  drain and waiting for all buffers empty.
+
 endmenu
 
 source "arch/arm/common/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd6..6f7b29294c80 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
 LDFLAGS_vmlinux	+= --be8
 endif
 
-OBJCOPYFLAGS	:=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS	:=-O binary -R .comment -S
 GZFLAGS		:=-9
 #KBUILD_CFLAGS +=-pipe
 # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db594d3..c6028967d336 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
 font.c
-piggy.gz
+lib1funcs.S
+piggy.gzip
+piggy.lzo
+piggy.lzma
+vmlinux
 vmlinux.lds
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735c..16bd48031583 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
 #define L2X0_RAW_INTR_STAT	0x21C
 #define L2X0_INTR_CLEAR		0x220
 #define L2X0_CACHE_SYNC		0x730
+#define L2X0_DUMMY_REG		0x740
 #define L2X0_INV_LINE_PA	0x770
 #define L2X0_INV_WAY		0x77C
 #define L2X0_CLEAN_LINE_PA	0x7B0
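The new L2X0_DUMMY_REG offset is what the ARM_ERRATA_753970 option described earlier relies on. A rough sketch of the idea, not the exact mainline driver code; the helper and macro names here are illustrative:

/* Sketch only: with erratum 753970, the cache sync write is redirected from
 * L2X0_CACHE_SYNC (0x730) to the unmapped L2X0_DUMMY_REG (0x740), which still
 * drains the PL310 store buffer without sticking it in "always drain" mode. */
#ifdef CONFIG_ARM_ERRATA_753970
#define L2X0_SYNC_REG	L2X0_DUMMY_REG
#else
#define L2X0_SYNC_REG	L2X0_CACHE_SYNC
#endif

static inline void l2x0_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_SYNC_REG);
	/* ... then poll for completion as the l2x0 driver normally does ... */
}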
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index 721847dc68ab..e0d1c0cfa548 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -58,6 +58,9 @@
 
 static inline void sysctl_soft_reset(void __iomem *base)
 {
+	/* switch to slow mode */
+	writel(0x2, base + SCCTRL);
+
 	/* writing any value to SCSYSSTAT reg will reset system */
 	writel(0, base + SCSYSSTAT);
 }
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd12..82dfe5d0c41e 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
 #include <linux/pagemap.h>
+
+#define tlb_flush(tlb)	((void) tlb)
+
 #include <asm-generic/tlb.h>
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)	0
+#define FREE_PTE_NR		500
+#else
+#define tlb_fast_mode(tlb)	1
+#define FREE_PTE_NR		0
+#endif
 
 /*
  * TLB handling. This allows us to remove pages from the page
@@ -36,12 +54,58 @@
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
+	struct vm_area_struct	*vma;
 	unsigned long		range_start;
 	unsigned long		range_end;
+	unsigned int		nr;
+	struct page		*pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex. There's three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas. See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ *  3. Unmapping argument pages. See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	if (!tlb_fast_mode(tlb)) {
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
+		tlb->nr = 0;
+	}
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
+	tlb->vma = NULL;
+	tlb->nr = 0;
 
 	return tlb;
 }
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
+	tlb_add_flush(tlb, addr);
 }
 
 /*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
 		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+		tlb->vma = vma;
 		tlb->range_start = TASK_SIZE;
 		tlb->range_end = 0;
 	}
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+	} else {
+		tlb->pages[tlb->nr++] = page;
+		if (tlb->nr >= FREE_PTE_NR)
+			tlb_flush_mmu(tlb);
+	}
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
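To make the new page-freeing batch visible, here is a hedged sketch of the call sequence the generic mm code drives through this header; the wrapper function and its arguments are illustrative only (the real callers live in mm/memory.c and friends):

/* Illustrative flow only: how the mmu_gather API above is typically used
 * when unmapping part of a VMA. */
static void example_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long addr, struct page *page, pte_t *ptep)
{
	struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* not a full-mm teardown */

	tlb_start_vma(tlb, vma);		/* records vma, resets the flush range */
	tlb_remove_tlb_entry(tlb, ptep, addr);	/* widens range_start/range_end */
	tlb_remove_page(tlb, page);		/* batched in tlb->pages[] unless fast mode */
	tlb_end_vma(tlb, vma);			/* flush_tlb_range() over the batched range */
	tlb_finish_mmu(tlb, vma->vm_start, vma->vm_end);	/* final flush, frees batched pages */
}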
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea15a2..d2005de383b8 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
 #ifndef _ASMARM_TLBFLUSH_H
 #define _ASMARM_TLBFLUSH_H
 
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb)	((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
 
 #include <asm/glue.h>
 
diff --git a/arch/arm/kernel/kprobes-decode.c b/arch/arm/kernel/kprobes-decode.c
index 2c1f0050c9c4..8f6ed43861f1 100644
--- a/arch/arm/kernel/kprobes-decode.c
+++ b/arch/arm/kernel/kprobes-decode.c
@@ -1437,7 +1437,7 @@ arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
 		return space_cccc_1100_010x(insn, asi);
 
-	} else if ((insn & 0x0e000000) == 0x0c400000) {
+	} else if ((insn & 0x0e000000) == 0x0c000000) {
 
 		return space_cccc_110x(insn, asi);
 
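The one-line fix is easier to see with the masks written out. A worked illustration (not code from the patch): 0x0c400000 has bit 22 set, which the mask 0x0e000000 always clears, so the old comparison could never be true and the cccc110x coprocessor encodings were never routed to space_cccc_110x().

/* Worked check of the mask arithmetic behind the fix. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t mask = 0x0e000000;

	assert((0x0c400000u & mask) != 0x0c400000u);	/* old constant: unreachable */
	assert((0x0c000000u & mask) == 0x0c000000u);	/* new constant: reachable */
	return 0;
}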
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index b8af96ea62e6..2c79eec19262 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -97,28 +97,34 @@ set_irq_affinity(int irq,
 		    irq, cpu);
 	return err;
 #else
-	return 0;
+	return -EINVAL;
 #endif
 }
 
 static int
 init_cpu_pmu(void)
 {
-	int i, err = 0;
+	int i, irqs, err = 0;
 	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
 
-	if (!pdev) {
-		err = -ENODEV;
-		goto out;
-	}
+	if (!pdev)
+		return -ENODEV;
+
+	irqs = pdev->num_resources;
+
+	/*
+	 * If we have a single PMU interrupt that we can't shift, assume that
+	 * we're running on a uniprocessor machine and continue.
+	 */
+	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
+		return 0;
 
-	for (i = 0; i < pdev->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		err = set_irq_affinity(platform_get_irq(pdev, i), i);
 		if (err)
 			break;
 	}
 
-out:
 	return err;
 }
 
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 420b8d6485d6..5ea4fb718b97 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -226,8 +226,8 @@ int cpu_architecture(void)
 		 * Register 0 and check for VMSAv7 or PMSAv7 */
 		asm("mrc	p15, 0, %0, c0, c1, 4"
 		    : "=r" (mmfr0));
-		if ((mmfr0 & 0x0000000f) == 0x00000003 ||
-		    (mmfr0 & 0x000000f0) == 0x00000030)
+		if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
+		    (mmfr0 & 0x000000f0) >= 0x00000030)
 			cpu_arch = CPU_ARCH_ARMv7;
 		else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
 			 (mmfr0 & 0x000000f0) == 0x00000020)
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 907d5a620bca..abaf8445ce25 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -474,7 +474,9 @@ setup_return(struct pt_regs *regs, struct k_sigaction *ka,
 	unsigned long handler = (unsigned long)ka->sa.sa_handler;
 	unsigned long retcode;
 	int thumb = 0;
-	unsigned long cpsr = regs->ARM_cpsr & ~PSR_f;
+	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
+
+	cpsr |= PSR_ENDSTATE;
 
 	/*
 	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 86b66f3f2031..61462790757f 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -21,6 +21,12 @@
 #define ARM_CPU_KEEP(x)
 #endif
 
+#if defined(CONFIG_SMP_ON_UP) && !defined(CONFIG_DEBUG_SPINLOCK)
+#define ARM_EXIT_KEEP(x)	x
+#else
+#define ARM_EXIT_KEEP(x)
+#endif
+
 OUTPUT_ARCH(arm)
 ENTRY(stext)
 
@@ -43,6 +49,7 @@ SECTIONS
 		_sinittext = .;
 			HEAD_TEXT
 			INIT_TEXT
+			ARM_EXIT_KEEP(EXIT_TEXT)
 		_einittext = .;
 		ARM_CPU_DISCARD(PROC_INFO)
 		__arch_info_begin = .;
@@ -67,6 +74,7 @@ SECTIONS
 #ifndef CONFIG_XIP_KERNEL
 		__init_begin = _stext;
 		INIT_DATA
+		ARM_EXIT_KEEP(EXIT_DATA)
 #endif
 	}
 
@@ -162,6 +170,7 @@ SECTIONS
 	. = ALIGN(PAGE_SIZE);
 	__init_begin = .;
 	INIT_DATA
+	ARM_EXIT_KEEP(EXIT_DATA)
 	. = ALIGN(PAGE_SIZE);
 	__init_end = .;
 #endif
@@ -247,6 +256,8 @@ SECTIONS
 	}
 #endif
 
+	NOTES
+
 	BSS_SECTION(0, 0, 0)
 	_end = .;
 
diff --git a/arch/arm/mach-s5p6442/include/mach/map.h b/arch/arm/mach-s5p6442/include/mach/map.h
index 203dd5a18bd5..058dab4482a1 100644
--- a/arch/arm/mach-s5p6442/include/mach/map.h
+++ b/arch/arm/mach-s5p6442/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p6442/include/mach/map.h
  *
- * Copyright (c) 2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com/
  *
  * S5P6442 - Memory map definitions
@@ -16,56 +16,61 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P6442_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID	S5P6442_PA_CHIPID
+#define S5P6442_PA_SDRAM	0x20000000
 
-#define S5P6442_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON	S5P6442_PA_SYSCON
+#define S5P6442_PA_I2S0	0xC0B00000
+#define S5P6442_PA_I2S1	0xF2200000
 
-#define S5P6442_PA_GPIO	(0xE0200000)
+#define S5P6442_PA_CHIPID	0xE0000000
 
-#define S5P6442_PA_VIC0	(0xE4000000)
-#define S5P6442_PA_VIC1	(0xE4100000)
-#define S5P6442_PA_VIC2	(0xE4200000)
+#define S5P6442_PA_SYSCON	0xE0100000
 
-#define S5P6442_PA_SROMC	(0xE7000000)
-#define S5P_PA_SROMC	S5P6442_PA_SROMC
+#define S5P6442_PA_GPIO	0xE0200000
 
-#define S5P6442_PA_MDMA	0xE8000000
-#define S5P6442_PA_PDMA	0xE9000000
+#define S5P6442_PA_VIC0	0xE4000000
+#define S5P6442_PA_VIC1	0xE4100000
+#define S5P6442_PA_VIC2	0xE4200000
 
-#define S5P6442_PA_TIMER	(0xEA000000)
-#define S5P_PA_TIMER	S5P6442_PA_TIMER
+#define S5P6442_PA_SROMC	0xE7000000
 
-#define S5P6442_PA_SYSTIMER	(0xEA100000)
+#define S5P6442_PA_MDMA	0xE8000000
+#define S5P6442_PA_PDMA	0xE9000000
 
-#define S5P6442_PA_WATCHDOG	(0xEA200000)
+#define S5P6442_PA_TIMER	0xEA000000
 
-#define S5P6442_PA_UART	(0xEC000000)
+#define S5P6442_PA_SYSTIMER	0xEA100000
 
-#define S5P_PA_UART0	(S5P6442_PA_UART + 0x0)
-#define S5P_PA_UART1	(S5P6442_PA_UART + 0x400)
-#define S5P_PA_UART2	(S5P6442_PA_UART + 0x800)
-#define S5P_SZ_UART	SZ_256
+#define S5P6442_PA_WATCHDOG	0xEA200000
 
-#define S5P6442_PA_IIC0	(0xEC100000)
+#define S5P6442_PA_UART	0xEC000000
 
-#define S5P6442_PA_SDRAM	(0x20000000)
-#define S5P_PA_SDRAM	S5P6442_PA_SDRAM
+#define S5P6442_PA_IIC0	0xEC100000
 
 #define S5P6442_PA_SPI	0xEC300000
 
-/* I2S */
-#define S5P6442_PA_I2S0	0xC0B00000
-#define S5P6442_PA_I2S1	0xF2200000
-
-/* PCM */
 #define S5P6442_PA_PCM0	0xF2400000
 #define S5P6442_PA_PCM1	0xF2500000
 
-/* compatibiltiy defines. */
+/* Compatibiltiy Defines */
+
+#define S3C_PA_IIC	S5P6442_PA_IIC0
 #define S3C_PA_WDT	S5P6442_PA_WATCHDOG
+
+#define S5P_PA_CHIPID	S5P6442_PA_CHIPID
+#define S5P_PA_SDRAM	S5P6442_PA_SDRAM
+#define S5P_PA_SROMC	S5P6442_PA_SROMC
+#define S5P_PA_SYSCON	S5P6442_PA_SYSCON
+#define S5P_PA_TIMER	S5P6442_PA_TIMER
+
+/* UART */
+
 #define S3C_PA_UART	S5P6442_PA_UART
-#define S3C_PA_IIC	S5P6442_PA_IIC0
+
+#define S5P_PA_UART(x)	(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0	S5P_PA_UART(0)
+#define S5P_PA_UART1	S5P_PA_UART(1)
+#define S5P_PA_UART2	S5P_PA_UART(2)
+
+#define S5P_SZ_UART	SZ_256
 
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5p64x0/include/mach/map.h b/arch/arm/mach-s5p64x0/include/mach/map.h
index a9365e5ba614..95c91257c7ca 100644
--- a/arch/arm/mach-s5p64x0/include/mach/map.h
+++ b/arch/arm/mach-s5p64x0/include/mach/map.h
@@ -1,6 +1,6 @@
 /* linux/arch/arm/mach-s5p64x0/include/mach/map.h
  *
- * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2009-2011 Samsung Electronics Co., Ltd.
  *		http://www.samsung.com
  *
  * S5P64X0 - Memory map definitions
@@ -16,64 +16,46 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-#define S5P64X0_PA_SDRAM	(0x20000000)
+#define S5P64X0_PA_SDRAM	0x20000000
 
-#define S5P64X0_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID	S5P64X0_PA_CHIPID
-
-#define S5P64X0_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON	S5P64X0_PA_SYSCON
-
-#define S5P64X0_PA_GPIO	(0xE0308000)
-
-#define S5P64X0_PA_VIC0	(0xE4000000)
-#define S5P64X0_PA_VIC1	(0xE4100000)
+#define S5P64X0_PA_CHIPID	0xE0000000
 
-#define S5P64X0_PA_SROMC	(0xE7000000)
-#define S5P_PA_SROMC	S5P64X0_PA_SROMC
-
-#define S5P64X0_PA_PDMA	(0xE9000000)
-
-#define S5P64X0_PA_TIMER	(0xEA000000)
-#define S5P_PA_TIMER	S5P64X0_PA_TIMER
+#define S5P64X0_PA_SYSCON	0xE0100000
 
-#define S5P64X0_PA_RTC	(0xEA100000)
+#define S5P64X0_PA_GPIO	0xE0308000
 
-#define S5P64X0_PA_WDT	(0xEA200000)
+#define S5P64X0_PA_VIC0	0xE4000000
+#define S5P64X0_PA_VIC1	0xE4100000
 
-#define S5P6440_PA_UART(x)	(0xEC000000 + ((x) * S3C_UART_OFFSET))
-#define S5P6450_PA_UART(x)	((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+#define S5P64X0_PA_SROMC	0xE7000000
 
-#define S5P_PA_UART0	S5P6450_PA_UART(0)
-#define S5P_PA_UART1	S5P6450_PA_UART(1)
-#define S5P_PA_UART2	S5P6450_PA_UART(2)
-#define S5P_PA_UART3	S5P6450_PA_UART(3)
-#define S5P_PA_UART4	S5P6450_PA_UART(4)
-#define S5P_PA_UART5	S5P6450_PA_UART(5)
+#define S5P64X0_PA_PDMA	0xE9000000
 
-#define S5P_SZ_UART	SZ_256
+#define S5P64X0_PA_TIMER	0xEA000000
+#define S5P64X0_PA_RTC	0xEA100000
+#define S5P64X0_PA_WDT	0xEA200000
 
-#define S5P6440_PA_IIC0	(0xEC104000)
-#define S5P6440_PA_IIC1	(0xEC20F000)
-#define S5P6450_PA_IIC0	(0xEC100000)
-#define S5P6450_PA_IIC1	(0xEC200000)
+#define S5P6440_PA_IIC0	0xEC104000
+#define S5P6440_PA_IIC1	0xEC20F000
+#define S5P6450_PA_IIC0	0xEC100000
+#define S5P6450_PA_IIC1	0xEC200000
 
-#define S5P64X0_PA_SPI0	(0xEC400000)
-#define S5P64X0_PA_SPI1	(0xEC500000)
+#define S5P64X0_PA_SPI0	0xEC400000
+#define S5P64X0_PA_SPI1	0xEC500000
 
-#define S5P64X0_PA_HSOTG	(0xED100000)
+#define S5P64X0_PA_HSOTG	0xED100000
 
 #define S5P64X0_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
 
-#define S5P64X0_PA_I2S	(0xF2000000)
+#define S5P64X0_PA_I2S	0xF2000000
 #define S5P6450_PA_I2S1	0xF2800000
 #define S5P6450_PA_I2S2	0xF2900000
 
-#define S5P64X0_PA_PCM	(0xF2100000)
+#define S5P64X0_PA_PCM	0xF2100000
 
-#define S5P64X0_PA_ADC	(0xF3000000)
+#define S5P64X0_PA_ADC	0xF3000000
 
-/* compatibiltiy defines. */
+/* Compatibiltiy Defines */
 
 #define S3C_PA_HSMMC0	S5P64X0_PA_HSMMC(0)
 #define S3C_PA_HSMMC1	S5P64X0_PA_HSMMC(1)
@@ -83,6 +65,25 @@
 #define S3C_PA_RTC	S5P64X0_PA_RTC
 #define S3C_PA_WDT	S5P64X0_PA_WDT
 
+#define S5P_PA_CHIPID	S5P64X0_PA_CHIPID
+#define S5P_PA_SROMC	S5P64X0_PA_SROMC
+#define S5P_PA_SYSCON	S5P64X0_PA_SYSCON
+#define S5P_PA_TIMER	S5P64X0_PA_TIMER
+
 #define SAMSUNG_PA_ADC	S5P64X0_PA_ADC
 
+/* UART */
+
+#define S5P6440_PA_UART(x)	(0xEC000000 + ((x) * S3C_UART_OFFSET))
+#define S5P6450_PA_UART(x)	((x < 5) ? (0xEC800000 + ((x) * S3C_UART_OFFSET)) : (0xEC000000))
+
+#define S5P_PA_UART0	S5P6450_PA_UART(0)
+#define S5P_PA_UART1	S5P6450_PA_UART(1)
+#define S5P_PA_UART2	S5P6450_PA_UART(2)
+#define S5P_PA_UART3	S5P6450_PA_UART(3)
+#define S5P_PA_UART4	S5P6450_PA_UART(4)
+#define S5P_PA_UART5	S5P6450_PA_UART(5)
+
+#define S5P_SZ_UART	SZ_256
+
 #endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pc100/include/mach/map.h b/arch/arm/mach-s5pc100/include/mach/map.h
index 328467b346aa..ccbe6b767f7d 100644
--- a/arch/arm/mach-s5pc100/include/mach/map.h
+++ b/arch/arm/mach-s5pc100/include/mach/map.h
@@ -1,5 +1,8 @@
 /* linux/arch/arm/mach-s5pc100/include/mach/map.h
  *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *		http://www.samsung.com/
+ *
  * Copyright 2009 Samsung Electronics Co.
  *	Byungho Min <bhmin@samsung.com>
  *
@@ -16,145 +19,115 @@
 #include <plat/map-base.h>
 #include <plat/map-s5p.h>
 
-/*
- * map-base.h has already defined virtual memory address
- * S3C_VA_IRQ	S3C_ADDR(0x00000000)	irq controller(s)
- * S3C_VA_SYS	S3C_ADDR(0x00100000)	system control
- * S3C_VA_MEM	S3C_ADDR(0x00200000)	system control (not used)
- * S3C_VA_TIMER	S3C_ADDR(0x00300000)	timer block
- * S3C_VA_WATCHDOG	S3C_ADDR(0x00400000)	watchdog
- * S3C_VA_UART	S3C_ADDR(0x01000000)	UART
- *
- * S5PC100 specific virtual memory address can be defined here
- * S5PC1XX_VA_GPIO	S3C_ADDR(0x00500000)	GPIO
- *
- */
+#define S5PC100_PA_SDRAM	0x20000000
+
+#define S5PC100_PA_ONENAND	0xE7100000
+#define S5PC100_PA_ONENAND_BUF	0xB0000000
+
+#define S5PC100_PA_CHIPID	0xE0000000
 
-#define S5PC100_PA_ONENAND_BUF	(0xB0000000)
-#define S5PC100_SZ_ONENAND_BUF	(SZ_256M - SZ_32M)
+#define S5PC100_PA_SYSCON	0xE0100000
 
-/* Chip ID */
+#define S5PC100_PA_OTHERS	0xE0200000
 
-#define S5PC100_PA_CHIPID	(0xE0000000)
-#define S5P_PA_CHIPID	S5PC100_PA_CHIPID
+#define S5PC100_PA_GPIO	0xE0300000
 
-#define S5PC100_PA_SYSCON	(0xE0100000)
-#define S5P_PA_SYSCON	S5PC100_PA_SYSCON
+#define S5PC100_PA_VIC0	0xE4000000
+#define S5PC100_PA_VIC1	0xE4100000
+#define S5PC100_PA_VIC2	0xE4200000
 
-#define S5PC100_PA_OTHERS	(0xE0200000)
-#define S5PC100_VA_OTHERS	(S3C_VA_SYS + 0x10000)
+#define S5PC100_PA_SROMC	0xE7000000
 
-#define S5PC100_PA_GPIO	(0xE0300000)
-#define S5PC1XX_VA_GPIO	S3C_ADDR(0x00500000)
+#define S5PC100_PA_CFCON	0xE7800000
 
-/* Interrupt */
-#define S5PC100_PA_VIC0	(0xE4000000)
-#define S5PC100_PA_VIC1	(0xE4100000)
-#define S5PC100_PA_VIC2	(0xE4200000)
-#define S5PC100_VA_VIC	S3C_VA_IRQ
-#define S5PC100_VA_VIC_OFFSET	0x10000
-#define S5PC1XX_VA_VIC(x)	(S5PC100_VA_VIC + ((x) * S5PC100_VA_VIC_OFFSET))
+#define S5PC100_PA_MDMA	0xE8100000
+#define S5PC100_PA_PDMA0	0xE9000000
+#define S5PC100_PA_PDMA1	0xE9200000
 
-#define S5PC100_PA_SROMC	(0xE7000000)
-#define S5P_PA_SROMC	S5PC100_PA_SROMC
+#define S5PC100_PA_TIMER	0xEA000000
+#define S5PC100_PA_SYSTIMER	0xEA100000
+#define S5PC100_PA_WATCHDOG	0xEA200000
+#define S5PC100_PA_RTC	0xEA300000
 
-#define S5PC100_PA_ONENAND	(0xE7100000)
+#define S5PC100_PA_UART	0xEC000000
 
-#define S5PC100_PA_CFCON	(0xE7800000)
+#define S5PC100_PA_IIC0	0xEC100000
+#define S5PC100_PA_IIC1	0xEC200000
 
-/* DMA */
-#define S5PC100_PA_MDMA	(0xE8100000)
-#define S5PC100_PA_PDMA0	(0xE9000000)
-#define S5PC100_PA_PDMA1	(0xE9200000)
+#define S5PC100_PA_SPI0	0xEC300000
+#define S5PC100_PA_SPI1	0xEC400000
+#define S5PC100_PA_SPI2	0xEC500000
 
-/* Timer */
-#define S5PC100_PA_TIMER	(0xEA000000)
-#define S5P_PA_TIMER	S5PC100_PA_TIMER
+#define S5PC100_PA_USB_HSOTG	0xED200000
+#define S5PC100_PA_USB_HSPHY	0xED300000
 
-#define S5PC100_PA_SYSTIMER	(0xEA100000)
+#define S5PC100_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
 
-#define S5PC100_PA_WATCHDOG	(0xEA200000)
-#define S5PC100_PA_RTC	(0xEA300000)
+#define S5PC100_PA_FB	0xEE000000
 
-#define S5PC100_PA_UART	(0xEC000000)
+#define S5PC100_PA_FIMC0	0xEE200000
+#define S5PC100_PA_FIMC1	0xEE300000
+#define S5PC100_PA_FIMC2	0xEE400000
 
-#define S5P_PA_UART0	(S5PC100_PA_UART + 0x0)
-#define S5P_PA_UART1	(S5PC100_PA_UART + 0x400)
-#define S5P_PA_UART2	(S5PC100_PA_UART + 0x800)
-#define S5P_PA_UART3	(S5PC100_PA_UART + 0xC00)
-#define S5P_SZ_UART	SZ_256
+#define S5PC100_PA_I2S0	0xF2000000
+#define S5PC100_PA_I2S1	0xF2100000
+#define S5PC100_PA_I2S2	0xF2200000
 
-#define S5PC100_PA_IIC0	(0xEC100000)
-#define S5PC100_PA_IIC1	(0xEC200000)
+#define S5PC100_PA_AC97	0xF2300000
 
-/* SPI */
-#define S5PC100_PA_SPI0	0xEC300000
-#define S5PC100_PA_SPI1	0xEC400000
-#define S5PC100_PA_SPI2	0xEC500000
+#define S5PC100_PA_PCM0	0xF2400000
+#define S5PC100_PA_PCM1	0xF2500000
 
-/* USB HS OTG */
-#define S5PC100_PA_USB_HSOTG	(0xED200000)
-#define S5PC100_PA_USB_HSPHY	(0xED300000)
+#define S5PC100_PA_SPDIF	0xF2600000
 
-#define S5PC100_PA_FB	(0xEE000000)
+#define S5PC100_PA_TSADC	0xF3000000
 
-#define S5PC100_PA_FIMC0	(0xEE200000)
-#define S5PC100_PA_FIMC1	(0xEE300000)
-#define S5PC100_PA_FIMC2	(0xEE400000)
+#define S5PC100_PA_KEYPAD	0xF3100000
 
-#define S5PC100_PA_I2S0	(0xF2000000)
-#define S5PC100_PA_I2S1	(0xF2100000)
-#define S5PC100_PA_I2S2	(0xF2200000)
+/* Compatibiltiy Defines */
 
-#define S5PC100_PA_AC97	0xF2300000
+#define S3C_PA_FB	S5PC100_PA_FB
+#define S3C_PA_HSMMC0	S5PC100_PA_HSMMC(0)
+#define S3C_PA_HSMMC1	S5PC100_PA_HSMMC(1)
+#define S3C_PA_HSMMC2	S5PC100_PA_HSMMC(2)
+#define S3C_PA_IIC	S5PC100_PA_IIC0
+#define S3C_PA_IIC1	S5PC100_PA_IIC1
+#define S3C_PA_KEYPAD	S5PC100_PA_KEYPAD
+#define S3C_PA_ONENAND	S5PC100_PA_ONENAND
+#define S3C_PA_ONENAND_BUF	S5PC100_PA_ONENAND_BUF
+#define S3C_PA_RTC	S5PC100_PA_RTC
+#define S3C_PA_TSADC	S5PC100_PA_TSADC
+#define S3C_PA_USB_HSOTG	S5PC100_PA_USB_HSOTG
+#define S3C_PA_USB_HSPHY	S5PC100_PA_USB_HSPHY
+#define S3C_PA_WDT	S5PC100_PA_WATCHDOG
 
-/* PCM */
-#define S5PC100_PA_PCM0	0xF2400000
-#define S5PC100_PA_PCM1	0xF2500000
+#define S5P_PA_CHIPID	S5PC100_PA_CHIPID
+#define S5P_PA_FIMC0	S5PC100_PA_FIMC0
+#define S5P_PA_FIMC1	S5PC100_PA_FIMC1
+#define S5P_PA_FIMC2	S5PC100_PA_FIMC2
+#define S5P_PA_SDRAM	S5PC100_PA_SDRAM
+#define S5P_PA_SROMC	S5PC100_PA_SROMC
+#define S5P_PA_SYSCON	S5PC100_PA_SYSCON
+#define S5P_PA_TIMER	S5PC100_PA_TIMER
 
-#define S5PC100_PA_SPDIF	0xF2600000
+#define SAMSUNG_PA_ADC	S5PC100_PA_TSADC
+#define SAMSUNG_PA_CFCON	S5PC100_PA_CFCON
+#define SAMSUNG_PA_KEYPAD	S5PC100_PA_KEYPAD
 
-#define S5PC100_PA_TSADC	(0xF3000000)
+#define S5PC100_VA_OTHERS	(S3C_VA_SYS + 0x10000)
 
-/* KEYPAD */
-#define S5PC100_PA_KEYPAD	(0xF3100000)
+#define S3C_SZ_ONENAND_BUF	(SZ_256M - SZ_32M)
 
-#define S5PC100_PA_HSMMC(x)	(0xED800000 + ((x) * 0x100000))
+/* UART */
 
-#define S5PC100_PA_SDRAM	(0x20000000)
-#define S5P_PA_SDRAM	S5PC100_PA_SDRAM
+#define S3C_PA_UART	S5PC100_PA_UART
 
-/* compatibiltiy defines. */
-#define S3C_PA_UART	S5PC100_PA_UART
-#define S3C_PA_IIC	S5PC100_PA_IIC0
-#define S3C_PA_IIC1	S5PC100_PA_IIC1
-#define S3C_PA_FB	S5PC100_PA_FB
-#define S3C_PA_G2D	S5PC100_PA_G2D
-#define S3C_PA_G3D	S5PC100_PA_G3D
-#define S3C_PA_JPEG	S5PC100_PA_JPEG
-#define S3C_PA_ROTATOR	S5PC100_PA_ROTATOR
-#define S5P_VA_VIC0	S5PC1XX_VA_VIC(0)
-#define S5P_VA_VIC1	S5PC1XX_VA_VIC(1)
-#define S5P_VA_VIC2	S5PC1XX_VA_VIC(2)
-#define S3C_PA_USB_HSOTG	S5PC100_PA_USB_HSOTG
-#define S3C_PA_USB_HSPHY	S5PC100_PA_USB_HSPHY
-#define S3C_PA_HSMMC0	S5PC100_PA_HSMMC(0)
-#define S3C_PA_HSMMC1	S5PC100_PA_HSMMC(1)
-#define S3C_PA_HSMMC2	S5PC100_PA_HSMMC(2)
-#define S3C_PA_KEYPAD	S5PC100_PA_KEYPAD
-#define S3C_PA_WDT	S5PC100_PA_WATCHDOG
-#define S3C_PA_TSADC	S5PC100_PA_TSADC
-#define S3C_PA_ONENAND	S5PC100_PA_ONENAND
-#define S3C_PA_ONENAND_BUF	S5PC100_PA_ONENAND_BUF
-#define S3C_SZ_ONENAND_BUF	S5PC100_SZ_ONENAND_BUF
-#define S3C_PA_RTC	S5PC100_PA_RTC
-
-#define SAMSUNG_PA_ADC	S5PC100_PA_TSADC
-#define SAMSUNG_PA_CFCON	S5PC100_PA_CFCON
-#define SAMSUNG_PA_KEYPAD	S5PC100_PA_KEYPAD
+#define S5P_PA_UART(x)	(S3C_PA_UART + ((x) * S3C_UART_OFFSET))
+#define S5P_PA_UART0	S5P_PA_UART(0)
+#define S5P_PA_UART1	S5P_PA_UART(1)
+#define S5P_PA_UART2	S5P_PA_UART(2)
+#define S5P_PA_UART3	S5P_PA_UART(3)
 
-#define S5P_PA_FIMC0	S5PC100_PA_FIMC0
-#define S5P_PA_FIMC1	S5PC100_PA_FIMC1
-#define S5P_PA_FIMC2	S5PC100_PA_FIMC2
+#define S5P_SZ_UART	SZ_256
 
-#endif /* __ASM_ARCH_C100_MAP_H */
+#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/mach-s5pv210/include/mach/map.h b/arch/arm/mach-s5pv210/include/mach/map.h index 3611492ad681..1dd58836fd4f 100644 --- a/arch/arm/mach-s5pv210/include/mach/map.h +++ b/arch/arm/mach-s5pv210/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5pv210/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5pv210/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com/ | 4 | * http://www.samsung.com/ |
5 | * | 5 | * |
6 | * S5PV210 - Memory map definitions | 6 | * S5PV210 - Memory map definitions |
@@ -16,122 +16,120 @@ | |||
16 | #include <plat/map-base.h> | 16 | #include <plat/map-base.h> |
17 | #include <plat/map-s5p.h> | 17 | #include <plat/map-s5p.h> |
18 | 18 | ||
19 | #define S5PV210_PA_SROM_BANK5 (0xA8000000) | 19 | #define S5PV210_PA_SDRAM 0x20000000 |
20 | 20 | ||
21 | #define S5PC110_PA_ONENAND (0xB0000000) | 21 | #define S5PV210_PA_SROM_BANK5 0xA8000000 |
22 | #define S5P_PA_ONENAND S5PC110_PA_ONENAND | ||
23 | 22 | ||
24 | #define S5PC110_PA_ONENAND_DMA (0xB0600000) | 23 | #define S5PC110_PA_ONENAND 0xB0000000 |
25 | #define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA | 24 | #define S5PC110_PA_ONENAND_DMA 0xB0600000 |
26 | 25 | ||
27 | #define S5PV210_PA_CHIPID (0xE0000000) | 26 | #define S5PV210_PA_CHIPID 0xE0000000 |
28 | #define S5P_PA_CHIPID S5PV210_PA_CHIPID | ||
29 | 27 | ||
30 | #define S5PV210_PA_SYSCON (0xE0100000) | 28 | #define S5PV210_PA_SYSCON 0xE0100000 |
31 | #define S5P_PA_SYSCON S5PV210_PA_SYSCON | ||
32 | 29 | ||
33 | #define S5PV210_PA_GPIO (0xE0200000) | 30 | #define S5PV210_PA_GPIO 0xE0200000 |
34 | 31 | ||
35 | /* SPI */ | 32 | #define S5PV210_PA_SPDIF 0xE1100000 |
36 | #define S5PV210_PA_SPI0 0xE1300000 | ||
37 | #define S5PV210_PA_SPI1 0xE1400000 | ||
38 | 33 | ||
39 | #define S5PV210_PA_KEYPAD (0xE1600000) | 34 | #define S5PV210_PA_SPI0 0xE1300000 |
35 | #define S5PV210_PA_SPI1 0xE1400000 | ||
40 | 36 | ||
41 | #define S5PV210_PA_IIC0 (0xE1800000) | 37 | #define S5PV210_PA_KEYPAD 0xE1600000 |
42 | #define S5PV210_PA_IIC1 (0xFAB00000) | ||
43 | #define S5PV210_PA_IIC2 (0xE1A00000) | ||
44 | 38 | ||
45 | #define S5PV210_PA_TIMER (0xE2500000) | 39 | #define S5PV210_PA_ADC 0xE1700000 |
46 | #define S5P_PA_TIMER S5PV210_PA_TIMER | ||
47 | 40 | ||
48 | #define S5PV210_PA_SYSTIMER (0xE2600000) | 41 | #define S5PV210_PA_IIC0 0xE1800000 |
42 | #define S5PV210_PA_IIC1 0xFAB00000 | ||
43 | #define S5PV210_PA_IIC2 0xE1A00000 | ||
49 | 44 | ||
50 | #define S5PV210_PA_WATCHDOG (0xE2700000) | 45 | #define S5PV210_PA_AC97 0xE2200000 |
51 | 46 | ||
52 | #define S5PV210_PA_RTC (0xE2800000) | 47 | #define S5PV210_PA_PCM0 0xE2300000 |
53 | #define S5PV210_PA_UART (0xE2900000) | 48 | #define S5PV210_PA_PCM1 0xE1200000 |
49 | #define S5PV210_PA_PCM2 0xE2B00000 | ||
54 | 50 | ||
55 | #define S5P_PA_UART0 (S5PV210_PA_UART + 0x0) | 51 | #define S5PV210_PA_TIMER 0xE2500000 |
56 | #define S5P_PA_UART1 (S5PV210_PA_UART + 0x400) | 52 | #define S5PV210_PA_SYSTIMER 0xE2600000 |
57 | #define S5P_PA_UART2 (S5PV210_PA_UART + 0x800) | 53 | #define S5PV210_PA_WATCHDOG 0xE2700000 |
58 | #define S5P_PA_UART3 (S5PV210_PA_UART + 0xC00) | 54 | #define S5PV210_PA_RTC 0xE2800000 |
59 | 55 | ||
60 | #define S5P_SZ_UART SZ_256 | 56 | #define S5PV210_PA_UART 0xE2900000 |
61 | 57 | ||
62 | #define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) | 58 | #define S5PV210_PA_SROMC 0xE8000000 |
63 | 59 | ||
64 | #define S5PV210_PA_SROMC (0xE8000000) | 60 | #define S5PV210_PA_CFCON 0xE8200000 |
65 | #define S5P_PA_SROMC S5PV210_PA_SROMC | ||
66 | 61 | ||
67 | #define S5PV210_PA_CFCON (0xE8200000) | 62 | #define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) |
68 | 63 | ||
69 | #define S5PV210_PA_MDMA 0xFA200000 | 64 | #define S5PV210_PA_HSOTG 0xEC000000 |
70 | #define S5PV210_PA_PDMA0 0xE0900000 | 65 | #define S5PV210_PA_HSPHY 0xEC100000 |
71 | #define S5PV210_PA_PDMA1 0xE0A00000 | ||
72 | 66 | ||
73 | #define S5PV210_PA_FB (0xF8000000) | 67 | #define S5PV210_PA_IIS0 0xEEE30000 |
68 | #define S5PV210_PA_IIS1 0xE2100000 | ||
69 | #define S5PV210_PA_IIS2 0xE2A00000 | ||
74 | 70 | ||
75 | #define S5PV210_PA_FIMC0 (0xFB200000) | 71 | #define S5PV210_PA_DMC0 0xF0000000 |
76 | #define S5PV210_PA_FIMC1 (0xFB300000) | 72 | #define S5PV210_PA_DMC1 0xF1400000 |
77 | #define S5PV210_PA_FIMC2 (0xFB400000) | ||
78 | 73 | ||
79 | #define S5PV210_PA_HSMMC(x) (0xEB000000 + ((x) * 0x100000)) | 74 | #define S5PV210_PA_VIC0 0xF2000000 |
75 | #define S5PV210_PA_VIC1 0xF2100000 | ||
76 | #define S5PV210_PA_VIC2 0xF2200000 | ||
77 | #define S5PV210_PA_VIC3 0xF2300000 | ||
80 | 78 | ||
81 | #define S5PV210_PA_HSOTG (0xEC000000) | 79 | #define S5PV210_PA_FB 0xF8000000 |
82 | #define S5PV210_PA_HSPHY (0xEC100000) | ||
83 | 80 | ||
84 | #define S5PV210_PA_VIC0 (0xF2000000) | 81 | #define S5PV210_PA_MDMA 0xFA200000 |
85 | #define S5PV210_PA_VIC1 (0xF2100000) | 82 | #define S5PV210_PA_PDMA0 0xE0900000 |
86 | #define S5PV210_PA_VIC2 (0xF2200000) | 83 | #define S5PV210_PA_PDMA1 0xE0A00000 |
87 | #define S5PV210_PA_VIC3 (0xF2300000) | ||
88 | 84 | ||
89 | #define S5PV210_PA_SDRAM (0x20000000) | 85 | #define S5PV210_PA_MIPI_CSIS 0xFA600000 |
90 | #define S5P_PA_SDRAM S5PV210_PA_SDRAM | ||
91 | 86 | ||
92 | /* S/PDIF */ | 87 | #define S5PV210_PA_FIMC0 0xFB200000 |
93 | #define S5PV210_PA_SPDIF 0xE1100000 | 88 | #define S5PV210_PA_FIMC1 0xFB300000 |
89 | #define S5PV210_PA_FIMC2 0xFB400000 | ||
94 | 90 | ||
95 | /* I2S */ | 91 | /* Compatibility Defines */ |
96 | #define S5PV210_PA_IIS0 0xEEE30000 | ||
97 | #define S5PV210_PA_IIS1 0xE2100000 | ||
98 | #define S5PV210_PA_IIS2 0xE2A00000 | ||
99 | 92 | ||
100 | /* PCM */ | 93 | #define S3C_PA_FB S5PV210_PA_FB |
101 | #define S5PV210_PA_PCM0 0xE2300000 | 94 | #define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) |
102 | #define S5PV210_PA_PCM1 0xE1200000 | 95 | #define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) |
103 | #define S5PV210_PA_PCM2 0xE2B00000 | 96 | #define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) |
97 | #define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) | ||
98 | #define S3C_PA_IIC S5PV210_PA_IIC0 | ||
99 | #define S3C_PA_IIC1 S5PV210_PA_IIC1 | ||
100 | #define S3C_PA_IIC2 S5PV210_PA_IIC2 | ||
101 | #define S3C_PA_RTC S5PV210_PA_RTC | ||
102 | #define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG | ||
103 | #define S3C_PA_WDT S5PV210_PA_WATCHDOG | ||
104 | 104 | ||
105 | /* AC97 */ | 105 | #define S5P_PA_CHIPID S5PV210_PA_CHIPID |
106 | #define S5PV210_PA_AC97 0xE2200000 | 106 | #define S5P_PA_FIMC0 S5PV210_PA_FIMC0 |
107 | #define S5P_PA_FIMC1 S5PV210_PA_FIMC1 | ||
108 | #define S5P_PA_FIMC2 S5PV210_PA_FIMC2 | ||
109 | #define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS | ||
110 | #define S5P_PA_ONENAND S5PC110_PA_ONENAND | ||
111 | #define S5P_PA_ONENAND_DMA S5PC110_PA_ONENAND_DMA | ||
112 | #define S5P_PA_SDRAM S5PV210_PA_SDRAM | ||
113 | #define S5P_PA_SROMC S5PV210_PA_SROMC | ||
114 | #define S5P_PA_SYSCON S5PV210_PA_SYSCON | ||
115 | #define S5P_PA_TIMER S5PV210_PA_TIMER | ||
107 | 116 | ||
108 | #define S5PV210_PA_ADC (0xE1700000) | 117 | #define SAMSUNG_PA_ADC S5PV210_PA_ADC |
118 | #define SAMSUNG_PA_CFCON S5PV210_PA_CFCON | ||
119 | #define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD | ||
109 | 120 | ||
110 | #define S5PV210_PA_DMC0 (0xF0000000) | 121 | /* UART */ |
111 | #define S5PV210_PA_DMC1 (0xF1400000) | ||
112 | 122 | ||
113 | #define S5PV210_PA_MIPI_CSIS 0xFA600000 | 123 | #define S3C_VA_UARTx(x) (S3C_VA_UART + ((x) * S3C_UART_OFFSET)) |
114 | 124 | ||
115 | /* compatibiltiy defines. */ | 125 | #define S3C_PA_UART S5PV210_PA_UART |
116 | #define S3C_PA_UART S5PV210_PA_UART | ||
117 | #define S3C_PA_HSMMC0 S5PV210_PA_HSMMC(0) | ||
118 | #define S3C_PA_HSMMC1 S5PV210_PA_HSMMC(1) | ||
119 | #define S3C_PA_HSMMC2 S5PV210_PA_HSMMC(2) | ||
120 | #define S3C_PA_HSMMC3 S5PV210_PA_HSMMC(3) | ||
121 | #define S3C_PA_IIC S5PV210_PA_IIC0 | ||
122 | #define S3C_PA_IIC1 S5PV210_PA_IIC1 | ||
123 | #define S3C_PA_IIC2 S5PV210_PA_IIC2 | ||
124 | #define S3C_PA_FB S5PV210_PA_FB | ||
125 | #define S3C_PA_RTC S5PV210_PA_RTC | ||
126 | #define S3C_PA_WDT S5PV210_PA_WATCHDOG | ||
127 | #define S3C_PA_USB_HSOTG S5PV210_PA_HSOTG | ||
128 | #define S5P_PA_FIMC0 S5PV210_PA_FIMC0 | ||
129 | #define S5P_PA_FIMC1 S5PV210_PA_FIMC1 | ||
130 | #define S5P_PA_FIMC2 S5PV210_PA_FIMC2 | ||
131 | #define S5P_PA_MIPI_CSIS0 S5PV210_PA_MIPI_CSIS | ||
132 | 126 | ||
133 | #define SAMSUNG_PA_ADC S5PV210_PA_ADC | 127 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) |
134 | #define SAMSUNG_PA_CFCON S5PV210_PA_CFCON | 128 | #define S5P_PA_UART0 S5P_PA_UART(0) |
135 | #define SAMSUNG_PA_KEYPAD S5PV210_PA_KEYPAD | 129 | #define S5P_PA_UART1 S5P_PA_UART(1) |
130 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
131 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
132 | |||
133 | #define S5P_SZ_UART SZ_256 | ||
136 | 134 | ||
137 | #endif /* __ASM_ARCH_MAP_H */ | 135 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c index 461aa035afc0..557add4fc56c 100644 --- a/arch/arm/mach-s5pv210/mach-aquila.c +++ b/arch/arm/mach-s5pv210/mach-aquila.c | |||
@@ -149,7 +149,7 @@ static struct regulator_init_data aquila_ldo2_data = { | |||
149 | 149 | ||
150 | static struct regulator_init_data aquila_ldo3_data = { | 150 | static struct regulator_init_data aquila_ldo3_data = { |
151 | .constraints = { | 151 | .constraints = { |
152 | .name = "VUSB/MIPI_1.1V", | 152 | .name = "VUSB+MIPI_1.1V", |
153 | .min_uV = 1100000, | 153 | .min_uV = 1100000, |
154 | .max_uV = 1100000, | 154 | .max_uV = 1100000, |
155 | .apply_uV = 1, | 155 | .apply_uV = 1, |
@@ -197,7 +197,7 @@ static struct regulator_init_data aquila_ldo7_data = { | |||
197 | 197 | ||
198 | static struct regulator_init_data aquila_ldo8_data = { | 198 | static struct regulator_init_data aquila_ldo8_data = { |
199 | .constraints = { | 199 | .constraints = { |
200 | .name = "VUSB/VADC_3.3V", | 200 | .name = "VUSB+VADC_3.3V", |
201 | .min_uV = 3300000, | 201 | .min_uV = 3300000, |
202 | .max_uV = 3300000, | 202 | .max_uV = 3300000, |
203 | .apply_uV = 1, | 203 | .apply_uV = 1, |
@@ -207,7 +207,7 @@ static struct regulator_init_data aquila_ldo8_data = { | |||
207 | 207 | ||
208 | static struct regulator_init_data aquila_ldo9_data = { | 208 | static struct regulator_init_data aquila_ldo9_data = { |
209 | .constraints = { | 209 | .constraints = { |
210 | .name = "VCC/VCAM_2.8V", | 210 | .name = "VCC+VCAM_2.8V", |
211 | .min_uV = 2800000, | 211 | .min_uV = 2800000, |
212 | .max_uV = 2800000, | 212 | .max_uV = 2800000, |
213 | .apply_uV = 1, | 213 | .apply_uV = 1, |
@@ -381,9 +381,12 @@ static struct max8998_platform_data aquila_max8998_pdata = { | |||
381 | .buck1_set1 = S5PV210_GPH0(3), | 381 | .buck1_set1 = S5PV210_GPH0(3), |
382 | .buck1_set2 = S5PV210_GPH0(4), | 382 | .buck1_set2 = S5PV210_GPH0(4), |
383 | .buck2_set3 = S5PV210_GPH0(5), | 383 | .buck2_set3 = S5PV210_GPH0(5), |
384 | .buck1_max_voltage1 = 1200000, | 384 | .buck1_voltage1 = 1200000, |
385 | .buck1_max_voltage2 = 1200000, | 385 | .buck1_voltage2 = 1200000, |
386 | .buck2_max_voltage = 1200000, | 386 | .buck1_voltage3 = 1200000, |
387 | .buck1_voltage4 = 1200000, | ||
388 | .buck2_voltage1 = 1200000, | ||
389 | .buck2_voltage2 = 1200000, | ||
387 | }; | 390 | }; |
388 | #endif | 391 | #endif |
389 | 392 | ||
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c index e22d5112fd44..056f5c769b0a 100644 --- a/arch/arm/mach-s5pv210/mach-goni.c +++ b/arch/arm/mach-s5pv210/mach-goni.c | |||
@@ -288,7 +288,7 @@ static struct regulator_init_data goni_ldo2_data = { | |||
288 | 288 | ||
289 | static struct regulator_init_data goni_ldo3_data = { | 289 | static struct regulator_init_data goni_ldo3_data = { |
290 | .constraints = { | 290 | .constraints = { |
291 | .name = "VUSB/MIPI_1.1V", | 291 | .name = "VUSB+MIPI_1.1V", |
292 | .min_uV = 1100000, | 292 | .min_uV = 1100000, |
293 | .max_uV = 1100000, | 293 | .max_uV = 1100000, |
294 | .apply_uV = 1, | 294 | .apply_uV = 1, |
@@ -337,7 +337,7 @@ static struct regulator_init_data goni_ldo7_data = { | |||
337 | 337 | ||
338 | static struct regulator_init_data goni_ldo8_data = { | 338 | static struct regulator_init_data goni_ldo8_data = { |
339 | .constraints = { | 339 | .constraints = { |
340 | .name = "VUSB/VADC_3.3V", | 340 | .name = "VUSB+VADC_3.3V", |
341 | .min_uV = 3300000, | 341 | .min_uV = 3300000, |
342 | .max_uV = 3300000, | 342 | .max_uV = 3300000, |
343 | .apply_uV = 1, | 343 | .apply_uV = 1, |
@@ -347,7 +347,7 @@ static struct regulator_init_data goni_ldo8_data = { | |||
347 | 347 | ||
348 | static struct regulator_init_data goni_ldo9_data = { | 348 | static struct regulator_init_data goni_ldo9_data = { |
349 | .constraints = { | 349 | .constraints = { |
350 | .name = "VCC/VCAM_2.8V", | 350 | .name = "VCC+VCAM_2.8V", |
351 | .min_uV = 2800000, | 351 | .min_uV = 2800000, |
352 | .max_uV = 2800000, | 352 | .max_uV = 2800000, |
353 | .apply_uV = 1, | 353 | .apply_uV = 1, |
@@ -521,9 +521,12 @@ static struct max8998_platform_data goni_max8998_pdata = { | |||
521 | .buck1_set1 = S5PV210_GPH0(3), | 521 | .buck1_set1 = S5PV210_GPH0(3), |
522 | .buck1_set2 = S5PV210_GPH0(4), | 522 | .buck1_set2 = S5PV210_GPH0(4), |
523 | .buck2_set3 = S5PV210_GPH0(5), | 523 | .buck2_set3 = S5PV210_GPH0(5), |
524 | .buck1_max_voltage1 = 1200000, | 524 | .buck1_voltage1 = 1200000, |
525 | .buck1_max_voltage2 = 1200000, | 525 | .buck1_voltage2 = 1200000, |
526 | .buck2_max_voltage = 1200000, | 526 | .buck1_voltage3 = 1200000, |
527 | .buck1_voltage4 = 1200000, | ||
528 | .buck2_voltage1 = 1200000, | ||
529 | .buck2_voltage2 = 1200000, | ||
527 | }; | 530 | }; |
528 | #endif | 531 | #endif |
529 | 532 | ||
diff --git a/arch/arm/mach-s5pv310/include/mach/map.h b/arch/arm/mach-s5pv310/include/mach/map.h index 3060f78e12ab..901657fa7a12 100644 --- a/arch/arm/mach-s5pv310/include/mach/map.h +++ b/arch/arm/mach-s5pv310/include/mach/map.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* linux/arch/arm/mach-s5pv310/include/mach/map.h | 1 | /* linux/arch/arm/mach-s5pv310/include/mach/map.h |
2 | * | 2 | * |
3 | * Copyright (c) 2010 Samsung Electronics Co., Ltd. | 3 | * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd. |
4 | * http://www.samsung.com/ | 4 | * http://www.samsung.com/ |
5 | * | 5 | * |
6 | * S5PV310 - Memory map definitions | 6 | * S5PV310 - Memory map definitions |
@@ -23,90 +23,43 @@ | |||
23 | 23 | ||
24 | #include <plat/map-s5p.h> | 24 | #include <plat/map-s5p.h> |
25 | 25 | ||
26 | #define S5PV310_PA_SYSRAM (0x02025000) | 26 | #define S5PV310_PA_SYSRAM 0x02025000 |
27 | 27 | ||
28 | #define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) | 28 | #define S5PV310_PA_I2S0 0x03830000 |
29 | 29 | #define S5PV310_PA_I2S1 0xE3100000 | |
30 | #define S5PC210_PA_ONENAND (0x0C000000) | 30 | #define S5PV310_PA_I2S2 0xE2A00000 |
31 | #define S5P_PA_ONENAND S5PC210_PA_ONENAND | ||
32 | |||
33 | #define S5PC210_PA_ONENAND_DMA (0x0C600000) | ||
34 | #define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA | ||
35 | |||
36 | #define S5PV310_PA_CHIPID (0x10000000) | ||
37 | #define S5P_PA_CHIPID S5PV310_PA_CHIPID | ||
38 | |||
39 | #define S5PV310_PA_SYSCON (0x10010000) | ||
40 | #define S5P_PA_SYSCON S5PV310_PA_SYSCON | ||
41 | 31 | ||
42 | #define S5PV310_PA_PMU (0x10020000) | 32 | #define S5PV310_PA_PCM0 0x03840000 |
33 | #define S5PV310_PA_PCM1 0x13980000 | ||
34 | #define S5PV310_PA_PCM2 0x13990000 | ||
43 | 35 | ||
44 | #define S5PV310_PA_CMU (0x10030000) | 36 | #define S5PV310_PA_SROM_BANK(x) (0x04000000 + ((x) * 0x01000000)) |
45 | |||
46 | #define S5PV310_PA_WATCHDOG (0x10060000) | ||
47 | #define S5PV310_PA_RTC (0x10070000) | ||
48 | |||
49 | #define S5PV310_PA_DMC0 (0x10400000) | ||
50 | |||
51 | #define S5PV310_PA_COMBINER (0x10448000) | ||
52 | |||
53 | #define S5PV310_PA_COREPERI (0x10500000) | ||
54 | #define S5PV310_PA_GIC_CPU (0x10500100) | ||
55 | #define S5PV310_PA_TWD (0x10500600) | ||
56 | #define S5PV310_PA_GIC_DIST (0x10501000) | ||
57 | #define S5PV310_PA_L2CC (0x10502000) | ||
58 | |||
59 | /* DMA */ | ||
60 | #define S5PV310_PA_MDMA 0x10810000 | ||
61 | #define S5PV310_PA_PDMA0 0x12680000 | ||
62 | #define S5PV310_PA_PDMA1 0x12690000 | ||
63 | |||
64 | #define S5PV310_PA_GPIO1 (0x11400000) | ||
65 | #define S5PV310_PA_GPIO2 (0x11000000) | ||
66 | #define S5PV310_PA_GPIO3 (0x03860000) | ||
67 | |||
68 | #define S5PV310_PA_MIPI_CSIS0 0x11880000 | ||
69 | #define S5PV310_PA_MIPI_CSIS1 0x11890000 | ||
70 | 37 | ||
71 | #define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) | 38 | #define S5PC210_PA_ONENAND 0x0C000000 |
39 | #define S5PC210_PA_ONENAND_DMA 0x0C600000 | ||
72 | 40 | ||
73 | #define S5PV310_PA_SROMC (0x12570000) | 41 | #define S5PV310_PA_CHIPID 0x10000000 |
74 | #define S5P_PA_SROMC S5PV310_PA_SROMC | ||
75 | 42 | ||
76 | /* S/PDIF */ | 43 | #define S5PV310_PA_SYSCON 0x10010000 |
77 | #define S5PV310_PA_SPDIF 0xE1100000 | 44 | #define S5PV310_PA_PMU 0x10020000 |
45 | #define S5PV310_PA_CMU 0x10030000 | ||
78 | 46 | ||
79 | /* I2S */ | 47 | #define S5PV310_PA_WATCHDOG 0x10060000 |
80 | #define S5PV310_PA_I2S0 0x03830000 | 48 | #define S5PV310_PA_RTC 0x10070000 |
81 | #define S5PV310_PA_I2S1 0xE3100000 | ||
82 | #define S5PV310_PA_I2S2 0xE2A00000 | ||
83 | 49 | ||
84 | /* PCM */ | 50 | #define S5PV310_PA_DMC0 0x10400000 |
85 | #define S5PV310_PA_PCM0 0x03840000 | ||
86 | #define S5PV310_PA_PCM1 0x13980000 | ||
87 | #define S5PV310_PA_PCM2 0x13990000 | ||
88 | 51 | ||
89 | /* AC97 */ | 52 | #define S5PV310_PA_COMBINER 0x10448000 |
90 | #define S5PV310_PA_AC97 0x139A0000 | ||
91 | 53 | ||
92 | #define S5PV310_PA_UART (0x13800000) | 54 | #define S5PV310_PA_COREPERI 0x10500000 |
55 | #define S5PV310_PA_GIC_CPU 0x10500100 | ||
56 | #define S5PV310_PA_TWD 0x10500600 | ||
57 | #define S5PV310_PA_GIC_DIST 0x10501000 | ||
58 | #define S5PV310_PA_L2CC 0x10502000 | ||
93 | 59 | ||
94 | #define S5P_PA_UART(x) (S5PV310_PA_UART + ((x) * S3C_UART_OFFSET)) | 60 | #define S5PV310_PA_MDMA 0x10810000 |
95 | #define S5P_PA_UART0 S5P_PA_UART(0) | 61 | #define S5PV310_PA_PDMA0 0x12680000 |
96 | #define S5P_PA_UART1 S5P_PA_UART(1) | 62 | #define S5PV310_PA_PDMA1 0x12690000 |
97 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
98 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
99 | #define S5P_PA_UART4 S5P_PA_UART(4) | ||
100 | |||
101 | #define S5P_SZ_UART SZ_256 | ||
102 | |||
103 | #define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) | ||
104 | |||
105 | #define S5PV310_PA_TIMER (0x139D0000) | ||
106 | #define S5P_PA_TIMER S5PV310_PA_TIMER | ||
107 | |||
108 | #define S5PV310_PA_SDRAM (0x40000000) | ||
109 | #define S5P_PA_SDRAM S5PV310_PA_SDRAM | ||
110 | 63 | ||
111 | #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 | 64 | #define S5PV310_PA_SYSMMU_MDMA 0x10A40000 |
112 | #define S5PV310_PA_SYSMMU_SSS 0x10A50000 | 65 | #define S5PV310_PA_SYSMMU_SSS 0x10A50000 |
@@ -125,8 +78,31 @@ | |||
125 | #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 | 78 | #define S5PV310_PA_SYSMMU_MFC_L 0x13620000 |
126 | #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 | 79 | #define S5PV310_PA_SYSMMU_MFC_R 0x13630000 |
127 | 80 | ||
128 | /* compatibiltiy defines. */ | 81 | #define S5PV310_PA_GPIO1 0x11400000 |
129 | #define S3C_PA_UART S5PV310_PA_UART | 82 | #define S5PV310_PA_GPIO2 0x11000000 |
83 | #define S5PV310_PA_GPIO3 0x03860000 | ||
84 | |||
85 | #define S5PV310_PA_MIPI_CSIS0 0x11880000 | ||
86 | #define S5PV310_PA_MIPI_CSIS1 0x11890000 | ||
87 | |||
88 | #define S5PV310_PA_HSMMC(x) (0x12510000 + ((x) * 0x10000)) | ||
89 | |||
90 | #define S5PV310_PA_SROMC 0x12570000 | ||
91 | |||
92 | #define S5PV310_PA_UART 0x13800000 | ||
93 | |||
94 | #define S5PV310_PA_IIC(x) (0x13860000 + ((x) * 0x10000)) | ||
95 | |||
96 | #define S5PV310_PA_AC97 0x139A0000 | ||
97 | |||
98 | #define S5PV310_PA_TIMER 0x139D0000 | ||
99 | |||
100 | #define S5PV310_PA_SDRAM 0x40000000 | ||
101 | |||
102 | #define S5PV310_PA_SPDIF 0xE1100000 | ||
103 | |||
104 | /* Compatibility Defines */ | ||
105 | |||
130 | #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) | 106 | #define S3C_PA_HSMMC0 S5PV310_PA_HSMMC(0) |
131 | #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) | 107 | #define S3C_PA_HSMMC1 S5PV310_PA_HSMMC(1) |
132 | #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) | 108 | #define S3C_PA_HSMMC2 S5PV310_PA_HSMMC(2) |
@@ -141,7 +117,28 @@ | |||
141 | #define S3C_PA_IIC7 S5PV310_PA_IIC(7) | 117 | #define S3C_PA_IIC7 S5PV310_PA_IIC(7) |
142 | #define S3C_PA_RTC S5PV310_PA_RTC | 118 | #define S3C_PA_RTC S5PV310_PA_RTC |
143 | #define S3C_PA_WDT S5PV310_PA_WATCHDOG | 119 | #define S3C_PA_WDT S5PV310_PA_WATCHDOG |
120 | |||
121 | #define S5P_PA_CHIPID S5PV310_PA_CHIPID | ||
144 | #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 | 122 | #define S5P_PA_MIPI_CSIS0 S5PV310_PA_MIPI_CSIS0 |
145 | #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 | 123 | #define S5P_PA_MIPI_CSIS1 S5PV310_PA_MIPI_CSIS1 |
124 | #define S5P_PA_ONENAND S5PC210_PA_ONENAND | ||
125 | #define S5P_PA_ONENAND_DMA S5PC210_PA_ONENAND_DMA | ||
126 | #define S5P_PA_SDRAM S5PV310_PA_SDRAM | ||
127 | #define S5P_PA_SROMC S5PV310_PA_SROMC | ||
128 | #define S5P_PA_SYSCON S5PV310_PA_SYSCON | ||
129 | #define S5P_PA_TIMER S5PV310_PA_TIMER | ||
130 | |||
131 | /* UART */ | ||
132 | |||
133 | #define S3C_PA_UART S5PV310_PA_UART | ||
134 | |||
135 | #define S5P_PA_UART(x) (S3C_PA_UART + ((x) * S3C_UART_OFFSET)) | ||
136 | #define S5P_PA_UART0 S5P_PA_UART(0) | ||
137 | #define S5P_PA_UART1 S5P_PA_UART(1) | ||
138 | #define S5P_PA_UART2 S5P_PA_UART(2) | ||
139 | #define S5P_PA_UART3 S5P_PA_UART(3) | ||
140 | #define S5P_PA_UART4 S5P_PA_UART(4) | ||
141 | |||
142 | #define S5P_SZ_UART SZ_256 | ||
146 | 143 | ||
147 | #endif /* __ASM_ARCH_MAP_H */ | 144 | #endif /* __ASM_ARCH_MAP_H */ |
diff --git a/arch/arm/mach-spear3xx/include/mach/spear320.h b/arch/arm/mach-spear3xx/include/mach/spear320.h index cacf17a958cd..53677e464d4b 100644 --- a/arch/arm/mach-spear3xx/include/mach/spear320.h +++ b/arch/arm/mach-spear3xx/include/mach/spear320.h | |||
@@ -62,7 +62,7 @@ | |||
62 | #define SPEAR320_SMII1_BASE 0xAB000000 | 62 | #define SPEAR320_SMII1_BASE 0xAB000000 |
63 | #define SPEAR320_SMII1_SIZE 0x01000000 | 63 | #define SPEAR320_SMII1_SIZE 0x01000000 |
64 | 64 | ||
65 | #define SPEAR320_SOC_CONFIG_BASE 0xB4000000 | 65 | #define SPEAR320_SOC_CONFIG_BASE 0xB3000000 |
66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 | 66 | #define SPEAR320_SOC_CONFIG_SIZE 0x00000070 |
67 | /* Interrupt registers offsets and masks */ | 67 | /* Interrupt registers offsets and masks */ |
68 | #define INT_STS_MASK_REG 0x04 | 68 | #define INT_STS_MASK_REG 0x04 |
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 170c9bb95866..f2ce38e085d2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c | |||
@@ -49,7 +49,13 @@ static inline void cache_wait(void __iomem *reg, unsigned long mask) | |||
49 | static inline void cache_sync(void) | 49 | static inline void cache_sync(void) |
50 | { | 50 | { |
51 | void __iomem *base = l2x0_base; | 51 | void __iomem *base = l2x0_base; |
52 | |||
53 | #ifdef CONFIG_ARM_ERRATA_753970 | ||
54 | /* write to an unmapped register */ | ||
55 | writel_relaxed(0, base + L2X0_DUMMY_REG); | ||
56 | #else | ||
52 | writel_relaxed(0, base + L2X0_CACHE_SYNC); | 57 | writel_relaxed(0, base + L2X0_CACHE_SYNC); |
58 | #endif | ||
53 | cache_wait(base + L2X0_CACHE_SYNC, 1); | 59 | cache_wait(base + L2X0_CACHE_SYNC, 1); |
54 | } | 60 | } |
55 | 61 | ||
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 0c1172b56b4e..8e3356239136 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
@@ -264,6 +264,12 @@ __v7_setup: | |||
264 | orreq r10, r10, #1 << 6 @ set bit #6 | 264 | orreq r10, r10, #1 << 6 @ set bit #6 |
265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register | 265 | mcreq p15, 0, r10, c15, c0, 1 @ write diagnostic register |
266 | #endif | 266 | #endif |
267 | #ifdef CONFIG_ARM_ERRATA_751472 | ||
268 | cmp r6, #0x30 @ present prior to r3p0 | ||
269 | mrclt p15, 0, r10, c15, c0, 1 @ read diagnostic register | ||
270 | orrlt r10, r10, #1 << 11 @ set bit #11 | ||
271 | mcrlt p15, 0, r10, c15, c0, 1 @ write diagnostic register | ||
272 | #endif | ||
267 | 273 | ||
268 | 3: mov r10, #0 | 274 | 3: mov r10, #0 |
269 | #ifdef HARVARD_CACHE | 275 | #ifdef HARVARD_CACHE |
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-s5p/dev-uart.c index 6a7342886171..afaf87fdb93e 100644 --- a/arch/arm/plat-s5p/dev-uart.c +++ b/arch/arm/plat-s5p/dev-uart.c | |||
@@ -28,7 +28,7 @@ | |||
28 | static struct resource s5p_uart0_resource[] = { | 28 | static struct resource s5p_uart0_resource[] = { |
29 | [0] = { | 29 | [0] = { |
30 | .start = S5P_PA_UART0, | 30 | .start = S5P_PA_UART0, |
31 | .end = S5P_PA_UART0 + S5P_SZ_UART, | 31 | .end = S5P_PA_UART0 + S5P_SZ_UART - 1, |
32 | .flags = IORESOURCE_MEM, | 32 | .flags = IORESOURCE_MEM, |
33 | }, | 33 | }, |
34 | [1] = { | 34 | [1] = { |
@@ -51,7 +51,7 @@ static struct resource s5p_uart0_resource[] = { | |||
51 | static struct resource s5p_uart1_resource[] = { | 51 | static struct resource s5p_uart1_resource[] = { |
52 | [0] = { | 52 | [0] = { |
53 | .start = S5P_PA_UART1, | 53 | .start = S5P_PA_UART1, |
54 | .end = S5P_PA_UART1 + S5P_SZ_UART, | 54 | .end = S5P_PA_UART1 + S5P_SZ_UART - 1, |
55 | .flags = IORESOURCE_MEM, | 55 | .flags = IORESOURCE_MEM, |
56 | }, | 56 | }, |
57 | [1] = { | 57 | [1] = { |
@@ -74,7 +74,7 @@ static struct resource s5p_uart1_resource[] = { | |||
74 | static struct resource s5p_uart2_resource[] = { | 74 | static struct resource s5p_uart2_resource[] = { |
75 | [0] = { | 75 | [0] = { |
76 | .start = S5P_PA_UART2, | 76 | .start = S5P_PA_UART2, |
77 | .end = S5P_PA_UART2 + S5P_SZ_UART, | 77 | .end = S5P_PA_UART2 + S5P_SZ_UART - 1, |
78 | .flags = IORESOURCE_MEM, | 78 | .flags = IORESOURCE_MEM, |
79 | }, | 79 | }, |
80 | [1] = { | 80 | [1] = { |
@@ -98,7 +98,7 @@ static struct resource s5p_uart3_resource[] = { | |||
98 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 | 98 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 3 |
99 | [0] = { | 99 | [0] = { |
100 | .start = S5P_PA_UART3, | 100 | .start = S5P_PA_UART3, |
101 | .end = S5P_PA_UART3 + S5P_SZ_UART, | 101 | .end = S5P_PA_UART3 + S5P_SZ_UART - 1, |
102 | .flags = IORESOURCE_MEM, | 102 | .flags = IORESOURCE_MEM, |
103 | }, | 103 | }, |
104 | [1] = { | 104 | [1] = { |
@@ -123,7 +123,7 @@ static struct resource s5p_uart4_resource[] = { | |||
123 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 | 123 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 4 |
124 | [0] = { | 124 | [0] = { |
125 | .start = S5P_PA_UART4, | 125 | .start = S5P_PA_UART4, |
126 | .end = S5P_PA_UART4 + S5P_SZ_UART, | 126 | .end = S5P_PA_UART4 + S5P_SZ_UART - 1, |
127 | .flags = IORESOURCE_MEM, | 127 | .flags = IORESOURCE_MEM, |
128 | }, | 128 | }, |
129 | [1] = { | 129 | [1] = { |
@@ -148,7 +148,7 @@ static struct resource s5p_uart5_resource[] = { | |||
148 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 | 148 | #if CONFIG_SERIAL_SAMSUNG_UARTS > 5 |
149 | [0] = { | 149 | [0] = { |
150 | .start = S5P_PA_UART5, | 150 | .start = S5P_PA_UART5, |
151 | .end = S5P_PA_UART5 + S5P_SZ_UART, | 151 | .end = S5P_PA_UART5 + S5P_SZ_UART - 1, |
152 | .flags = IORESOURCE_MEM, | 152 | .flags = IORESOURCE_MEM, |
153 | }, | 153 | }, |
154 | [1] = { | 154 | [1] = { |
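The hunks above correct an off-by-one in the UART memory resources: struct resource describes an inclusive [start, end] range, so a window of S5P_SZ_UART bytes must end at start + S5P_SZ_UART - 1. A minimal sketch of the convention, with an assumed base address that is not taken from the patch:

        #include <linux/ioport.h>

        static struct resource example_uart_res = {
                .start  = 0xE2900000,                   /* assumed base, for illustration only */
                .end    = 0xE2900000 + 0x100 - 1,       /* 256-byte window, inclusive end */
                .flags  = IORESOURCE_MEM,
        };

        /* resource_size(&example_uart_res) == 0x100, i.e. end - start + 1 */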
diff --git a/arch/arm/plat-samsung/dev-ts.c b/arch/arm/plat-samsung/dev-ts.c index 236ef8427d7d..3e4bd8147bf4 100644 --- a/arch/arm/plat-samsung/dev-ts.c +++ b/arch/arm/plat-samsung/dev-ts.c | |||
@@ -58,4 +58,3 @@ void __init s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *pd) | |||
58 | 58 | ||
59 | s3c_device_ts.dev.platform_data = npd; | 59 | s3c_device_ts.dev.platform_data = npd; |
60 | } | 60 | } |
61 | EXPORT_SYMBOL(s3c24xx_ts_set_platdata); | ||
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h index 99ba6789cc97..6dd455bafdfd 100644 --- a/arch/arm/plat-spear/include/plat/uncompress.h +++ b/arch/arm/plat-spear/include/plat/uncompress.h | |||
@@ -24,10 +24,10 @@ static inline void putc(int c) | |||
24 | { | 24 | { |
25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; | 25 | void __iomem *base = (void __iomem *)SPEAR_DBG_UART_BASE; |
26 | 26 | ||
27 | while (readl(base + UART01x_FR) & UART01x_FR_TXFF) | 27 | while (readl_relaxed(base + UART01x_FR) & UART01x_FR_TXFF) |
28 | barrier(); | 28 | barrier(); |
29 | 29 | ||
30 | writel(c, base + UART01x_DR); | 30 | writel_relaxed(c, base + UART01x_DR); |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline void flush(void) | 33 | static inline void flush(void) |
diff --git a/arch/arm/plat-spear/include/plat/vmalloc.h b/arch/arm/plat-spear/include/plat/vmalloc.h index 09e9372aea21..8c8b24d07046 100644 --- a/arch/arm/plat-spear/include/plat/vmalloc.h +++ b/arch/arm/plat-spear/include/plat/vmalloc.h | |||
@@ -14,6 +14,6 @@ | |||
14 | #ifndef __PLAT_VMALLOC_H | 14 | #ifndef __PLAT_VMALLOC_H |
15 | #define __PLAT_VMALLOC_H | 15 | #define __PLAT_VMALLOC_H |
16 | 16 | ||
17 | #define VMALLOC_END 0xF0000000 | 17 | #define VMALLOC_END 0xF0000000UL |
18 | 18 | ||
19 | #endif /* __PLAT_VMALLOC_H */ | 19 | #endif /* __PLAT_VMALLOC_H */ |
diff --git a/arch/s390/boot/compressed/misc.c b/arch/s390/boot/compressed/misc.c index 0851eb1e919e..2751b3a8a66f 100644 --- a/arch/s390/boot/compressed/misc.c +++ b/arch/s390/boot/compressed/misc.c | |||
@@ -133,11 +133,12 @@ unsigned long decompress_kernel(void) | |||
133 | unsigned long output_addr; | 133 | unsigned long output_addr; |
134 | unsigned char *output; | 134 | unsigned char *output; |
135 | 135 | ||
136 | check_ipl_parmblock((void *) 0, (unsigned long) output + SZ__bss_start); | 136 | output_addr = ((unsigned long) &_end + HEAP_SIZE + 4095UL) & -4096UL; |
137 | check_ipl_parmblock((void *) 0, output_addr + SZ__bss_start); | ||
137 | memset(&_bss, 0, &_ebss - &_bss); | 138 | memset(&_bss, 0, &_ebss - &_bss); |
138 | free_mem_ptr = (unsigned long)&_end; | 139 | free_mem_ptr = (unsigned long)&_end; |
139 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; | 140 | free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; |
140 | output = (unsigned char *) ((free_mem_end_ptr + 4095UL) & -4096UL); | 141 | output = (unsigned char *) output_addr; |
141 | 142 | ||
142 | #ifdef CONFIG_BLK_DEV_INITRD | 143 | #ifdef CONFIG_BLK_DEV_INITRD |
143 | /* | 144 | /* |
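In the decompressor fix above, the output address is now computed up front and rounded up to a 4 KiB boundary with (x + 4095UL) & -4096UL, where -4096UL is the same mask as ~4095UL. A tiny stand-alone sketch of that rounding, with made-up values:

        #include <stdio.h>

        int main(void)
        {
                unsigned long end  = 0x12345678UL;      /* pretend address of _end */
                unsigned long heap = 0x10000UL;         /* stand-in for HEAP_SIZE */
                unsigned long out  = (end + heap + 4095UL) & -4096UL;

                printf("%#lx\n", out);  /* prints 0x12356000, the next page boundary */
                return 0;
        }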
diff --git a/arch/s390/crypto/sha_common.c b/arch/s390/crypto/sha_common.c index f42dbabc0d30..48884f89ab92 100644 --- a/arch/s390/crypto/sha_common.c +++ b/arch/s390/crypto/sha_common.c | |||
@@ -38,6 +38,7 @@ int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) | |||
38 | BUG_ON(ret != bsize); | 38 | BUG_ON(ret != bsize); |
39 | data += bsize - index; | 39 | data += bsize - index; |
40 | len -= bsize - index; | 40 | len -= bsize - index; |
41 | index = 0; | ||
41 | } | 42 | } |
42 | 43 | ||
43 | /* process as many blocks as possible */ | 44 | /* process as many blocks as possible */ |
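The one-line fix above resets the buffer index once the partially filled block has been hashed; otherwise the remaining input is later copied into the buffer at a nonzero offset, behind data that has already been consumed. A hedged outline of the usual block-buffer update pattern, with a hypothetical process_block() helper:

        #include <stddef.h>
        #include <string.h>

        #define BSIZE 64                                /* stand-in block size */

        void process_block(const unsigned char *blk);   /* hypothetical helper */

        static void update_sketch(unsigned char *buf, size_t *index,
                                  const unsigned char *data, size_t len)
        {
                while (len) {
                        size_t fill = BSIZE - *index;

                        if (fill > len)
                                fill = len;
                        memcpy(buf + *index, data, fill);
                        *index += fill;
                        data += fill;
                        len -= fill;

                        if (*index == BSIZE) {
                                process_block(buf);
                                *index = 0;     /* the reset the patch restores */
                        }
                }
        }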
diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h index 76daea117181..5c5ba10384c2 100644 --- a/arch/s390/include/asm/atomic.h +++ b/arch/s390/include/asm/atomic.h | |||
@@ -36,14 +36,19 @@ | |||
36 | 36 | ||
37 | static inline int atomic_read(const atomic_t *v) | 37 | static inline int atomic_read(const atomic_t *v) |
38 | { | 38 | { |
39 | barrier(); | 39 | int c; |
40 | return v->counter; | 40 | |
41 | asm volatile( | ||
42 | " l %0,%1\n" | ||
43 | : "=d" (c) : "Q" (v->counter)); | ||
44 | return c; | ||
41 | } | 45 | } |
42 | 46 | ||
43 | static inline void atomic_set(atomic_t *v, int i) | 47 | static inline void atomic_set(atomic_t *v, int i) |
44 | { | 48 | { |
45 | v->counter = i; | 49 | asm volatile( |
46 | barrier(); | 50 | " st %1,%0\n" |
51 | : "=Q" (v->counter) : "d" (i)); | ||
47 | } | 52 | } |
48 | 53 | ||
49 | static inline int atomic_add_return(int i, atomic_t *v) | 54 | static inline int atomic_add_return(int i, atomic_t *v) |
@@ -128,14 +133,19 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u) | |||
128 | 133 | ||
129 | static inline long long atomic64_read(const atomic64_t *v) | 134 | static inline long long atomic64_read(const atomic64_t *v) |
130 | { | 135 | { |
131 | barrier(); | 136 | long long c; |
132 | return v->counter; | 137 | |
138 | asm volatile( | ||
139 | " lg %0,%1\n" | ||
140 | : "=d" (c) : "Q" (v->counter)); | ||
141 | return c; | ||
133 | } | 142 | } |
134 | 143 | ||
135 | static inline void atomic64_set(atomic64_t *v, long long i) | 144 | static inline void atomic64_set(atomic64_t *v, long long i) |
136 | { | 145 | { |
137 | v->counter = i; | 146 | asm volatile( |
138 | barrier(); | 147 | " stg %1,%0\n" |
148 | : "=Q" (v->counter) : "d" (i)); | ||
139 | } | 149 | } |
140 | 150 | ||
141 | static inline long long atomic64_add_return(long long i, atomic64_t *v) | 151 | static inline long long atomic64_add_return(long long i, atomic64_t *v) |
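The atomic_read()/atomic_set() changes above replace barrier() plus a plain access with an explicit load/store instruction, so each call performs exactly one access to the counter. As a rough, non-s390 illustration of the same intent (not the kernel's implementation), a volatile-qualified access also forces the compiler to emit the memory access every time:

        typedef struct { int counter; } example_atomic_t;

        static inline int example_atomic_read(const example_atomic_t *v)
        {
                return *(volatile const int *)&v->counter;      /* one real load */
        }

        static inline void example_atomic_set(example_atomic_t *v, int i)
        {
                *(volatile int *)&v->counter = i;               /* one real store */
        }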
diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h index 24aafa68b643..2a30d5ac0667 100644 --- a/arch/s390/include/asm/cache.h +++ b/arch/s390/include/asm/cache.h | |||
@@ -13,6 +13,7 @@ | |||
13 | 13 | ||
14 | #define L1_CACHE_BYTES 256 | 14 | #define L1_CACHE_BYTES 256 |
15 | #define L1_CACHE_SHIFT 8 | 15 | #define L1_CACHE_SHIFT 8 |
16 | #define NET_SKB_PAD 32 | ||
16 | 17 | ||
17 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) | 18 | #define __read_mostly __attribute__((__section__(".data..read_mostly"))) |
18 | 19 | ||
diff --git a/arch/sparc/include/asm/pcr.h b/arch/sparc/include/asm/pcr.h index a2f5c61f924e..843e4faf6a50 100644 --- a/arch/sparc/include/asm/pcr.h +++ b/arch/sparc/include/asm/pcr.h | |||
@@ -43,4 +43,6 @@ static inline u64 picl_value(unsigned int nmi_hz) | |||
43 | 43 | ||
44 | extern u64 pcr_enable; | 44 | extern u64 pcr_enable; |
45 | 45 | ||
46 | extern int pcr_arch_init(void); | ||
47 | |||
46 | #endif /* __PCR_H */ | 48 | #endif /* __PCR_H */ |
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c6..72509d0e34be 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c | |||
@@ -255,10 +255,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, | |||
255 | static int iommu_alloc_ctx(struct iommu *iommu) | 255 | static int iommu_alloc_ctx(struct iommu *iommu) |
256 | { | 256 | { |
257 | int lowest = iommu->ctx_lowest_free; | 257 | int lowest = iommu->ctx_lowest_free; |
258 | int sz = IOMMU_NUM_CTXS - lowest; | 258 | int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); |
259 | int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); | ||
260 | 259 | ||
261 | if (unlikely(n == sz)) { | 260 | if (unlikely(n == IOMMU_NUM_CTXS)) { |
262 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); | 261 | n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); |
263 | if (unlikely(n == lowest)) { | 262 | if (unlikely(n == lowest)) { |
264 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); | 263 | printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); |
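The iommu_alloc_ctx() fix above hinges on the find_next_zero_bit() contract: the second argument is the total size of the bitmap in bits (the search bound), not a count relative to the starting offset, and the function returns that bound when no zero bit is found. A small hedged usage sketch:

        #include <linux/bitops.h>

        static unsigned long first_free_above(unsigned long *map,
                                              unsigned long nbits,
                                              unsigned long start)
        {
                /* searches [start, nbits); returns nbits when every bit is set */
                return find_next_zero_bit(map, nbits, start);
        }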
diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index ae96cf52a955..7c2ced612b8f 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c | |||
@@ -167,5 +167,3 @@ out_unregister: | |||
167 | unregister_perf_hsvc(); | 167 | unregister_perf_hsvc(); |
168 | return err; | 168 | return err; |
169 | } | 169 | } |
170 | |||
171 | early_initcall(pcr_arch_init); | ||
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f47040..555a76d1f4a1 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c | |||
@@ -49,6 +49,7 @@ | |||
49 | #include <asm/mdesc.h> | 49 | #include <asm/mdesc.h> |
50 | #include <asm/ldc.h> | 50 | #include <asm/ldc.h> |
51 | #include <asm/hypervisor.h> | 51 | #include <asm/hypervisor.h> |
52 | #include <asm/pcr.h> | ||
52 | 53 | ||
53 | #include "cpumap.h" | 54 | #include "cpumap.h" |
54 | 55 | ||
@@ -1358,6 +1359,7 @@ void __cpu_die(unsigned int cpu) | |||
1358 | 1359 | ||
1359 | void __init smp_cpus_done(unsigned int max_cpus) | 1360 | void __init smp_cpus_done(unsigned int max_cpus) |
1360 | { | 1361 | { |
1362 | pcr_arch_init(); | ||
1361 | } | 1363 | } |
1362 | 1364 | ||
1363 | void smp_send_reschedule(int cpu) | 1365 | void smp_send_reschedule(int cpu) |
diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7e..8f096e84a937 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S | |||
@@ -24,9 +24,9 @@ retl_efault: | |||
24 | .globl __do_int_store | 24 | .globl __do_int_store |
25 | __do_int_store: | 25 | __do_int_store: |
26 | ld [%o2], %g1 | 26 | ld [%o2], %g1 |
27 | cmp %1, 2 | 27 | cmp %o1, 2 |
28 | be 2f | 28 | be 2f |
29 | cmp %1, 4 | 29 | cmp %o1, 4 |
30 | be 1f | 30 | be 1f |
31 | srl %g1, 24, %g2 | 31 | srl %g1, 24, %g2 |
32 | srl %g1, 16, %g7 | 32 | srl %g1, 16, %g7 |
diff --git a/arch/sparc/lib/bitext.c b/arch/sparc/lib/bitext.c index 764b3eb7b604..48d00e72ce15 100644 --- a/arch/sparc/lib/bitext.c +++ b/arch/sparc/lib/bitext.c | |||
@@ -10,7 +10,7 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/string.h> | 12 | #include <linux/string.h> |
13 | #include <linux/bitops.h> | 13 | #include <linux/bitmap.h> |
14 | 14 | ||
15 | #include <asm/bitext.h> | 15 | #include <asm/bitext.h> |
16 | 16 | ||
@@ -80,8 +80,7 @@ int bit_map_string_get(struct bit_map *t, int len, int align) | |||
80 | while (test_bit(offset + i, t->map) == 0) { | 80 | while (test_bit(offset + i, t->map) == 0) { |
81 | i++; | 81 | i++; |
82 | if (i == len) { | 82 | if (i == len) { |
83 | for (i = 0; i < len; i++) | 83 | bitmap_set(t->map, offset, len); |
84 | __set_bit(offset + i, t->map); | ||
85 | if (offset == t->first_free) | 84 | if (offset == t->first_free) |
86 | t->first_free = find_next_zero_bit | 85 | t->first_free = find_next_zero_bit |
87 | (t->map, t->size, | 86 | (t->map, t->size, |
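The bit_map_string_get() hunk above is a straight substitution: bitmap_set(map, start, len) sets len consecutive bits and replaces the open-coded __set_bit() loop, which is why the include switches to <linux/bitmap.h>. A minimal illustration:

        #include <linux/bitmap.h>

        static void mark_range(unsigned long *map, int offset, int len)
        {
                /* same effect as __set_bit() on each bit in [offset, offset + len) */
                bitmap_set(map, offset, len);
        }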
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index 57afd1da491d..8634e1b49c03 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c | |||
@@ -229,21 +229,27 @@ static int xen_pcifront_enable_irq(struct pci_dev *dev) | |||
229 | { | 229 | { |
230 | int rc; | 230 | int rc; |
231 | int share = 1; | 231 | int share = 1; |
232 | u8 gsi; | ||
232 | 233 | ||
233 | dev_info(&dev->dev, "Xen PCI enabling IRQ: %d\n", dev->irq); | 234 | rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi); |
234 | 235 | if (rc < 0) { | |
235 | if (dev->irq < 0) | 236 | dev_warn(&dev->dev, "Xen PCI: failed to read interrupt line: %d\n", |
236 | return -EINVAL; | 237 | rc); |
238 | return rc; | ||
239 | } | ||
237 | 240 | ||
238 | if (dev->irq < NR_IRQS_LEGACY) | 241 | if (gsi < NR_IRQS_LEGACY) |
239 | share = 0; | 242 | share = 0; |
240 | 243 | ||
241 | rc = xen_allocate_pirq(dev->irq, share, "pcifront"); | 244 | rc = xen_allocate_pirq(gsi, share, "pcifront"); |
242 | if (rc < 0) { | 245 | if (rc < 0) { |
243 | dev_warn(&dev->dev, "Xen PCI IRQ: %d, failed to register:%d\n", | 246 | dev_warn(&dev->dev, "Xen PCI: failed to register GSI%d: %d\n", |
244 | dev->irq, rc); | 247 | gsi, rc); |
245 | return rc; | 248 | return rc; |
246 | } | 249 | } |
250 | |||
251 | dev->irq = rc; | ||
252 | dev_info(&dev->dev, "Xen PCI mapped GSI%d to IRQ%d\n", gsi, dev->irq); | ||
247 | return 0; | 253 | return 0; |
248 | } | 254 | } |
249 | 255 | ||
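The xen_pcifront_enable_irq() rework above stops trusting dev->irq, reads the firmware-programmed legacy interrupt line (GSI) from PCI config space before registering the pirq, and stores the resulting Linux IRQ back into dev->irq. A hedged sketch of the config-space read it relies on:

        #include <linux/pci.h>

        /* Returns 0 on success and leaves the GSI in *gsi; non-zero otherwise. */
        static int read_gsi(struct pci_dev *dev, u8 *gsi)
        {
                int rc = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, gsi);

                if (rc)
                        dev_warn(&dev->dev, "failed to read interrupt line: %d\n", rc);
                return rc;
        }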
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 067759e3d6a5..2e2d370a47b1 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c | |||
@@ -397,7 +397,9 @@ void xen_setup_timer(int cpu) | |||
397 | name = "<timer kasprintf failed>"; | 397 | name = "<timer kasprintf failed>"; |
398 | 398 | ||
399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, | 399 | irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, |
400 | IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER, | 400 | IRQF_DISABLED|IRQF_PERCPU| |
401 | IRQF_NOBALANCING|IRQF_TIMER| | ||
402 | IRQF_FORCE_RESUME, | ||
401 | name, NULL); | 403 | name, NULL); |
402 | 404 | ||
403 | evt = &per_cpu(xen_clock_events, cpu); | 405 | evt = &per_cpu(xen_clock_events, cpu); |
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c index 73fb1c4f4cd4..25ef1a4556e6 100644 --- a/drivers/atm/solos-pci.c +++ b/drivers/atm/solos-pci.c | |||
@@ -866,8 +866,9 @@ static int popen(struct atm_vcc *vcc) | |||
866 | } | 866 | } |
867 | 867 | ||
868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); | 868 | skb = alloc_skb(sizeof(*header), GFP_ATOMIC); |
869 | if (!skb && net_ratelimit()) { | 869 | if (!skb) { |
870 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | 870 | if (net_ratelimit()) |
871 | dev_warn(&card->dev->dev, "Failed to allocate sk_buff in popen()\n"); | ||
871 | return -ENOMEM; | 872 | return -ENOMEM; |
872 | } | 873 | } |
873 | header = (void *)skb_put(skb, sizeof(*header)); | 874 | header = (void *)skb_put(skb, sizeof(*header)); |
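The popen() change above untangles the allocation check from net_ratelimit(): previously -ENOMEM was returned only while net_ratelimit() permitted the warning, so a suppressed message let a NULL skb fall through to skb_put(). The general pattern, as a small hedged sketch:

        #include <linux/kernel.h>
        #include <linux/net.h>          /* net_ratelimit() */
        #include <linux/errno.h>

        static int check_alloc(void *ptr)
        {
                if (!ptr) {
                        if (net_ratelimit())    /* only the message is ratelimited */
                                pr_warn("example: allocation failed\n");
                        return -ENOMEM;         /* the error is always reported */
                }
                return 0;
        }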
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index a126e614601f..333c21289d97 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c | |||
@@ -39,6 +39,8 @@ static struct usb_device_id ath3k_table[] = { | |||
39 | /* Atheros AR3011 with sflash firmware*/ | 39 | /* Atheros AR3011 with sflash firmware*/ |
40 | { USB_DEVICE(0x0CF3, 0x3002) }, | 40 | { USB_DEVICE(0x0CF3, 0x3002) }, |
41 | 41 | ||
42 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
43 | { USB_DEVICE(0x03F0, 0x311D) }, | ||
42 | { } /* Terminating entry */ | 44 | { } /* Terminating entry */ |
43 | }; | 45 | }; |
44 | 46 | ||
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 1da773f899a2..4cefa91e6c34 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c | |||
@@ -102,6 +102,9 @@ static struct usb_device_id blacklist_table[] = { | |||
102 | /* Atheros 3011 with sflash firmware */ | 102 | /* Atheros 3011 with sflash firmware */ |
103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, | 103 | { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, |
104 | 104 | ||
105 | /* Atheros AR9285 Malbec with sflash firmware */ | ||
106 | { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, | ||
107 | |||
105 | /* Broadcom BCM2035 */ | 108 | /* Broadcom BCM2035 */ |
106 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, | 109 | { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, |
107 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, | 110 | { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, |
diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index faf5a2c65926..36e0fa161c2b 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c | |||
@@ -577,11 +577,9 @@ duration: | |||
577 | if (rc) | 577 | if (rc) |
578 | return; | 578 | return; |
579 | 579 | ||
580 | if (be32_to_cpu(tpm_cmd.header.out.return_code) != 0 || | 580 | if (be32_to_cpu(tpm_cmd.header.out.return_code) |
581 | be32_to_cpu(tpm_cmd.header.out.length) | 581 | != 3 * sizeof(u32)) |
582 | != sizeof(tpm_cmd.header.out) + sizeof(u32) + 3 * sizeof(u32)) | ||
583 | return; | 582 | return; |
584 | |||
585 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; | 583 | duration_cap = &tpm_cmd.params.getcap_out.cap.duration; |
586 | chip->vendor.duration[TPM_SHORT] = | 584 | chip->vendor.duration[TPM_SHORT] = |
587 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); | 585 | usecs_to_jiffies(be32_to_cpu(duration_cap->tpm_short)); |
@@ -941,18 +939,6 @@ ssize_t tpm_show_caps_1_2(struct device * dev, | |||
941 | } | 939 | } |
942 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); | 940 | EXPORT_SYMBOL_GPL(tpm_show_caps_1_2); |
943 | 941 | ||
944 | ssize_t tpm_show_timeouts(struct device *dev, struct device_attribute *attr, | ||
945 | char *buf) | ||
946 | { | ||
947 | struct tpm_chip *chip = dev_get_drvdata(dev); | ||
948 | |||
949 | return sprintf(buf, "%d %d %d\n", | ||
950 | jiffies_to_usecs(chip->vendor.duration[TPM_SHORT]), | ||
951 | jiffies_to_usecs(chip->vendor.duration[TPM_MEDIUM]), | ||
952 | jiffies_to_usecs(chip->vendor.duration[TPM_LONG])); | ||
953 | } | ||
954 | EXPORT_SYMBOL_GPL(tpm_show_timeouts); | ||
955 | |||
956 | ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, | 942 | ssize_t tpm_store_cancel(struct device *dev, struct device_attribute *attr, |
957 | const char *buf, size_t count) | 943 | const char *buf, size_t count) |
958 | { | 944 | { |
diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index d84ff772c26f..72ddb031b69a 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h | |||
@@ -56,8 +56,6 @@ extern ssize_t tpm_show_owned(struct device *, struct device_attribute *attr, | |||
56 | char *); | 56 | char *); |
57 | extern ssize_t tpm_show_temp_deactivated(struct device *, | 57 | extern ssize_t tpm_show_temp_deactivated(struct device *, |
58 | struct device_attribute *attr, char *); | 58 | struct device_attribute *attr, char *); |
59 | extern ssize_t tpm_show_timeouts(struct device *, | ||
60 | struct device_attribute *attr, char *); | ||
61 | 59 | ||
62 | struct tpm_chip; | 60 | struct tpm_chip; |
63 | 61 | ||
diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 0d1d38e5f266..dd21df55689d 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c | |||
@@ -376,7 +376,6 @@ static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated, | |||
376 | NULL); | 376 | NULL); |
377 | static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); | 377 | static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps_1_2, NULL); |
378 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); | 378 | static DEVICE_ATTR(cancel, S_IWUSR | S_IWGRP, NULL, tpm_store_cancel); |
379 | static DEVICE_ATTR(timeouts, S_IRUGO, tpm_show_timeouts, NULL); | ||
380 | 379 | ||
381 | static struct attribute *tis_attrs[] = { | 380 | static struct attribute *tis_attrs[] = { |
382 | &dev_attr_pubek.attr, | 381 | &dev_attr_pubek.attr, |
@@ -386,8 +385,7 @@ static struct attribute *tis_attrs[] = { | |||
386 | &dev_attr_owned.attr, | 385 | &dev_attr_owned.attr, |
387 | &dev_attr_temp_deactivated.attr, | 386 | &dev_attr_temp_deactivated.attr, |
388 | &dev_attr_caps.attr, | 387 | &dev_attr_caps.attr, |
389 | &dev_attr_cancel.attr, | 388 | &dev_attr_cancel.attr, NULL, |
390 | &dev_attr_timeouts.attr, NULL, | ||
391 | }; | 389 | }; |
392 | 390 | ||
393 | static struct attribute_group tis_attr_grp = { | 391 | static struct attribute_group tis_attr_grp = { |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 15d94c63918c..729d4233b763 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -1553,17 +1553,7 @@ | |||
1553 | 1553 | ||
1554 | /* Backlight control */ | 1554 | /* Backlight control */ |
1555 | #define BLC_PWM_CTL 0x61254 | 1555 | #define BLC_PWM_CTL 0x61254 |
1556 | #define BACKLIGHT_MODULATION_FREQ_SHIFT (17) | ||
1557 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ | 1556 | #define BLC_PWM_CTL2 0x61250 /* 965+ only */ |
1558 | #define BLM_COMBINATION_MODE (1 << 30) | ||
1559 | /* | ||
1560 | * This is the most significant 15 bits of the number of backlight cycles in a | ||
1561 | * complete cycle of the modulated backlight control. | ||
1562 | * | ||
1563 | * The actual value is this field multiplied by two. | ||
1564 | */ | ||
1565 | #define BACKLIGHT_MODULATION_FREQ_MASK (0x7fff << 17) | ||
1566 | #define BLM_LEGACY_MODE (1 << 16) | ||
1567 | /* | 1557 | /* |
1568 | * This is the number of cycles out of the backlight modulation cycle for which | 1558 | * This is the number of cycles out of the backlight modulation cycle for which |
1569 | * the backlight is on. | 1559 | * the backlight is on. |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index c65992df458d..d860abeda70f 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -30,8 +30,6 @@ | |||
30 | 30 | ||
31 | #include "intel_drv.h" | 31 | #include "intel_drv.h" |
32 | 32 | ||
33 | #define PCI_LBPC 0xf4 /* legacy/combination backlight modes */ | ||
34 | |||
35 | void | 33 | void |
36 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 34 | intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
37 | struct drm_display_mode *adjusted_mode) | 35 | struct drm_display_mode *adjusted_mode) |
@@ -112,19 +110,6 @@ done: | |||
112 | dev_priv->pch_pf_size = (width << 16) | height; | 110 | dev_priv->pch_pf_size = (width << 16) | height; |
113 | } | 111 | } |
114 | 112 | ||
115 | static int is_backlight_combination_mode(struct drm_device *dev) | ||
116 | { | ||
117 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
118 | |||
119 | if (INTEL_INFO(dev)->gen >= 4) | ||
120 | return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE; | ||
121 | |||
122 | if (IS_GEN2(dev)) | ||
123 | return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE; | ||
124 | |||
125 | return 0; | ||
126 | } | ||
127 | |||
128 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) | 113 | static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv) |
129 | { | 114 | { |
130 | u32 val; | 115 | u32 val; |
@@ -181,9 +166,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev) | |||
181 | if (INTEL_INFO(dev)->gen < 4) | 166 | if (INTEL_INFO(dev)->gen < 4) |
182 | max &= ~1; | 167 | max &= ~1; |
183 | } | 168 | } |
184 | |||
185 | if (is_backlight_combination_mode(dev)) | ||
186 | max *= 0xff; | ||
187 | } | 169 | } |
188 | 170 | ||
189 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); | 171 | DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max); |
@@ -201,15 +183,6 @@ u32 intel_panel_get_backlight(struct drm_device *dev) | |||
201 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; | 183 | val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK; |
202 | if (IS_PINEVIEW(dev)) | 184 | if (IS_PINEVIEW(dev)) |
203 | val >>= 1; | 185 | val >>= 1; |
204 | |||
205 | if (is_backlight_combination_mode(dev)){ | ||
206 | u8 lbpc; | ||
207 | |||
208 | val &= ~1; | ||
209 | pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc); | ||
210 | val *= lbpc; | ||
211 | val >>= 1; | ||
212 | } | ||
213 | } | 186 | } |
214 | 187 | ||
215 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); | 188 | DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val); |
@@ -232,16 +205,6 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
232 | 205 | ||
233 | if (HAS_PCH_SPLIT(dev)) | 206 | if (HAS_PCH_SPLIT(dev)) |
234 | return intel_pch_panel_set_backlight(dev, level); | 207 | return intel_pch_panel_set_backlight(dev, level); |
235 | |||
236 | if (is_backlight_combination_mode(dev)){ | ||
237 | u32 max = intel_panel_get_max_backlight(dev); | ||
238 | u8 lpbc; | ||
239 | |||
240 | lpbc = level * 0xfe / max + 1; | ||
241 | level /= lpbc; | ||
242 | pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc); | ||
243 | } | ||
244 | |||
245 | tmp = I915_READ(BLC_PWM_CTL); | 208 | tmp = I915_READ(BLC_PWM_CTL); |
246 | if (IS_PINEVIEW(dev)) { | 209 | if (IS_PINEVIEW(dev)) { |
247 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); | 210 | tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1); |
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c index 49e5e99917e2..6bdab891c64e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bios.c +++ b/drivers/gpu/drm/nouveau/nouveau_bios.c | |||
@@ -6228,7 +6228,7 @@ parse_dcb15_entry(struct drm_device *dev, struct dcb_table *dcb, | |||
6228 | entry->tvconf.has_component_output = false; | 6228 | entry->tvconf.has_component_output = false; |
6229 | break; | 6229 | break; |
6230 | case OUTPUT_LVDS: | 6230 | case OUTPUT_LVDS: |
6231 | if ((conn & 0x00003f00) != 0x10) | 6231 | if ((conn & 0x00003f00) >> 8 != 0x10) |
6232 | entry->lvdsconf.use_straps_for_mode = true; | 6232 | entry->lvdsconf.use_straps_for_mode = true; |
6233 | entry->lvdsconf.use_power_scripts = true; | 6233 | entry->lvdsconf.use_power_scripts = true; |
6234 | break; | 6234 | break; |
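The DCB 1.5 parsing fix above shifts the masked field down before comparing: bits 13:8 of conn hold a 6-bit value, so (conn & 0x00003f00) could never equal 0x10 and the old test was always true. Extracting and comparing the field, as a stand-alone sketch:

        #include <stdbool.h>
        #include <stdint.h>

        static bool lvds_uses_straps(uint32_t conn)
        {
                /* bits 13:8 of conn; compare the shifted-down field with 0x10 */
                return ((conn & 0x00003f00) >> 8) != 0x10;
        }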
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index a7fae26f4654..d38a4d9f9b0b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c | |||
@@ -128,6 +128,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, | |||
128 | } | 128 | } |
129 | } | 129 | } |
130 | 130 | ||
131 | nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; | ||
131 | nouveau_bo_placement_set(nvbo, flags, 0); | 132 | nouveau_bo_placement_set(nvbo, flags, 0); |
132 | 133 | ||
133 | nvbo->channel = chan; | 134 | nvbo->channel = chan; |
@@ -166,17 +167,17 @@ static void | |||
166 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) | 167 | set_placement_range(struct nouveau_bo *nvbo, uint32_t type) |
167 | { | 168 | { |
168 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); | 169 | struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); |
170 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
169 | 171 | ||
170 | if (dev_priv->card_type == NV_10 && | 172 | if (dev_priv->card_type == NV_10 && |
171 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { | 173 | nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && |
174 | nvbo->bo.mem.num_pages < vram_pages / 2) { | ||
172 | /* | 175 | /* |
173 | * Make sure that the color and depth buffers are handled | 176 | * Make sure that the color and depth buffers are handled |
174 | * by independent memory controller units. Up to a 9x | 177 | * by independent memory controller units. Up to a 9x |
175 | * speed up when alpha-blending and depth-test are enabled | 178 | * speed up when alpha-blending and depth-test are enabled |
176 | * at the same time. | 179 | * at the same time. |
177 | */ | 180 | */ |
178 | int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; | ||
179 | |||
180 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { | 181 | if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { |
181 | nvbo->placement.fpfn = vram_pages / 2; | 182 | nvbo->placement.fpfn = vram_pages / 2; |
182 | nvbo->placement.lpfn = ~0; | 183 | nvbo->placement.lpfn = ~0; |
@@ -785,7 +786,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
785 | if (ret) | 786 | if (ret) |
786 | goto out; | 787 | goto out; |
787 | 788 | ||
788 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); | 789 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem); |
789 | out: | 790 | out: |
790 | ttm_bo_mem_put(bo, &tmp_mem); | 791 | ttm_bo_mem_put(bo, &tmp_mem); |
791 | return ret; | 792 | return ret; |
@@ -811,11 +812,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, | |||
811 | if (ret) | 812 | if (ret) |
812 | return ret; | 813 | return ret; |
813 | 814 | ||
814 | ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); | 815 | ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem); |
815 | if (ret) | 816 | if (ret) |
816 | goto out; | 817 | goto out; |
817 | 818 | ||
818 | ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); | 819 | ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem); |
819 | if (ret) | 820 | if (ret) |
820 | goto out; | 821 | goto out; |
821 | 822 | ||
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index a21e00076839..390d82c3c4b0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c | |||
@@ -507,6 +507,7 @@ nouveau_connector_native_mode(struct drm_connector *connector) | |||
507 | int high_w = 0, high_h = 0, high_v = 0; | 507 | int high_w = 0, high_h = 0, high_v = 0; |
508 | 508 | ||
509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { | 509 | list_for_each_entry(mode, &nv_connector->base.probed_modes, head) { |
510 | mode->vrefresh = drm_mode_vrefresh(mode); | ||
510 | if (helper->mode_valid(connector, mode) != MODE_OK || | 511 | if (helper->mode_valid(connector, mode) != MODE_OK || |
511 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) | 512 | (mode->flags & DRM_MODE_FLAG_INTERLACE)) |
512 | continue; | 513 | continue; |
diff --git a/drivers/gpu/drm/nouveau/nouveau_pm.c b/drivers/gpu/drm/nouveau/nouveau_pm.c index f05c0cddfeca..4399e2f34db4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_pm.c +++ b/drivers/gpu/drm/nouveau/nouveau_pm.c | |||
@@ -543,7 +543,7 @@ nouveau_pm_resume(struct drm_device *dev) | |||
543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; | 543 | struct nouveau_pm_engine *pm = &dev_priv->engine.pm; |
544 | struct nouveau_pm_level *perflvl; | 544 | struct nouveau_pm_level *perflvl; |
545 | 545 | ||
546 | if (pm->cur == &pm->boot) | 546 | if (!pm->cur || pm->cur == &pm->boot) |
547 | return; | 547 | return; |
548 | 548 | ||
549 | perflvl = pm->cur; | 549 | perflvl = pm->cur; |
diff --git a/drivers/gpu/drm/nouveau/nv04_dfp.c b/drivers/gpu/drm/nouveau/nv04_dfp.c index ef23550407b5..c82db37d9f41 100644 --- a/drivers/gpu/drm/nouveau/nv04_dfp.c +++ b/drivers/gpu/drm/nouveau/nv04_dfp.c | |||
@@ -342,8 +342,8 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder, | |||
342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { | 342 | if (nv_encoder->dcb->type == OUTPUT_LVDS) { |
343 | bool duallink, dummy; | 343 | bool duallink, dummy; |
344 | 344 | ||
345 | nouveau_bios_parse_lvds_table(dev, nv_connector->native_mode-> | 345 | nouveau_bios_parse_lvds_table(dev, output_mode->clock, |
346 | clock, &duallink, &dummy); | 346 | &duallink, &dummy); |
347 | if (duallink) | 347 | if (duallink) |
348 | regp->fp_control |= (8 << 28); | 348 | regp->fp_control |= (8 << 28); |
349 | } else | 349 | } else |
@@ -518,8 +518,6 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
518 | return; | 518 | return; |
519 | 519 | ||
520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { | 520 | if (nv_encoder->dcb->lvdsconf.use_power_scripts) { |
521 | struct nouveau_connector *nv_connector = nouveau_encoder_connector_get(nv_encoder); | ||
522 | |||
523 | /* when removing an output, crtc may not be set, but PANEL_OFF | 521 | /* when removing an output, crtc may not be set, but PANEL_OFF |
524 | * must still be run | 522 | * must still be run |
525 | */ | 523 | */ |
@@ -527,12 +525,8 @@ static void nv04_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
527 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); | 525 | nv04_dfp_get_bound_head(dev, nv_encoder->dcb); |
528 | 526 | ||
529 | if (mode == DRM_MODE_DPMS_ON) { | 527 | if (mode == DRM_MODE_DPMS_ON) { |
530 | if (!nv_connector->native_mode) { | ||
531 | NV_ERROR(dev, "Not turning on LVDS without native mode\n"); | ||
532 | return; | ||
533 | } | ||
534 | call_lvds_script(dev, nv_encoder->dcb, head, | 528 | call_lvds_script(dev, nv_encoder->dcb, head, |
535 | LVDS_PANEL_ON, nv_connector->native_mode->clock); | 529 | LVDS_PANEL_ON, nv_encoder->mode.clock); |
536 | } else | 530 | } else |
537 | /* pxclk of 0 is fine for PANEL_OFF, and for a | 531 | /* pxclk of 0 is fine for PANEL_OFF, and for a |
538 | * disconnected LVDS encoder there is no native_mode | 532 | * disconnected LVDS encoder there is no native_mode |
diff --git a/drivers/gpu/drm/nouveau/nv40_graph.c b/drivers/gpu/drm/nouveau/nv40_graph.c index 8870d72388c8..18d30c2c1aa6 100644 --- a/drivers/gpu/drm/nouveau/nv40_graph.c +++ b/drivers/gpu/drm/nouveau/nv40_graph.c | |||
@@ -211,18 +211,32 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; | 211 | struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i]; |
212 | 212 | ||
213 | switch (dev_priv->chipset) { | 213 | switch (dev_priv->chipset) { |
214 | case 0x40: | ||
215 | case 0x41: /* guess */ | ||
216 | case 0x42: | ||
217 | case 0x43: | ||
218 | case 0x45: /* guess */ | ||
219 | case 0x4e: | ||
220 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
221 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
222 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
223 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
224 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
225 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
226 | break; | ||
214 | case 0x44: | 227 | case 0x44: |
215 | case 0x4a: | 228 | case 0x4a: |
216 | case 0x4e: | ||
217 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | 229 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); |
218 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | 230 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); |
219 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | 231 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); |
220 | break; | 232 | break; |
221 | |||
222 | case 0x46: | 233 | case 0x46: |
223 | case 0x47: | 234 | case 0x47: |
224 | case 0x49: | 235 | case 0x49: |
225 | case 0x4b: | 236 | case 0x4b: |
237 | case 0x4c: | ||
238 | case 0x67: | ||
239 | default: | ||
226 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); | 240 | nv_wr32(dev, NV47_PGRAPH_TSIZE(i), tile->pitch); |
227 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); | 241 | nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), tile->limit); |
228 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); | 242 | nv_wr32(dev, NV47_PGRAPH_TILE(i), tile->addr); |
@@ -230,15 +244,6 @@ nv40_graph_set_tile_region(struct drm_device *dev, int i) | |||
230 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | 244 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); |
231 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | 245 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); |
232 | break; | 246 | break; |
233 | |||
234 | default: | ||
235 | nv_wr32(dev, NV20_PGRAPH_TSIZE(i), tile->pitch); | ||
236 | nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), tile->limit); | ||
237 | nv_wr32(dev, NV20_PGRAPH_TILE(i), tile->addr); | ||
238 | nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tile->pitch); | ||
239 | nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tile->limit); | ||
240 | nv_wr32(dev, NV40_PGRAPH_TILE1(i), tile->addr); | ||
241 | break; | ||
242 | } | 247 | } |
243 | } | 248 | } |
244 | 249 | ||
@@ -396,17 +401,20 @@ nv40_graph_init(struct drm_device *dev) | |||
396 | break; | 401 | break; |
397 | default: | 402 | default: |
398 | switch (dev_priv->chipset) { | 403 | switch (dev_priv->chipset) { |
399 | case 0x46: | 404 | case 0x41: |
400 | case 0x47: | 405 | case 0x42: |
401 | case 0x49: | 406 | case 0x43: |
402 | case 0x4b: | 407 | case 0x45: |
403 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | 408 | case 0x4e: |
404 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | 409 | case 0x44: |
405 | break; | 410 | case 0x4a: |
406 | default: | ||
407 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); | 411 | nv_wr32(dev, 0x4009F0, nv_rd32(dev, NV04_PFB_CFG0)); |
408 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); | 412 | nv_wr32(dev, 0x4009F4, nv_rd32(dev, NV04_PFB_CFG1)); |
409 | break; | 413 | break; |
414 | default: | ||
415 | nv_wr32(dev, 0x400DF0, nv_rd32(dev, NV04_PFB_CFG0)); | ||
416 | nv_wr32(dev, 0x400DF4, nv_rd32(dev, NV04_PFB_CFG1)); | ||
417 | break; | ||
410 | } | 418 | } |
411 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); | 419 | nv_wr32(dev, 0x4069F0, nv_rd32(dev, NV04_PFB_CFG0)); |
412 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); | 420 | nv_wr32(dev, 0x4069F4, nv_rd32(dev, NV04_PFB_CFG1)); |
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index 095bc507fb16..a4e5e53e0a62 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c | |||
@@ -557,9 +557,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
557 | 557 | ||
558 | /* use recommended ref_div for ss */ | 558 | /* use recommended ref_div for ss */ |
559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { | 559 | if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) { |
560 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
561 | if (ss_enabled) { | 560 | if (ss_enabled) { |
562 | if (ss->refdiv) { | 561 | if (ss->refdiv) { |
562 | pll->flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP; | ||
563 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 563 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
564 | pll->reference_div = ss->refdiv; | 564 | pll->reference_div = ss->refdiv; |
565 | if (ASIC_IS_AVIVO(rdev)) | 565 | if (ASIC_IS_AVIVO(rdev)) |
@@ -662,10 +662,12 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc, | |||
662 | index, (uint32_t *)&args); | 662 | index, (uint32_t *)&args); |
663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; | 663 | adjusted_clock = le32_to_cpu(args.v3.sOutput.ulDispPllFreq) * 10; |
664 | if (args.v3.sOutput.ucRefDiv) { | 664 | if (args.v3.sOutput.ucRefDiv) { |
665 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
665 | pll->flags |= RADEON_PLL_USE_REF_DIV; | 666 | pll->flags |= RADEON_PLL_USE_REF_DIV; |
666 | pll->reference_div = args.v3.sOutput.ucRefDiv; | 667 | pll->reference_div = args.v3.sOutput.ucRefDiv; |
667 | } | 668 | } |
668 | if (args.v3.sOutput.ucPostDiv) { | 669 | if (args.v3.sOutput.ucPostDiv) { |
670 | pll->flags |= RADEON_PLL_USE_FRAC_FB_DIV; | ||
669 | pll->flags |= RADEON_PLL_USE_POST_DIV; | 671 | pll->flags |= RADEON_PLL_USE_POST_DIV; |
670 | pll->post_div = args.v3.sOutput.ucPostDiv; | 672 | pll->post_div = args.v3.sOutput.ucPostDiv; |
671 | } | 673 | } |
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c index 768c60ee4ab6..069efa8c8ecf 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c | |||
@@ -910,6 +910,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 910 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
911 | break; | 911 | break; |
912 | case R300_TX_FORMAT_X16: | 912 | case R300_TX_FORMAT_X16: |
913 | case R300_TX_FORMAT_FL_I16: | ||
913 | case R300_TX_FORMAT_Y8X8: | 914 | case R300_TX_FORMAT_Y8X8: |
914 | case R300_TX_FORMAT_Z5Y6X5: | 915 | case R300_TX_FORMAT_Z5Y6X5: |
915 | case R300_TX_FORMAT_Z6Y5X5: | 916 | case R300_TX_FORMAT_Z6Y5X5: |
@@ -922,6 +923,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p, | |||
922 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; | 923 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
923 | break; | 924 | break; |
924 | case R300_TX_FORMAT_Y16X16: | 925 | case R300_TX_FORMAT_Y16X16: |
926 | case R300_TX_FORMAT_FL_I16A16: | ||
925 | case R300_TX_FORMAT_Z11Y11X10: | 927 | case R300_TX_FORMAT_Z11Y11X10: |
926 | case R300_TX_FORMAT_Z10Y11X11: | 928 | case R300_TX_FORMAT_Z10Y11X11: |
927 | case R300_TX_FORMAT_W8Z8Y8X8: | 929 | case R300_TX_FORMAT_W8Z8Y8X8: |
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 773e484f1646..297bc9a7d6e6 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig | |||
@@ -238,13 +238,13 @@ config SENSORS_K8TEMP | |||
238 | will be called k8temp. | 238 | will be called k8temp. |
239 | 239 | ||
240 | config SENSORS_K10TEMP | 240 | config SENSORS_K10TEMP |
241 | tristate "AMD Phenom/Sempron/Turion/Opteron temperature sensor" | 241 | tristate "AMD Family 10h/11h/12h/14h temperature sensor" |
242 | depends on X86 && PCI | 242 | depends on X86 && PCI |
243 | help | 243 | help |
244 | If you say yes here you get support for the temperature | 244 | If you say yes here you get support for the temperature |
245 | sensor(s) inside your CPU. Supported are later revisions of | 245 | sensor(s) inside your CPU. Supported are later revisions of |
246 | the AMD Family 10h and all revisions of the AMD Family 11h | 246 | the AMD Family 10h and all revisions of the AMD Family 11h, |
247 | microarchitectures. | 247 | 12h (Llano), and 14h (Brazos) microarchitectures. |
248 | 248 | ||
249 | This driver can also be built as a module. If so, the module | 249 | This driver can also be built as a module. If so, the module |
250 | will be called k10temp. | 250 | will be called k10temp. |
@@ -455,13 +455,14 @@ config SENSORS_JZ4740 | |||
455 | called jz4740-hwmon. | 455 | called jz4740-hwmon. |
456 | 456 | ||
457 | config SENSORS_JC42 | 457 | config SENSORS_JC42 |
458 | tristate "JEDEC JC42.4 compliant temperature sensors" | 458 | tristate "JEDEC JC42.4 compliant memory module temperature sensors" |
459 | depends on I2C | 459 | depends on I2C |
460 | help | 460 | help |
461 | If you say yes here you get support for Jedec JC42.4 compliant | 461 | If you say yes here, you get support for JEDEC JC42.4 compliant |
462 | temperature sensors. Support will include, but not be limited to, | 462 | temperature sensors, which are used on many DDR3 memory modules for |
463 | ADT7408, CAT34TS02,, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, | 463 | mobile devices and servers. Support will include, but not be limited |
464 | MCP9843, SE97, SE98, STTS424, TSE2002B3, and TS3000B3. | 464 | to, ADT7408, CAT34TS02, CAT6095, MAX6604, MCP9805, MCP98242, MCP98243, |
465 | MCP9843, SE97, SE98, STTS424(E), TSE2002B3, and TS3000B3. | ||
465 | 466 | ||
466 | This driver can also be built as a module. If so, the module | 467 | This driver can also be built as a module. If so, the module |
467 | will be called jc42. | 468 | will be called jc42. |
@@ -574,7 +575,7 @@ config SENSORS_LM85 | |||
574 | help | 575 | help |
575 | If you say yes here you get support for National Semiconductor LM85 | 576 | If you say yes here you get support for National Semiconductor LM85 |
576 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, | 577 | sensor chips and clones: ADM1027, ADT7463, ADT7468, EMC6D100, |
577 | EMC6D101 and EMC6D102. | 578 | EMC6D101, EMC6D102, and EMC6D103. |
578 | 579 | ||
579 | This driver can also be built as a module. If so, the module | 580 | This driver can also be built as a module. If so, the module |
580 | will be called lm85. | 581 | will be called lm85. |
diff --git a/drivers/hwmon/jc42.c b/drivers/hwmon/jc42.c index 340fc78c8dde..934991237061 100644 --- a/drivers/hwmon/jc42.c +++ b/drivers/hwmon/jc42.c | |||
@@ -53,6 +53,8 @@ static const unsigned short normal_i2c[] = { | |||
53 | 53 | ||
54 | /* Configuration register defines */ | 54 | /* Configuration register defines */ |
55 | #define JC42_CFG_CRIT_ONLY (1 << 2) | 55 | #define JC42_CFG_CRIT_ONLY (1 << 2) |
56 | #define JC42_CFG_TCRIT_LOCK (1 << 6) | ||
57 | #define JC42_CFG_EVENT_LOCK (1 << 7) | ||
56 | #define JC42_CFG_SHUTDOWN (1 << 8) | 58 | #define JC42_CFG_SHUTDOWN (1 << 8) |
57 | #define JC42_CFG_HYST_SHIFT 9 | 59 | #define JC42_CFG_HYST_SHIFT 9 |
58 | #define JC42_CFG_HYST_MASK 0x03 | 60 | #define JC42_CFG_HYST_MASK 0x03 |
@@ -332,7 +334,7 @@ static ssize_t set_temp_crit_hyst(struct device *dev, | |||
332 | { | 334 | { |
333 | struct i2c_client *client = to_i2c_client(dev); | 335 | struct i2c_client *client = to_i2c_client(dev); |
334 | struct jc42_data *data = i2c_get_clientdata(client); | 336 | struct jc42_data *data = i2c_get_clientdata(client); |
335 | long val; | 337 | unsigned long val; |
336 | int diff, hyst; | 338 | int diff, hyst; |
337 | int err; | 339 | int err; |
338 | int ret = count; | 340 | int ret = count; |
@@ -380,14 +382,14 @@ static ssize_t show_alarm(struct device *dev, | |||
380 | 382 | ||
381 | static DEVICE_ATTR(temp1_input, S_IRUGO, | 383 | static DEVICE_ATTR(temp1_input, S_IRUGO, |
382 | show_temp_input, NULL); | 384 | show_temp_input, NULL); |
383 | static DEVICE_ATTR(temp1_crit, S_IWUSR | S_IRUGO, | 385 | static DEVICE_ATTR(temp1_crit, S_IRUGO, |
384 | show_temp_crit, set_temp_crit); | 386 | show_temp_crit, set_temp_crit); |
385 | static DEVICE_ATTR(temp1_min, S_IWUSR | S_IRUGO, | 387 | static DEVICE_ATTR(temp1_min, S_IRUGO, |
386 | show_temp_min, set_temp_min); | 388 | show_temp_min, set_temp_min); |
387 | static DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, | 389 | static DEVICE_ATTR(temp1_max, S_IRUGO, |
388 | show_temp_max, set_temp_max); | 390 | show_temp_max, set_temp_max); |
389 | 391 | ||
390 | static DEVICE_ATTR(temp1_crit_hyst, S_IWUSR | S_IRUGO, | 392 | static DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, |
391 | show_temp_crit_hyst, set_temp_crit_hyst); | 393 | show_temp_crit_hyst, set_temp_crit_hyst); |
392 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, | 394 | static DEVICE_ATTR(temp1_max_hyst, S_IRUGO, |
393 | show_temp_max_hyst, NULL); | 395 | show_temp_max_hyst, NULL); |
@@ -412,8 +414,31 @@ static struct attribute *jc42_attributes[] = { | |||
412 | NULL | 414 | NULL |
413 | }; | 415 | }; |
414 | 416 | ||
417 | static mode_t jc42_attribute_mode(struct kobject *kobj, | ||
418 | struct attribute *attr, int index) | ||
419 | { | ||
420 | struct device *dev = container_of(kobj, struct device, kobj); | ||
421 | struct i2c_client *client = to_i2c_client(dev); | ||
422 | struct jc42_data *data = i2c_get_clientdata(client); | ||
423 | unsigned int config = data->config; | ||
424 | bool readonly; | ||
425 | |||
426 | if (attr == &dev_attr_temp1_crit.attr) | ||
427 | readonly = config & JC42_CFG_TCRIT_LOCK; | ||
428 | else if (attr == &dev_attr_temp1_min.attr || | ||
429 | attr == &dev_attr_temp1_max.attr) | ||
430 | readonly = config & JC42_CFG_EVENT_LOCK; | ||
431 | else if (attr == &dev_attr_temp1_crit_hyst.attr) | ||
432 | readonly = config & (JC42_CFG_EVENT_LOCK | JC42_CFG_TCRIT_LOCK); | ||
433 | else | ||
434 | readonly = true; | ||
435 | |||
436 | return S_IRUGO | (readonly ? 0 : S_IWUSR); | ||
437 | } | ||
438 | |||
415 | static const struct attribute_group jc42_group = { | 439 | static const struct attribute_group jc42_group = { |
416 | .attrs = jc42_attributes, | 440 | .attrs = jc42_attributes, |
441 | .is_visible = jc42_attribute_mode, | ||
417 | }; | 442 | }; |
418 | 443 | ||
419 | /* Return 0 if detection is successful, -ENODEV otherwise */ | 444 | /* Return 0 if detection is successful, -ENODEV otherwise */ |
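As an aside for readers unfamiliar with the mechanism: the .is_visible hook used in the jc42 change above is the stock sysfs way to decide attribute permissions at group registration time instead of hard-coding them in DEVICE_ATTR(). Below is a minimal sketch of that pattern, not the jc42 driver itself; the lock bit, the demo_* names and the stubbed show/store handlers are illustrative assumptions (kernels of this vintage use mode_t for the callback's return type).

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

#define DEMO_CFG_LOCKED	(1 << 6)	/* hypothetical "limits locked" bit */

/* hypothetical helper; a real driver would read the chip's config register */
static unsigned int demo_read_config(struct device *dev)
{
	return 0;
}

static ssize_t limit_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", 0);		/* placeholder value */
}

static ssize_t limit_store(struct device *dev, struct device_attribute *attr,
			   const char *buf, size_t count)
{
	return count;				/* placeholder: accept and ignore */
}

static DEVICE_ATTR(limit, S_IWUSR | S_IRUGO, limit_show, limit_store);

static struct attribute *demo_attrs[] = {
	&dev_attr_limit.attr,
	NULL
};

/* Called once per attribute while the group is being created; the returned
 * mode overrides the one passed to DEVICE_ATTR(), and returning 0 hides
 * the file entirely. */
static mode_t demo_attr_mode(struct kobject *kobj, struct attribute *attr,
			     int index)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (demo_read_config(dev) & DEMO_CFG_LOCKED)
		return S_IRUGO;			/* locked: expose as read-only */
	return S_IRUGO | S_IWUSR;
}

static const struct attribute_group demo_group = {
	.attrs		= demo_attrs,
	.is_visible	= demo_attr_mode,
};

The group is then registered with sysfs_create_group() from probe, much as jc42 registers its jc42_group.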
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index da5a2404cd3e..82bf65aa2968 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * k10temp.c - AMD Family 10h/11h processor hardware monitoring | 2 | * k10temp.c - AMD Family 10h/11h/12h/14h processor hardware monitoring |
3 | * | 3 | * |
4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> | 4 | * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> |
5 | * | 5 | * |
@@ -25,7 +25,7 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/processor.h> | 26 | #include <asm/processor.h> |
27 | 27 | ||
28 | MODULE_DESCRIPTION("AMD Family 10h/11h CPU core temperature monitor"); | 28 | MODULE_DESCRIPTION("AMD Family 10h/11h/12h/14h CPU core temperature monitor"); |
29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); | 29 | MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); |
30 | MODULE_LICENSE("GPL"); | 30 | MODULE_LICENSE("GPL"); |
31 | 31 | ||
@@ -208,6 +208,7 @@ static void __devexit k10temp_remove(struct pci_dev *pdev) | |||
208 | static const struct pci_device_id k10temp_id_table[] = { | 208 | static const struct pci_device_id k10temp_id_table[] = { |
209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, | 209 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) }, |
210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, | 210 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_11H_NB_MISC) }, |
211 | { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) }, | ||
211 | {} | 212 | {} |
212 | }; | 213 | }; |
213 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); | 214 | MODULE_DEVICE_TABLE(pci, k10temp_id_table); |
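For context on the k10temp change: the one new table entry is all the autoload plumbing needs. The PCI core matches probe() against the id_table, and MODULE_DEVICE_TABLE() exports the same table as module aliases so udev pulls the driver in when a matching northbridge function is present. A generic sketch of that pattern follows; the demo_* names and the device ID are illustrative, not taken from k10temp.

#include <linux/module.h>
#include <linux/pci.h>

static int __devinit demo_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	return 0;			/* placeholder probe */
}

static const struct pci_device_id demo_id_table[] = {
	/* PCI_VDEVICE(vendor, dev) fills vendor/device and wildcards
	 * subvendor/subdevice */
	{ PCI_VDEVICE(AMD, 0x1703) },	/* illustrative device ID */
	{ }				/* all-zero terminator */
};
MODULE_DEVICE_TABLE(pci, demo_id_table);

static struct pci_driver demo_driver = {
	.name		= "demo",
	.id_table	= demo_id_table,
	.probe		= demo_probe,
};

static int __init demo_init(void)
{
	return pci_register_driver(&demo_driver);
}

static void __exit demo_exit(void)
{
	pci_unregister_driver(&demo_driver);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");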
diff --git a/drivers/hwmon/lm85.c b/drivers/hwmon/lm85.c index 1e229847f37a..d2cc28660816 100644 --- a/drivers/hwmon/lm85.c +++ b/drivers/hwmon/lm85.c | |||
@@ -41,7 +41,7 @@ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, I2C_CLIENT_END }; | |||
41 | enum chips { | 41 | enum chips { |
42 | any_chip, lm85b, lm85c, | 42 | any_chip, lm85b, lm85c, |
43 | adm1027, adt7463, adt7468, | 43 | adm1027, adt7463, adt7468, |
44 | emc6d100, emc6d102 | 44 | emc6d100, emc6d102, emc6d103 |
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* The LM85 registers */ | 47 | /* The LM85 registers */ |
@@ -90,6 +90,9 @@ enum chips { | |||
90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 | 90 | #define LM85_VERSTEP_EMC6D100_A0 0x60 |
91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 | 91 | #define LM85_VERSTEP_EMC6D100_A1 0x61 |
92 | #define LM85_VERSTEP_EMC6D102 0x65 | 92 | #define LM85_VERSTEP_EMC6D102 0x65 |
93 | #define LM85_VERSTEP_EMC6D103_A0 0x68 | ||
94 | #define LM85_VERSTEP_EMC6D103_A1 0x69 | ||
95 | #define LM85_VERSTEP_EMC6D103S 0x6A /* Also known as EMC6D103:A2 */ | ||
93 | 96 | ||
94 | #define LM85_REG_CONFIG 0x40 | 97 | #define LM85_REG_CONFIG 0x40 |
95 | 98 | ||
@@ -348,6 +351,7 @@ static const struct i2c_device_id lm85_id[] = { | |||
348 | { "emc6d100", emc6d100 }, | 351 | { "emc6d100", emc6d100 }, |
349 | { "emc6d101", emc6d100 }, | 352 | { "emc6d101", emc6d100 }, |
350 | { "emc6d102", emc6d102 }, | 353 | { "emc6d102", emc6d102 }, |
354 | { "emc6d103", emc6d103 }, | ||
351 | { } | 355 | { } |
352 | }; | 356 | }; |
353 | MODULE_DEVICE_TABLE(i2c, lm85_id); | 357 | MODULE_DEVICE_TABLE(i2c, lm85_id); |
@@ -1250,6 +1254,20 @@ static int lm85_detect(struct i2c_client *client, struct i2c_board_info *info) | |||
1250 | case LM85_VERSTEP_EMC6D102: | 1254 | case LM85_VERSTEP_EMC6D102: |
1251 | type_name = "emc6d102"; | 1255 | type_name = "emc6d102"; |
1252 | break; | 1256 | break; |
1257 | case LM85_VERSTEP_EMC6D103_A0: | ||
1258 | case LM85_VERSTEP_EMC6D103_A1: | ||
1259 | type_name = "emc6d103"; | ||
1260 | break; | ||
1261 | /* | ||
1262 | * Registers apparently missing in EMC6D103S/EMC6D103:A2 | ||
1263 | * compared to EMC6D103:A0, EMC6D103:A1, and EMC6D102 | ||
1264 | * (according to the data sheets), but used unconditionally | ||
1265 | * in the driver: 62[5:7], 6D[0:7], and 6E[0:7]. | ||
1266 | * So skip EMC6D103S for now. | ||
1267 | case LM85_VERSTEP_EMC6D103S: | ||
1268 | type_name = "emc6d103s"; | ||
1269 | break; | ||
1270 | */ | ||
1253 | } | 1271 | } |
1254 | } else { | 1272 | } else { |
1255 | dev_dbg(&adapter->dev, | 1273 | dev_dbg(&adapter->dev, |
@@ -1283,6 +1301,7 @@ static int lm85_probe(struct i2c_client *client, | |||
1283 | case adt7468: | 1301 | case adt7468: |
1284 | case emc6d100: | 1302 | case emc6d100: |
1285 | case emc6d102: | 1303 | case emc6d102: |
1304 | case emc6d103: | ||
1286 | data->freq_map = adm1027_freq_map; | 1305 | data->freq_map = adm1027_freq_map; |
1287 | break; | 1306 | break; |
1288 | default: | 1307 | default: |
@@ -1468,7 +1487,7 @@ static struct lm85_data *lm85_update_device(struct device *dev) | |||
1468 | /* More alarm bits */ | 1487 | /* More alarm bits */ |
1469 | data->alarms |= lm85_read_value(client, | 1488 | data->alarms |= lm85_read_value(client, |
1470 | EMC6D100_REG_ALARM3) << 16; | 1489 | EMC6D100_REG_ALARM3) << 16; |
1471 | } else if (data->type == emc6d102) { | 1490 | } else if (data->type == emc6d102 || data->type == emc6d103) { |
1472 | /* Have to read LSB bits after the MSB ones because | 1491 | /* Have to read LSB bits after the MSB ones because |
1473 | the reading of the MSB bits has frozen the | 1492 | the reading of the MSB bits has frozen the |
1474 | LSBs (backward from the ADM1027). | 1493 | LSBs (backward from the ADM1027). |
diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8b606fd64022..08c194861af5 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c | |||
@@ -2610,9 +2610,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2610 | netif_carrier_on(nesvnic->netdev); | 2610 | netif_carrier_on(nesvnic->netdev); |
2611 | 2611 | ||
2612 | spin_lock(&nesvnic->port_ibevent_lock); | 2612 | spin_lock(&nesvnic->port_ibevent_lock); |
2613 | if (nesdev->iw_status == 0) { | 2613 | if (nesvnic->of_device_registered) { |
2614 | nesdev->iw_status = 1; | 2614 | if (nesdev->iw_status == 0) { |
2615 | nes_port_ibevent(nesvnic); | 2615 | nesdev->iw_status = 1; |
2616 | nes_port_ibevent(nesvnic); | ||
2617 | } | ||
2616 | } | 2618 | } |
2617 | spin_unlock(&nesvnic->port_ibevent_lock); | 2619 | spin_unlock(&nesvnic->port_ibevent_lock); |
2618 | } | 2620 | } |
@@ -2642,9 +2644,11 @@ static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number) | |||
2642 | netif_carrier_off(nesvnic->netdev); | 2644 | netif_carrier_off(nesvnic->netdev); |
2643 | 2645 | ||
2644 | spin_lock(&nesvnic->port_ibevent_lock); | 2646 | spin_lock(&nesvnic->port_ibevent_lock); |
2645 | if (nesdev->iw_status == 1) { | 2647 | if (nesvnic->of_device_registered) { |
2646 | nesdev->iw_status = 0; | 2648 | if (nesdev->iw_status == 1) { |
2647 | nes_port_ibevent(nesvnic); | 2649 | nesdev->iw_status = 0; |
2650 | nes_port_ibevent(nesvnic); | ||
2651 | } | ||
2648 | } | 2652 | } |
2649 | spin_unlock(&nesvnic->port_ibevent_lock); | 2653 | spin_unlock(&nesvnic->port_ibevent_lock); |
2650 | } | 2654 | } |
@@ -2703,9 +2707,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2703 | netif_carrier_on(nesvnic->netdev); | 2707 | netif_carrier_on(nesvnic->netdev); |
2704 | 2708 | ||
2705 | spin_lock(&nesvnic->port_ibevent_lock); | 2709 | spin_lock(&nesvnic->port_ibevent_lock); |
2706 | if (nesdev->iw_status == 0) { | 2710 | if (nesvnic->of_device_registered) { |
2707 | nesdev->iw_status = 1; | 2711 | if (nesdev->iw_status == 0) { |
2708 | nes_port_ibevent(nesvnic); | 2712 | nesdev->iw_status = 1; |
2713 | nes_port_ibevent(nesvnic); | ||
2714 | } | ||
2709 | } | 2715 | } |
2710 | spin_unlock(&nesvnic->port_ibevent_lock); | 2716 | spin_unlock(&nesvnic->port_ibevent_lock); |
2711 | } | 2717 | } |
@@ -2723,9 +2729,11 @@ void nes_recheck_link_status(struct work_struct *work) | |||
2723 | netif_carrier_off(nesvnic->netdev); | 2729 | netif_carrier_off(nesvnic->netdev); |
2724 | 2730 | ||
2725 | spin_lock(&nesvnic->port_ibevent_lock); | 2731 | spin_lock(&nesvnic->port_ibevent_lock); |
2726 | if (nesdev->iw_status == 1) { | 2732 | if (nesvnic->of_device_registered) { |
2727 | nesdev->iw_status = 0; | 2733 | if (nesdev->iw_status == 1) { |
2728 | nes_port_ibevent(nesvnic); | 2734 | nesdev->iw_status = 0; |
2735 | nes_port_ibevent(nesvnic); | ||
2736 | } | ||
2729 | } | 2737 | } |
2730 | spin_unlock(&nesvnic->port_ibevent_lock); | 2738 | spin_unlock(&nesvnic->port_ibevent_lock); |
2731 | } | 2739 | } |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c index 8245237b67ce..eca0c41f1226 100644 --- a/drivers/infiniband/hw/qib/qib_rc.c +++ b/drivers/infiniband/hw/qib/qib_rc.c | |||
@@ -1005,7 +1005,8 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1005 | * there are still requests that haven't been acked. | 1005 | * there are still requests that haven't been acked. |
1006 | */ | 1006 | */ |
1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && | 1007 | if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && |
1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN))) | 1008 | !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && |
1009 | (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1009 | start_timer(qp); | 1010 | start_timer(qp); |
1010 | 1011 | ||
1011 | while (qp->s_last != qp->s_acked) { | 1012 | while (qp->s_last != qp->s_acked) { |
@@ -1439,6 +1440,8 @@ static void qib_rc_rcv_resp(struct qib_ibport *ibp, | |||
1439 | } | 1440 | } |
1440 | 1441 | ||
1441 | spin_lock_irqsave(&qp->s_lock, flags); | 1442 | spin_lock_irqsave(&qp->s_lock, flags); |
1443 | if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) | ||
1444 | goto ack_done; | ||
1442 | 1445 | ||
1443 | /* Ignore invalid responses. */ | 1446 | /* Ignore invalid responses. */ |
1444 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) | 1447 | if (qib_cmp24(psn, qp->s_next_psn) >= 0) |
diff --git a/drivers/isdn/hisax/isdnl2.c b/drivers/isdn/hisax/isdnl2.c index 0858791978d8..cfff0c41d298 100644 --- a/drivers/isdn/hisax/isdnl2.c +++ b/drivers/isdn/hisax/isdnl2.c | |||
@@ -1247,10 +1247,10 @@ static void | |||
1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | 1247 | l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) |
1248 | { | 1248 | { |
1249 | struct PStack *st = fi->userdata; | 1249 | struct PStack *st = fi->userdata; |
1250 | struct sk_buff *skb, *oskb; | 1250 | struct sk_buff *skb; |
1251 | struct Layer2 *l2 = &st->l2; | 1251 | struct Layer2 *l2 = &st->l2; |
1252 | u_char header[MAX_HEADER_LEN]; | 1252 | u_char header[MAX_HEADER_LEN]; |
1253 | int i; | 1253 | int i, hdr_space_needed; |
1254 | int unsigned p1; | 1254 | int unsigned p1; |
1255 | u_long flags; | 1255 | u_long flags; |
1256 | 1256 | ||
@@ -1261,6 +1261,16 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1261 | if (!skb) | 1261 | if (!skb) |
1262 | return; | 1262 | return; |
1263 | 1263 | ||
1264 | hdr_space_needed = l2headersize(l2, 0); | ||
1265 | if (hdr_space_needed > skb_headroom(skb)) { | ||
1266 | struct sk_buff *orig_skb = skb; | ||
1267 | |||
1268 | skb = skb_realloc_headroom(skb, hdr_space_needed); | ||
1269 | if (!skb) { | ||
1270 | dev_kfree_skb(orig_skb); | ||
1271 | return; | ||
1272 | } | ||
1273 | } | ||
1264 | spin_lock_irqsave(&l2->lock, flags); | 1274 | spin_lock_irqsave(&l2->lock, flags); |
1265 | if(test_bit(FLG_MOD128, &l2->flag)) | 1275 | if(test_bit(FLG_MOD128, &l2->flag)) |
1266 | p1 = (l2->vs - l2->va) % 128; | 1276 | p1 = (l2->vs - l2->va) % 128; |
@@ -1285,19 +1295,7 @@ l2_pull_iqueue(struct FsmInst *fi, int event, void *arg) | |||
1285 | l2->vs = (l2->vs + 1) % 8; | 1295 | l2->vs = (l2->vs + 1) % 8; |
1286 | } | 1296 | } |
1287 | spin_unlock_irqrestore(&l2->lock, flags); | 1297 | spin_unlock_irqrestore(&l2->lock, flags); |
1288 | p1 = skb->data - skb->head; | 1298 | memcpy(skb_push(skb, i), header, i); |
1289 | if (p1 >= i) | ||
1290 | memcpy(skb_push(skb, i), header, i); | ||
1291 | else { | ||
1292 | printk(KERN_WARNING | ||
1293 | "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1); | ||
1294 | oskb = skb; | ||
1295 | skb = alloc_skb(oskb->len + i, GFP_ATOMIC); | ||
1296 | memcpy(skb_put(skb, i), header, i); | ||
1297 | skb_copy_from_linear_data(oskb, | ||
1298 | skb_put(skb, oskb->len), oskb->len); | ||
1299 | dev_kfree_skb(oskb); | ||
1300 | } | ||
1301 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); | 1299 | st->l2.l2l1(st, PH_PULL | INDICATION, skb); |
1302 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); | 1300 | test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag); |
1303 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { | 1301 | if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) { |
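The isdnl2 hunk above replaces the old copy-after-the-fact fallback (whose alloc_skb() return was never checked) with an up-front headroom check, so the later skb_push() cannot underrun once the send state has been updated. A hedged, generic sketch of that pattern follows; the names are illustrative, not the hisax symbols.

#include <linux/skbuff.h>

/* Return an skb guaranteed to have at least 'needed' bytes of headroom,
 * or NULL if the copy could not be allocated (the frame is dropped then). */
static struct sk_buff *demo_ensure_headroom(struct sk_buff *skb,
					    unsigned int needed)
{
	struct sk_buff *nskb;

	if (skb_headroom(skb) >= needed)
		return skb;

	/* skb_realloc_headroom() allocates a copy with the extra headroom;
	 * the original buffer is still owned by the caller and is freed
	 * here on both the success and the failure path. */
	nskb = skb_realloc_headroom(skb, needed);
	dev_kfree_skb(skb);
	return nskb;
}

The caller can then skb_push() the header and memcpy() it in, knowing the push cannot run past the start of the buffer.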
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index e9a3eab7b0cf..8c1d85e27be4 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c | |||
@@ -621,7 +621,7 @@ static int __init memstick_init(void) | |||
621 | { | 621 | { |
622 | int rc; | 622 | int rc; |
623 | 623 | ||
624 | workqueue = create_freezeable_workqueue("kmemstick"); | 624 | workqueue = create_freezable_workqueue("kmemstick"); |
625 | if (!workqueue) | 625 | if (!workqueue) |
626 | return -ENOMEM; | 626 | return -ENOMEM; |
627 | 627 | ||
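This memstick change, and the matching one-liners in tifm_core.c, vmw_balloon.c, r852.c, sm_ftl.c and mcp251x.c further down, only track an API rename: create_freezeable_workqueue() is now spelled create_freezable_workqueue(); behaviour is unchanged. A minimal sketch of the renamed call in a module init/exit pair, with illustrative names:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* freezable: work on this queue is frozen across suspend/hibernate */
	demo_wq = create_freezable_workqueue("demo_wq");
	if (!demo_wq)
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");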
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h index f71f22948477..1735c84ff757 100644 --- a/drivers/message/fusion/mptbase.h +++ b/drivers/message/fusion/mptbase.h | |||
@@ -76,8 +76,8 @@ | |||
76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR | 76 | #define COPYRIGHT "Copyright (c) 1999-2008 " MODULEAUTHOR |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #define MPT_LINUX_VERSION_COMMON "3.04.17" | 79 | #define MPT_LINUX_VERSION_COMMON "3.04.18" |
80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.17" | 80 | #define MPT_LINUX_PACKAGE_NAME "@(#)mptlinux-3.04.18" |
81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" | 81 | #define WHAT_MAGIC_STRING "@" "(" "#" ")" |
82 | 82 | ||
83 | #define show_mptmod_ver(s,ver) \ | 83 | #define show_mptmod_ver(s,ver) \ |
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c index a3856ed90aef..e8deb8ed0499 100644 --- a/drivers/message/fusion/mptctl.c +++ b/drivers/message/fusion/mptctl.c | |||
@@ -597,6 +597,13 @@ mptctl_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply) | |||
597 | } | 597 | } |
598 | 598 | ||
599 | static int | 599 | static int |
600 | mptctl_release(struct inode *inode, struct file *filep) | ||
601 | { | ||
602 | fasync_helper(-1, filep, 0, &async_queue); | ||
603 | return 0; | ||
604 | } | ||
605 | |||
606 | static int | ||
600 | mptctl_fasync(int fd, struct file *filep, int mode) | 607 | mptctl_fasync(int fd, struct file *filep, int mode) |
601 | { | 608 | { |
602 | MPT_ADAPTER *ioc; | 609 | MPT_ADAPTER *ioc; |
@@ -2815,6 +2822,7 @@ static const struct file_operations mptctl_fops = { | |||
2815 | .llseek = no_llseek, | 2822 | .llseek = no_llseek, |
2816 | .fasync = mptctl_fasync, | 2823 | .fasync = mptctl_fasync, |
2817 | .unlocked_ioctl = mptctl_ioctl, | 2824 | .unlocked_ioctl = mptctl_ioctl, |
2825 | .release = mptctl_release, | ||
2818 | #ifdef CONFIG_COMPAT | 2826 | #ifdef CONFIG_COMPAT |
2819 | .compat_ioctl = compat_mpctl_ioctl, | 2827 | .compat_ioctl = compat_mpctl_ioctl, |
2820 | #endif | 2828 | #endif |
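The new mptctl_release() closes a small gap: a process that enabled SIGIO delivery with fasync and then simply closed the fd would leave a stale entry on the async queue. Dropping the file from the queue in .release is the canonical fix; a generic sketch with illustrative names:

#include <linux/fs.h>
#include <linux/module.h>

static struct fasync_struct *demo_async_queue;

static int demo_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &demo_async_queue);
}

static int demo_release(struct inode *inode, struct file *filp)
{
	/* on == 0 removes filp from the queue; an fd of -1 is customary here */
	fasync_helper(-1, filp, 0, &demo_async_queue);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner	 = THIS_MODULE,
	.fasync	 = demo_fasync,
	.release = demo_release,
};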
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c index 59b8f53d1ece..0d9b82a44540 100644 --- a/drivers/message/fusion/mptscsih.c +++ b/drivers/message/fusion/mptscsih.c | |||
@@ -1873,8 +1873,9 @@ mptscsih_abort(struct scsi_cmnd * SCpnt) | |||
1873 | } | 1873 | } |
1874 | 1874 | ||
1875 | out: | 1875 | out: |
1876 | printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n", | 1876 | printk(MYIOC_s_INFO_FMT "task abort: %s (rv=%04x) (sc=%p) (sn=%ld)\n", |
1877 | ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt); | 1877 | ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), retval, |
1878 | SCpnt, SCpnt->serial_number); | ||
1878 | 1879 | ||
1879 | return retval; | 1880 | return retval; |
1880 | } | 1881 | } |
@@ -1911,7 +1912,7 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt) | |||
1911 | 1912 | ||
1912 | vdevice = SCpnt->device->hostdata; | 1913 | vdevice = SCpnt->device->hostdata; |
1913 | if (!vdevice || !vdevice->vtarget) { | 1914 | if (!vdevice || !vdevice->vtarget) { |
1914 | retval = SUCCESS; | 1915 | retval = 0; |
1915 | goto out; | 1916 | goto out; |
1916 | } | 1917 | } |
1917 | 1918 | ||
diff --git a/drivers/misc/tifm_core.c b/drivers/misc/tifm_core.c index 5f6852dff40b..44d4475a09dd 100644 --- a/drivers/misc/tifm_core.c +++ b/drivers/misc/tifm_core.c | |||
@@ -329,7 +329,7 @@ static int __init tifm_init(void) | |||
329 | { | 329 | { |
330 | int rc; | 330 | int rc; |
331 | 331 | ||
332 | workqueue = create_freezeable_workqueue("tifm"); | 332 | workqueue = create_freezable_workqueue("tifm"); |
333 | if (!workqueue) | 333 | if (!workqueue) |
334 | return -ENOMEM; | 334 | return -ENOMEM; |
335 | 335 | ||
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 4d2ea8e80140..6df5a55da110 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c | |||
@@ -785,7 +785,7 @@ static int __init vmballoon_init(void) | |||
785 | if (x86_hyper != &x86_hyper_vmware) | 785 | if (x86_hyper != &x86_hyper_vmware) |
786 | return -ENODEV; | 786 | return -ENODEV; |
787 | 787 | ||
788 | vmballoon_wq = create_freezeable_workqueue("vmmemctl"); | 788 | vmballoon_wq = create_freezable_workqueue("vmmemctl"); |
789 | if (!vmballoon_wq) { | 789 | if (!vmballoon_wq) { |
790 | pr_err("failed to create workqueue\n"); | 790 | pr_err("failed to create workqueue\n"); |
791 | return -ENOMEM; | 791 | return -ENOMEM; |
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c index d9d7efbc77cc..6322d1fb5d62 100644 --- a/drivers/mtd/nand/r852.c +++ b/drivers/mtd/nand/r852.c | |||
@@ -930,7 +930,7 @@ int r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) | |||
930 | 930 | ||
931 | init_completion(&dev->dma_done); | 931 | init_completion(&dev->dma_done); |
932 | 932 | ||
933 | dev->card_workqueue = create_freezeable_workqueue(DRV_NAME); | 933 | dev->card_workqueue = create_freezable_workqueue(DRV_NAME); |
934 | 934 | ||
935 | if (!dev->card_workqueue) | 935 | if (!dev->card_workqueue) |
936 | goto error9; | 936 | goto error9; |
diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c index 67822cf6c025..ac0d6a8613b5 100644 --- a/drivers/mtd/sm_ftl.c +++ b/drivers/mtd/sm_ftl.c | |||
@@ -1258,7 +1258,7 @@ static struct mtd_blktrans_ops sm_ftl_ops = { | |||
1258 | static __init int sm_module_init(void) | 1258 | static __init int sm_module_init(void) |
1259 | { | 1259 | { |
1260 | int error = 0; | 1260 | int error = 0; |
1261 | cache_flush_workqueue = create_freezeable_workqueue("smflush"); | 1261 | cache_flush_workqueue = create_freezable_workqueue("smflush"); |
1262 | 1262 | ||
1263 | if (IS_ERR(cache_flush_workqueue)) | 1263 | if (IS_ERR(cache_flush_workqueue)) |
1264 | return PTR_ERR(cache_flush_workqueue); | 1264 | return PTR_ERR(cache_flush_workqueue); |
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c index 7ab534aee452..7513c4523ac4 100644 --- a/drivers/net/can/mcp251x.c +++ b/drivers/net/can/mcp251x.c | |||
@@ -940,7 +940,7 @@ static int mcp251x_open(struct net_device *net) | |||
940 | goto open_unlock; | 940 | goto open_unlock; |
941 | } | 941 | } |
942 | 942 | ||
943 | priv->wq = create_freezeable_workqueue("mcp251x_wq"); | 943 | priv->wq = create_freezable_workqueue("mcp251x_wq"); |
944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); | 944 | INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); |
945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); | 945 | INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); |
946 | 946 | ||
diff --git a/drivers/net/can/softing/Kconfig b/drivers/net/can/softing/Kconfig index 8ba81b3ddd90..5de46a9a77bb 100644 --- a/drivers/net/can/softing/Kconfig +++ b/drivers/net/can/softing/Kconfig | |||
@@ -18,7 +18,7 @@ config CAN_SOFTING | |||
18 | config CAN_SOFTING_CS | 18 | config CAN_SOFTING_CS |
19 | tristate "Softing Gmbh CAN pcmcia cards" | 19 | tristate "Softing Gmbh CAN pcmcia cards" |
20 | depends on PCMCIA | 20 | depends on PCMCIA |
21 | select CAN_SOFTING | 21 | depends on CAN_SOFTING |
22 | ---help--- | 22 | ---help--- |
23 | Support for PCMCIA cards from Softing Gmbh & some cards | 23 | Support for PCMCIA cards from Softing Gmbh & some cards |
24 | from Vector Gmbh. | 24 | from Vector Gmbh. |
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c index 56166ae2059f..6aad64df4dcb 100644 --- a/drivers/net/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/cxgb4vf/cxgb4vf_main.c | |||
@@ -2040,7 +2040,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2040 | { | 2040 | { |
2041 | int i; | 2041 | int i; |
2042 | 2042 | ||
2043 | BUG_ON(adapter->debugfs_root == NULL); | 2043 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2044 | 2044 | ||
2045 | /* | 2045 | /* |
2046 | * Debugfs support is best effort. | 2046 | * Debugfs support is best effort. |
@@ -2061,7 +2061,7 @@ static int __devinit setup_debugfs(struct adapter *adapter) | |||
2061 | */ | 2061 | */ |
2062 | static void cleanup_debugfs(struct adapter *adapter) | 2062 | static void cleanup_debugfs(struct adapter *adapter) |
2063 | { | 2063 | { |
2064 | BUG_ON(adapter->debugfs_root == NULL); | 2064 | BUG_ON(IS_ERR_OR_NULL(adapter->debugfs_root)); |
2065 | 2065 | ||
2066 | /* | 2066 | /* |
2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove | 2067 | * Unlike our sister routine cleanup_proc(), we don't need to remove |
@@ -2489,17 +2489,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2489 | struct net_device *netdev; | 2489 | struct net_device *netdev; |
2490 | 2490 | ||
2491 | /* | 2491 | /* |
2492 | * Vet our module parameters. | ||
2493 | */ | ||
2494 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2495 | dev_err(&pdev->dev, "bad module parameter msi=%d; must be %d" | ||
2496 | " (MSI-X or MSI) or %d (MSI)\n", msi, MSI_MSIX, | ||
2497 | MSI_MSI); | ||
2498 | err = -EINVAL; | ||
2499 | goto err_out; | ||
2500 | } | ||
2501 | |||
2502 | /* | ||
2503 | * Print our driver banner the first time we're called to initialize a | 2492 | * Print our driver banner the first time we're called to initialize a |
2504 | * device. | 2493 | * device. |
2505 | */ | 2494 | */ |
@@ -2711,11 +2700,11 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2711 | /* | 2700 | /* |
2712 | * Set up our debugfs entries. | 2701 | * Set up our debugfs entries. |
2713 | */ | 2702 | */ |
2714 | if (cxgb4vf_debugfs_root) { | 2703 | if (!IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) { |
2715 | adapter->debugfs_root = | 2704 | adapter->debugfs_root = |
2716 | debugfs_create_dir(pci_name(pdev), | 2705 | debugfs_create_dir(pci_name(pdev), |
2717 | cxgb4vf_debugfs_root); | 2706 | cxgb4vf_debugfs_root); |
2718 | if (adapter->debugfs_root == NULL) | 2707 | if (IS_ERR_OR_NULL(adapter->debugfs_root)) |
2719 | dev_warn(&pdev->dev, "could not create debugfs" | 2708 | dev_warn(&pdev->dev, "could not create debugfs" |
2720 | " directory"); | 2709 | " directory"); |
2721 | else | 2710 | else |
@@ -2770,7 +2759,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev, | |||
2770 | */ | 2759 | */ |
2771 | 2760 | ||
2772 | err_free_debugfs: | 2761 | err_free_debugfs: |
2773 | if (adapter->debugfs_root) { | 2762 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2774 | cleanup_debugfs(adapter); | 2763 | cleanup_debugfs(adapter); |
2775 | debugfs_remove_recursive(adapter->debugfs_root); | 2764 | debugfs_remove_recursive(adapter->debugfs_root); |
2776 | } | 2765 | } |
@@ -2802,7 +2791,6 @@ err_release_regions: | |||
2802 | err_disable_device: | 2791 | err_disable_device: |
2803 | pci_disable_device(pdev); | 2792 | pci_disable_device(pdev); |
2804 | 2793 | ||
2805 | err_out: | ||
2806 | return err; | 2794 | return err; |
2807 | } | 2795 | } |
2808 | 2796 | ||
@@ -2840,7 +2828,7 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2840 | /* | 2828 | /* |
2841 | * Tear down our debugfs entries. | 2829 | * Tear down our debugfs entries. |
2842 | */ | 2830 | */ |
2843 | if (adapter->debugfs_root) { | 2831 | if (!IS_ERR_OR_NULL(adapter->debugfs_root)) { |
2844 | cleanup_debugfs(adapter); | 2832 | cleanup_debugfs(adapter); |
2845 | debugfs_remove_recursive(adapter->debugfs_root); | 2833 | debugfs_remove_recursive(adapter->debugfs_root); |
2846 | } | 2834 | } |
@@ -2874,6 +2862,46 @@ static void __devexit cxgb4vf_pci_remove(struct pci_dev *pdev) | |||
2874 | } | 2862 | } |
2875 | 2863 | ||
2876 | /* | 2864 | /* |
2865 | * "Shutdown" quiesce the device, stopping Ingress Packet and Interrupt | ||
2866 | * delivery. | ||
2867 | */ | ||
2868 | static void __devexit cxgb4vf_pci_shutdown(struct pci_dev *pdev) | ||
2869 | { | ||
2870 | struct adapter *adapter; | ||
2871 | int pidx; | ||
2872 | |||
2873 | adapter = pci_get_drvdata(pdev); | ||
2874 | if (!adapter) | ||
2875 | return; | ||
2876 | |||
2877 | /* | ||
2878 | * Disable all Virtual Interfaces. This will shut down the | ||
2879 | * delivery of all ingress packets into the chip for these | ||
2880 | * Virtual Interfaces. | ||
2881 | */ | ||
2882 | for_each_port(adapter, pidx) { | ||
2883 | struct net_device *netdev; | ||
2884 | struct port_info *pi; | ||
2885 | |||
2886 | if (!test_bit(pidx, &adapter->registered_device_map)) | ||
2887 | continue; | ||
2888 | |||
2889 | netdev = adapter->port[pidx]; | ||
2890 | if (!netdev) | ||
2891 | continue; | ||
2892 | |||
2893 | pi = netdev_priv(netdev); | ||
2894 | t4vf_enable_vi(adapter, pi->viid, false, false); | ||
2895 | } | ||
2896 | |||
2897 | /* | ||
2898 | * Free up all Queues which will prevent further DMA and | ||
2899 | * Interrupts allowing various internal pathways to drain. | ||
2900 | */ | ||
2901 | t4vf_free_sge_resources(adapter); | ||
2902 | } | ||
2903 | |||
2904 | /* | ||
2877 | * PCI Device registration data structures. | 2905 | * PCI Device registration data structures. |
2878 | */ | 2906 | */ |
2879 | #define CH_DEVICE(devid, idx) \ | 2907 | #define CH_DEVICE(devid, idx) \ |
@@ -2906,6 +2934,7 @@ static struct pci_driver cxgb4vf_driver = { | |||
2906 | .id_table = cxgb4vf_pci_tbl, | 2934 | .id_table = cxgb4vf_pci_tbl, |
2907 | .probe = cxgb4vf_pci_probe, | 2935 | .probe = cxgb4vf_pci_probe, |
2908 | .remove = __devexit_p(cxgb4vf_pci_remove), | 2936 | .remove = __devexit_p(cxgb4vf_pci_remove), |
2937 | .shutdown = __devexit_p(cxgb4vf_pci_shutdown), | ||
2909 | }; | 2938 | }; |
2910 | 2939 | ||
2911 | /* | 2940 | /* |
@@ -2915,14 +2944,25 @@ static int __init cxgb4vf_module_init(void) | |||
2915 | { | 2944 | { |
2916 | int ret; | 2945 | int ret; |
2917 | 2946 | ||
2947 | /* | ||
2948 | * Vet our module parameters. | ||
2949 | */ | ||
2950 | if (msi != MSI_MSIX && msi != MSI_MSI) { | ||
2951 | printk(KERN_WARNING KBUILD_MODNAME | ||
2952 | ": bad module parameter msi=%d; must be %d" | ||
2953 | " (MSI-X or MSI) or %d (MSI)\n", | ||
2954 | msi, MSI_MSIX, MSI_MSI); | ||
2955 | return -EINVAL; | ||
2956 | } | ||
2957 | |||
2918 | /* Debugfs support is optional, just warn if this fails */ | 2958 | /* Debugfs support is optional, just warn if this fails */ |
2919 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); | 2959 | cxgb4vf_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL); |
2920 | if (!cxgb4vf_debugfs_root) | 2960 | if (IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2921 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" | 2961 | printk(KERN_WARNING KBUILD_MODNAME ": could not create" |
2922 | " debugfs entry, continuing\n"); | 2962 | " debugfs entry, continuing\n"); |
2923 | 2963 | ||
2924 | ret = pci_register_driver(&cxgb4vf_driver); | 2964 | ret = pci_register_driver(&cxgb4vf_driver); |
2925 | if (ret < 0) | 2965 | if (ret < 0 && !IS_ERR_OR_NULL(cxgb4vf_debugfs_root)) |
2926 | debugfs_remove(cxgb4vf_debugfs_root); | 2966 | debugfs_remove(cxgb4vf_debugfs_root); |
2927 | return ret; | 2967 | return ret; |
2928 | } | 2968 | } |
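The IS_ERR_OR_NULL() conversions in cxgb4vf exist because debugfs_create_dir() returns ERR_PTR(-ENODEV), not NULL, when CONFIG_DEBUG_FS is disabled, so a plain NULL test mistakes the error cookie for a usable dentry. A sketch of the convention, with illustrative names:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/kernel.h>

static struct dentry *demo_debugfs_root;

static void demo_debugfs_setup(void)
{
	demo_debugfs_root = debugfs_create_dir("demo", NULL);
	if (IS_ERR_OR_NULL(demo_debugfs_root)) {
		pr_warn("demo: debugfs unavailable, continuing without it\n");
		demo_debugfs_root = NULL;	/* normalise to "no debugfs" */
	}
}

static void demo_debugfs_teardown(void)
{
	if (demo_debugfs_root)
		debugfs_remove_recursive(demo_debugfs_root);
}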
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c index 0f51c80475ce..192db226ec7f 100644 --- a/drivers/net/cxgb4vf/t4vf_hw.c +++ b/drivers/net/cxgb4vf/t4vf_hw.c | |||
@@ -171,7 +171,7 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, | |||
171 | delay_idx = 0; | 171 | delay_idx = 0; |
172 | ms = delay[0]; | 172 | ms = delay[0]; |
173 | 173 | ||
174 | for (i = 0; i < 500; i += ms) { | 174 | for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) { |
175 | if (sleep_ok) { | 175 | if (sleep_ok) { |
176 | ms = delay[delay_idx]; | 176 | ms = delay[delay_idx]; |
177 | if (delay_idx < ARRAY_SIZE(delay) - 1) | 177 | if (delay_idx < ARRAY_SIZE(delay) - 1) |
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c index 3065870cf2a7..3fa110ddb041 100644 --- a/drivers/net/e1000e/netdev.c +++ b/drivers/net/e1000e/netdev.c | |||
@@ -937,6 +937,9 @@ static void e1000_print_hw_hang(struct work_struct *work) | |||
937 | u16 phy_status, phy_1000t_status, phy_ext_status; | 937 | u16 phy_status, phy_1000t_status, phy_ext_status; |
938 | u16 pci_status; | 938 | u16 pci_status; |
939 | 939 | ||
940 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
941 | return; | ||
942 | |||
940 | e1e_rphy(hw, PHY_STATUS, &phy_status); | 943 | e1e_rphy(hw, PHY_STATUS, &phy_status); |
941 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); | 944 | e1e_rphy(hw, PHY_1000T_STATUS, &phy_1000t_status); |
942 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); | 945 | e1e_rphy(hw, PHY_EXT_STATUS, &phy_ext_status); |
@@ -1506,6 +1509,9 @@ static void e1000e_downshift_workaround(struct work_struct *work) | |||
1506 | struct e1000_adapter *adapter = container_of(work, | 1509 | struct e1000_adapter *adapter = container_of(work, |
1507 | struct e1000_adapter, downshift_task); | 1510 | struct e1000_adapter, downshift_task); |
1508 | 1511 | ||
1512 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
1513 | return; | ||
1514 | |||
1509 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); | 1515 | e1000e_gig_downshift_workaround_ich8lan(&adapter->hw); |
1510 | } | 1516 | } |
1511 | 1517 | ||
@@ -3338,6 +3344,21 @@ int e1000e_up(struct e1000_adapter *adapter) | |||
3338 | return 0; | 3344 | return 0; |
3339 | } | 3345 | } |
3340 | 3346 | ||
3347 | static void e1000e_flush_descriptors(struct e1000_adapter *adapter) | ||
3348 | { | ||
3349 | struct e1000_hw *hw = &adapter->hw; | ||
3350 | |||
3351 | if (!(adapter->flags2 & FLAG2_DMA_BURST)) | ||
3352 | return; | ||
3353 | |||
3354 | /* flush pending descriptor writebacks to memory */ | ||
3355 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
3356 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
3357 | |||
3358 | /* execute the writes immediately */ | ||
3359 | e1e_flush(); | ||
3360 | } | ||
3361 | |||
3341 | void e1000e_down(struct e1000_adapter *adapter) | 3362 | void e1000e_down(struct e1000_adapter *adapter) |
3342 | { | 3363 | { |
3343 | struct net_device *netdev = adapter->netdev; | 3364 | struct net_device *netdev = adapter->netdev; |
@@ -3377,6 +3398,9 @@ void e1000e_down(struct e1000_adapter *adapter) | |||
3377 | 3398 | ||
3378 | if (!pci_channel_offline(adapter->pdev)) | 3399 | if (!pci_channel_offline(adapter->pdev)) |
3379 | e1000e_reset(adapter); | 3400 | e1000e_reset(adapter); |
3401 | |||
3402 | e1000e_flush_descriptors(adapter); | ||
3403 | |||
3380 | e1000_clean_tx_ring(adapter); | 3404 | e1000_clean_tx_ring(adapter); |
3381 | e1000_clean_rx_ring(adapter); | 3405 | e1000_clean_rx_ring(adapter); |
3382 | 3406 | ||
@@ -3765,6 +3789,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3765 | { | 3789 | { |
3766 | struct e1000_adapter *adapter = container_of(work, | 3790 | struct e1000_adapter *adapter = container_of(work, |
3767 | struct e1000_adapter, update_phy_task); | 3791 | struct e1000_adapter, update_phy_task); |
3792 | |||
3793 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3794 | return; | ||
3795 | |||
3768 | e1000_get_phy_info(&adapter->hw); | 3796 | e1000_get_phy_info(&adapter->hw); |
3769 | } | 3797 | } |
3770 | 3798 | ||
@@ -3775,6 +3803,10 @@ static void e1000e_update_phy_task(struct work_struct *work) | |||
3775 | static void e1000_update_phy_info(unsigned long data) | 3803 | static void e1000_update_phy_info(unsigned long data) |
3776 | { | 3804 | { |
3777 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; | 3805 | struct e1000_adapter *adapter = (struct e1000_adapter *) data; |
3806 | |||
3807 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
3808 | return; | ||
3809 | |||
3778 | schedule_work(&adapter->update_phy_task); | 3810 | schedule_work(&adapter->update_phy_task); |
3779 | } | 3811 | } |
3780 | 3812 | ||
@@ -4149,6 +4181,9 @@ static void e1000_watchdog_task(struct work_struct *work) | |||
4149 | u32 link, tctl; | 4181 | u32 link, tctl; |
4150 | int tx_pending = 0; | 4182 | int tx_pending = 0; |
4151 | 4183 | ||
4184 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4185 | return; | ||
4186 | |||
4152 | link = e1000e_has_link(adapter); | 4187 | link = e1000e_has_link(adapter); |
4153 | if ((netif_carrier_ok(netdev)) && link) { | 4188 | if ((netif_carrier_ok(netdev)) && link) { |
4154 | /* Cancel scheduled suspend requests. */ | 4189 | /* Cancel scheduled suspend requests. */ |
@@ -4337,19 +4372,12 @@ link_up: | |||
4337 | else | 4372 | else |
4338 | ew32(ICS, E1000_ICS_RXDMT0); | 4373 | ew32(ICS, E1000_ICS_RXDMT0); |
4339 | 4374 | ||
4375 | /* flush pending descriptors to memory before detecting Tx hang */ | ||
4376 | e1000e_flush_descriptors(adapter); | ||
4377 | |||
4340 | /* Force detection of hung controller every watchdog period */ | 4378 | /* Force detection of hung controller every watchdog period */ |
4341 | adapter->detect_tx_hung = 1; | 4379 | adapter->detect_tx_hung = 1; |
4342 | 4380 | ||
4343 | /* flush partial descriptors to memory before detecting Tx hang */ | ||
4344 | if (adapter->flags2 & FLAG2_DMA_BURST) { | ||
4345 | ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD); | ||
4346 | ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD); | ||
4347 | /* | ||
4348 | * no need to flush the writes because the timeout code does | ||
4349 | * an er32 first thing | ||
4350 | */ | ||
4351 | } | ||
4352 | |||
4353 | /* | 4381 | /* |
4354 | * With 82571 controllers, LAA may be overwritten due to controller | 4382 | * With 82571 controllers, LAA may be overwritten due to controller |
4355 | * reset from the other port. Set the appropriate LAA in RAR[0] | 4383 | * reset from the other port. Set the appropriate LAA in RAR[0] |
@@ -4887,6 +4915,10 @@ static void e1000_reset_task(struct work_struct *work) | |||
4887 | struct e1000_adapter *adapter; | 4915 | struct e1000_adapter *adapter; |
4888 | adapter = container_of(work, struct e1000_adapter, reset_task); | 4916 | adapter = container_of(work, struct e1000_adapter, reset_task); |
4889 | 4917 | ||
4918 | /* don't run the task if already down */ | ||
4919 | if (test_bit(__E1000_DOWN, &adapter->state)) | ||
4920 | return; | ||
4921 | |||
4890 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && | 4922 | if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) && |
4891 | (adapter->flags & FLAG_RX_RESTART_NOW))) { | 4923 | (adapter->flags & FLAG_RX_RESTART_NOW))) { |
4892 | e1000e_dump(adapter); | 4924 | e1000e_dump(adapter); |
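All of the early returns added to e1000e share one rationale: these work items and the phy-update timer can still fire while the interface is being brought down, so each handler now checks the __E1000_DOWN state bit and bails out before touching the hardware. The guard in generic form, with illustrative names:

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define DEMO_STATE_DOWN	0		/* bit number, not a mask */

struct demo_adapter {
	unsigned long state;
	struct work_struct reset_task;
};

static void demo_reset_task(struct work_struct *work)
{
	struct demo_adapter *adapter =
		container_of(work, struct demo_adapter, reset_task);

	/* interface is (being) brought down: don't touch the hardware */
	if (test_bit(DEMO_STATE_DOWN, &adapter->state))
		return;

	/* ... the actual deferred work would run here ... */
}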
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index af09296ef0dd..9c0b1bac6af6 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c | |||
@@ -5645,6 +5645,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i | |||
5645 | goto out_error; | 5645 | goto out_error; |
5646 | } | 5646 | } |
5647 | 5647 | ||
5648 | netif_carrier_off(dev); | ||
5649 | |||
5648 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", | 5650 | dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n", |
5649 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); | 5651 | dev->name, np->phy_oui, np->phyaddr, dev->dev_addr); |
5650 | 5652 | ||
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c index 8753980668c7..c54a88274d51 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ixgbe/ixgbe_fcoe.c | |||
@@ -159,7 +159,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
159 | struct scatterlist *sg; | 159 | struct scatterlist *sg; |
160 | unsigned int i, j, dmacount; | 160 | unsigned int i, j, dmacount; |
161 | unsigned int len; | 161 | unsigned int len; |
162 | static const unsigned int bufflen = 4096; | 162 | static const unsigned int bufflen = IXGBE_FCBUFF_MIN; |
163 | unsigned int firstoff = 0; | 163 | unsigned int firstoff = 0; |
164 | unsigned int lastsize; | 164 | unsigned int lastsize; |
165 | unsigned int thisoff = 0; | 165 | unsigned int thisoff = 0; |
@@ -254,6 +254,24 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, | |||
254 | /* only the last buffer may have non-full bufflen */ | 254 | /* only the last buffer may have non-full bufflen */ |
255 | lastsize = thisoff + thislen; | 255 | lastsize = thisoff + thislen; |
256 | 256 | ||
257 | /* | ||
258 | * lastsize cannot equal the full buffer length. | ||
259 | * If it does, add one more buffer entry with lastsize = 1. | ||
260 | */ | ||
261 | if (lastsize == bufflen) { | ||
262 | if (j >= IXGBE_BUFFCNT_MAX) { | ||
263 | e_err(drv, "xid=%x:%d,%d,%d:addr=%llx " | ||
264 | "not enough user buffers. We need an extra " | ||
265 | "buffer because lastsize is bufflen.\n", | ||
266 | xid, i, j, dmacount, (u64)addr); | ||
267 | goto out_noddp_free; | ||
268 | } | ||
269 | |||
270 | ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma); | ||
271 | j++; | ||
272 | lastsize = 1; | ||
273 | } | ||
274 | |||
257 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); | 275 | fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT); |
258 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); | 276 | fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT); |
259 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); | 277 | fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT); |
@@ -532,6 +550,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
532 | e_err(drv, "failed to allocated FCoE DDP pool\n"); | 550 | e_err(drv, "failed to allocated FCoE DDP pool\n"); |
533 | 551 | ||
534 | spin_lock_init(&fcoe->lock); | 552 | spin_lock_init(&fcoe->lock); |
553 | |||
554 | /* Extra buffer to be shared by all DDPs for HW work around */ | ||
555 | fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); | ||
556 | if (fcoe->extra_ddp_buffer == NULL) { | ||
557 | e_err(drv, "failed to allocate extra DDP buffer\n"); | ||
558 | goto out_extra_ddp_buffer_alloc; | ||
559 | } | ||
560 | |||
561 | fcoe->extra_ddp_buffer_dma = | ||
562 | dma_map_single(&adapter->pdev->dev, | ||
563 | fcoe->extra_ddp_buffer, | ||
564 | IXGBE_FCBUFF_MIN, | ||
565 | DMA_FROM_DEVICE); | ||
566 | if (dma_mapping_error(&adapter->pdev->dev, | ||
567 | fcoe->extra_ddp_buffer_dma)) { | ||
568 | e_err(drv, "failed to map extra DDP buffer\n"); | ||
569 | goto out_extra_ddp_buffer_dma; | ||
570 | } | ||
535 | } | 571 | } |
536 | 572 | ||
537 | /* Enable L2 eth type filter for FCoE */ | 573 | /* Enable L2 eth type filter for FCoE */ |
@@ -581,6 +617,14 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) | |||
581 | } | 617 | } |
582 | } | 618 | } |
583 | #endif | 619 | #endif |
620 | |||
621 | return; | ||
622 | |||
623 | out_extra_ddp_buffer_dma: | ||
624 | kfree(fcoe->extra_ddp_buffer); | ||
625 | out_extra_ddp_buffer_alloc: | ||
626 | pci_pool_destroy(fcoe->pool); | ||
627 | fcoe->pool = NULL; | ||
584 | } | 628 | } |
585 | 629 | ||
586 | /** | 630 | /** |
@@ -600,6 +644,11 @@ void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter) | |||
600 | if (fcoe->pool) { | 644 | if (fcoe->pool) { |
601 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) | 645 | for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) |
602 | ixgbe_fcoe_ddp_put(adapter->netdev, i); | 646 | ixgbe_fcoe_ddp_put(adapter->netdev, i); |
647 | dma_unmap_single(&adapter->pdev->dev, | ||
648 | fcoe->extra_ddp_buffer_dma, | ||
649 | IXGBE_FCBUFF_MIN, | ||
650 | DMA_FROM_DEVICE); | ||
651 | kfree(fcoe->extra_ddp_buffer); | ||
603 | pci_pool_destroy(fcoe->pool); | 652 | pci_pool_destroy(fcoe->pool); |
604 | fcoe->pool = NULL; | 653 | fcoe->pool = NULL; |
605 | } | 654 | } |
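The extra DDP buffer introduced above is a single long-lived streaming DMA mapping, set up once and unwound in reverse order both on the error path and at FCoE cleanup. A stripped-down sketch of that lifecycle, with illustrative names and sizes:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/slab.h>

#define DEMO_BUF_LEN	4096

struct demo_ctx {
	struct device *dev;		/* &pdev->dev in a real driver */
	void *buf;
	dma_addr_t buf_dma;
};

static int demo_buf_setup(struct demo_ctx *ctx)
{
	ctx->buf = kmalloc(DEMO_BUF_LEN, GFP_KERNEL);
	if (!ctx->buf)
		return -ENOMEM;

	ctx->buf_dma = dma_map_single(ctx->dev, ctx->buf, DEMO_BUF_LEN,
				      DMA_FROM_DEVICE);
	if (dma_mapping_error(ctx->dev, ctx->buf_dma)) {
		kfree(ctx->buf);		/* unwind the allocation */
		ctx->buf = NULL;
		return -EIO;
	}
	return 0;
}

static void demo_buf_teardown(struct demo_ctx *ctx)
{
	if (!ctx->buf)
		return;
	dma_unmap_single(ctx->dev, ctx->buf_dma, DEMO_BUF_LEN, DMA_FROM_DEVICE);
	kfree(ctx->buf);
	ctx->buf = NULL;
}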
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.h b/drivers/net/ixgbe/ixgbe_fcoe.h index 4bc2c551c8db..65cc8fb14fe7 100644 --- a/drivers/net/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ixgbe/ixgbe_fcoe.h | |||
@@ -70,6 +70,8 @@ struct ixgbe_fcoe { | |||
70 | spinlock_t lock; | 70 | spinlock_t lock; |
71 | struct pci_pool *pool; | 71 | struct pci_pool *pool; |
72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; | 72 | struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; |
73 | unsigned char *extra_ddp_buffer; | ||
74 | dma_addr_t extra_ddp_buffer_dma; | ||
73 | }; | 75 | }; |
74 | 76 | ||
75 | #endif /* _IXGBE_FCOE_H */ | 77 | #endif /* _IXGBE_FCOE_H */ |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c index fbae703b46d7..30f9ccfb4f87 100644 --- a/drivers/net/ixgbe/ixgbe_main.c +++ b/drivers/net/ixgbe/ixgbe_main.c | |||
@@ -3728,7 +3728,8 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) | |||
3728 | * We need to try and force an autonegotiation | 3728 | * We need to try and force an autonegotiation |
3729 | * session, then bring up link. | 3729 | * session, then bring up link. |
3730 | */ | 3730 | */ |
3731 | hw->mac.ops.setup_sfp(hw); | 3731 | if (hw->mac.ops.setup_sfp) |
3732 | hw->mac.ops.setup_sfp(hw); | ||
3732 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 3733 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
3733 | schedule_work(&adapter->multispeed_fiber_task); | 3734 | schedule_work(&adapter->multispeed_fiber_task); |
3734 | } else { | 3735 | } else { |
@@ -5968,7 +5969,8 @@ static void ixgbe_sfp_config_module_task(struct work_struct *work) | |||
5968 | unregister_netdev(adapter->netdev); | 5969 | unregister_netdev(adapter->netdev); |
5969 | return; | 5970 | return; |
5970 | } | 5971 | } |
5971 | hw->mac.ops.setup_sfp(hw); | 5972 | if (hw->mac.ops.setup_sfp) |
5973 | hw->mac.ops.setup_sfp(hw); | ||
5972 | 5974 | ||
5973 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) | 5975 | if (!(adapter->flags & IXGBE_FLAG_IN_SFP_LINK_TASK)) |
5974 | /* This will also work for DA Twinax connections */ | 5976 | /* This will also work for DA Twinax connections */ |
diff --git a/drivers/net/pch_gbe/pch_gbe.h b/drivers/net/pch_gbe/pch_gbe.h index a0c26a99520f..e1e33c80fb25 100644 --- a/drivers/net/pch_gbe/pch_gbe.h +++ b/drivers/net/pch_gbe/pch_gbe.h | |||
@@ -73,7 +73,7 @@ struct pch_gbe_regs { | |||
73 | struct pch_gbe_regs_mac_adr mac_adr[16]; | 73 | struct pch_gbe_regs_mac_adr mac_adr[16]; |
74 | u32 ADDR_MASK; | 74 | u32 ADDR_MASK; |
75 | u32 MIIM; | 75 | u32 MIIM; |
76 | u32 reserve2; | 76 | u32 MAC_ADDR_LOAD; |
77 | u32 RGMII_ST; | 77 | u32 RGMII_ST; |
78 | u32 RGMII_CTRL; | 78 | u32 RGMII_CTRL; |
79 | u32 reserve3[3]; | 79 | u32 reserve3[3]; |
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c index 4c9a7d4f3fca..b99e90aca37d 100644 --- a/drivers/net/pch_gbe/pch_gbe_main.c +++ b/drivers/net/pch_gbe/pch_gbe_main.c | |||
@@ -29,6 +29,7 @@ const char pch_driver_version[] = DRV_VERSION; | |||
29 | #define PCH_GBE_SHORT_PKT 64 | 29 | #define PCH_GBE_SHORT_PKT 64 |
30 | #define DSC_INIT16 0xC000 | 30 | #define DSC_INIT16 0xC000 |
31 | #define PCH_GBE_DMA_ALIGN 0 | 31 | #define PCH_GBE_DMA_ALIGN 0 |
32 | #define PCH_GBE_DMA_PADDING 2 | ||
32 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ | 33 | #define PCH_GBE_WATCHDOG_PERIOD (1 * HZ) /* watchdog time */ |
33 | #define PCH_GBE_COPYBREAK_DEFAULT 256 | 34 | #define PCH_GBE_COPYBREAK_DEFAULT 256 |
34 | #define PCH_GBE_PCI_BAR 1 | 35 | #define PCH_GBE_PCI_BAR 1 |
@@ -88,6 +89,12 @@ static unsigned int copybreak __read_mostly = PCH_GBE_COPYBREAK_DEFAULT; | |||
88 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); | 89 | static int pch_gbe_mdio_read(struct net_device *netdev, int addr, int reg); |
89 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, | 90 | static void pch_gbe_mdio_write(struct net_device *netdev, int addr, int reg, |
90 | int data); | 91 | int data); |
92 | |||
93 | inline void pch_gbe_mac_load_mac_addr(struct pch_gbe_hw *hw) | ||
94 | { | ||
95 | iowrite32(0x01, &hw->reg->MAC_ADDR_LOAD); | ||
96 | } | ||
97 | |||
91 | /** | 98 | /** |
92 | * pch_gbe_mac_read_mac_addr - Read MAC address | 99 | * pch_gbe_mac_read_mac_addr - Read MAC address |
93 | * @hw: Pointer to the HW structure | 100 | * @hw: Pointer to the HW structure |
@@ -1365,16 +1372,13 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1365 | struct pch_gbe_buffer *buffer_info; | 1372 | struct pch_gbe_buffer *buffer_info; |
1366 | struct pch_gbe_rx_desc *rx_desc; | 1373 | struct pch_gbe_rx_desc *rx_desc; |
1367 | u32 length; | 1374 | u32 length; |
1368 | unsigned char tmp_packet[ETH_HLEN]; | ||
1369 | unsigned int i; | 1375 | unsigned int i; |
1370 | unsigned int cleaned_count = 0; | 1376 | unsigned int cleaned_count = 0; |
1371 | bool cleaned = false; | 1377 | bool cleaned = false; |
1372 | struct sk_buff *skb; | 1378 | struct sk_buff *skb, *new_skb; |
1373 | u8 dma_status; | 1379 | u8 dma_status; |
1374 | u16 gbec_status; | 1380 | u16 gbec_status; |
1375 | u32 tcp_ip_status; | 1381 | u32 tcp_ip_status; |
1376 | u8 skb_copy_flag = 0; | ||
1377 | u8 skb_padding_flag = 0; | ||
1378 | 1382 | ||
1379 | i = rx_ring->next_to_clean; | 1383 | i = rx_ring->next_to_clean; |
1380 | 1384 | ||
@@ -1418,55 +1422,70 @@ pch_gbe_clean_rx(struct pch_gbe_adapter *adapter, | |||
1418 | pr_err("Receive CRC Error\n"); | 1422 | pr_err("Receive CRC Error\n"); |
1419 | } else { | 1423 | } else { |
1420 | /* get receive length */ | 1424 | /* get receive length */ |
1421 | /* length convert[-3], padding[-2] */ | 1425 | /* length convert[-3] */ |
1422 | length = (rx_desc->rx_words_eob) - 3 - 2; | 1426 | length = (rx_desc->rx_words_eob) - 3; |
1423 | 1427 | ||
1424 | /* Decide the data conversion method */ | 1428 | /* Decide the data conversion method */ |
1425 | if (!adapter->rx_csum) { | 1429 | if (!adapter->rx_csum) { |
1426 | /* [Header:14][payload] */ | 1430 | /* [Header:14][payload] */ |
1427 | skb_padding_flag = 0; | 1431 | if (NET_IP_ALIGN) { |
1428 | skb_copy_flag = 1; | 1432 | /* Because alignment differs, |
1433 | * the new_skb is newly allocated, | ||
1434 | * and data is copied to new_skb.*/ | ||
1435 | new_skb = netdev_alloc_skb(netdev, | ||
1436 | length + NET_IP_ALIGN); | ||
1437 | if (!new_skb) { | ||
1438 | /* dorrop error */ | ||
1439 | pr_err("New skb allocation " | ||
1440 | "Error\n"); | ||
1441 | goto dorrop; | ||
1442 | } | ||
1443 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1444 | memcpy(new_skb->data, skb->data, | ||
1445 | length); | ||
1446 | skb = new_skb; | ||
1447 | } else { | ||
1448 | /* DMA buffer is used as SKB as it is.*/ | ||
1449 | buffer_info->skb = NULL; | ||
1450 | } | ||
1429 | } else { | 1451 | } else { |
1430 | /* [Header:14][padding:2][payload] */ | 1452 | /* [Header:14][padding:2][payload] */ |
1431 | skb_padding_flag = 1; | 1453 | /* The length includes padding length */ |
1432 | if (length < copybreak) | 1454 | length = length - PCH_GBE_DMA_PADDING; |
1433 | skb_copy_flag = 1; | 1455 | if ((length < copybreak) || |
1434 | else | 1456 | (NET_IP_ALIGN != PCH_GBE_DMA_PADDING)) { |
1435 | skb_copy_flag = 0; | 1457 | /* Because alignment differs, |
1436 | } | 1458 | * the new_skb is newly allocated, |
1437 | 1459 | * and data is copied to new_skb. | |
1438 | /* Data conversion */ | 1460 | * Padding data is deleted |
1439 | if (skb_copy_flag) { /* recycle skb */ | 1461 | * at the time of a copy.*/ |
1440 | struct sk_buff *new_skb; | 1462 | new_skb = netdev_alloc_skb(netdev, |
1441 | new_skb = | 1463 | length + NET_IP_ALIGN); |
1442 | netdev_alloc_skb(netdev, | 1464 | if (!new_skb) { |
1443 | length + NET_IP_ALIGN); | 1465 | /* dorrop error */ |
1444 | if (new_skb) { | 1466 | pr_err("New skb allocation " |
1445 | if (!skb_padding_flag) { | 1467 | "Error\n"); |
1446 | skb_reserve(new_skb, | 1468 | goto dorrop; |
1447 | NET_IP_ALIGN); | ||
1448 | } | 1469 | } |
1470 | skb_reserve(new_skb, NET_IP_ALIGN); | ||
1449 | memcpy(new_skb->data, skb->data, | 1471 | memcpy(new_skb->data, skb->data, |
1450 | length); | 1472 | ETH_HLEN); |
1451 | /* save the skb | 1473 | memcpy(&new_skb->data[ETH_HLEN], |
1452 | * in buffer_info as good */ | 1474 | &skb->data[ETH_HLEN + |
1475 | PCH_GBE_DMA_PADDING], | ||
1476 | length - ETH_HLEN); | ||
1453 | skb = new_skb; | 1477 | skb = new_skb; |
1454 | } else if (!skb_padding_flag) { | 1478 | } else { |
1455 | /* dorrop error */ | 1479 | /* Padding data is deleted |
1456 | pr_err("New skb allocation Error\n"); | 1480 | * by moving header data.*/ |
1457 | goto dorrop; | 1481 | memmove(&skb->data[PCH_GBE_DMA_PADDING], |
1482 | &skb->data[0], ETH_HLEN); | ||
1483 | skb_reserve(skb, NET_IP_ALIGN); | ||
1484 | buffer_info->skb = NULL; | ||
1458 | } | 1485 | } |
1459 | } else { | ||
1460 | buffer_info->skb = NULL; | ||
1461 | } | 1486 | } |
1462 | if (skb_padding_flag) { | 1487 | /* The length includes FCS length */ |
1463 | memcpy(&tmp_packet[0], &skb->data[0], ETH_HLEN); | 1488 | length = length - ETH_FCS_LEN; |
1464 | memcpy(&skb->data[NET_IP_ALIGN], &tmp_packet[0], | ||
1465 | ETH_HLEN); | ||
1466 | skb_reserve(skb, NET_IP_ALIGN); | ||
1467 | |||
1468 | } | ||
1469 | |||
1470 | /* update status of driver */ | 1489 | /* update status of driver */ |
1471 | adapter->stats.rx_bytes += length; | 1490 | adapter->stats.rx_bytes += length; |
1472 | adapter->stats.rx_packets++; | 1491 | adapter->stats.rx_packets++; |
@@ -2318,6 +2337,7 @@ static int pch_gbe_probe(struct pci_dev *pdev, | |||
2318 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; | 2337 | netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO; |
2319 | pch_gbe_set_ethtool_ops(netdev); | 2338 | pch_gbe_set_ethtool_ops(netdev); |
2320 | 2339 | ||
2340 | pch_gbe_mac_load_mac_addr(&adapter->hw); | ||
2321 | pch_gbe_mac_reset_hw(&adapter->hw); | 2341 | pch_gbe_mac_reset_hw(&adapter->hw); |
2322 | 2342 | ||
2323 | /* setup the private structure */ | 2343 | /* setup the private structure */ |
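In the pch_gbe receive path above, the controller inserts two padding bytes between the 14-byte Ethernet header and the payload whenever receive checksum offload is enabled; the old code stripped them by bouncing the header through a stack buffer. The rework either copies into a freshly aligned skb (dropping the padding during the copy) or, when the DMA buffer can be handed to the stack as-is, shifts the header in place. A hedged sketch of the in-place variant, assuming PCH_GBE_DMA_PADDING == NET_IP_ALIGN == 2 as in the hunk:

    /* buffer layout from the hardware: [header:14][pad:2][payload] */
    memmove(&skb->data[PCH_GBE_DMA_PADDING], &skb->data[0], ETH_HLEN);
    skb_reserve(skb, NET_IP_ALIGN);     /* data now points at the shifted header */

After the move the padding bytes are overwritten by the header and the payload follows it contiguously, so the packet body itself never has to be copied.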
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 59ccf0c5c610..469ab0b7ce31 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c | |||
@@ -3190,6 +3190,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
3190 | if (pci_dev_run_wake(pdev)) | 3190 | if (pci_dev_run_wake(pdev)) |
3191 | pm_runtime_put_noidle(&pdev->dev); | 3191 | pm_runtime_put_noidle(&pdev->dev); |
3192 | 3192 | ||
3193 | netif_carrier_off(dev); | ||
3194 | |||
3193 | out: | 3195 | out: |
3194 | return rc; | 3196 | return rc; |
3195 | 3197 | ||
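The r8169 probe hunk marks the carrier as off before the first link interrupt arrives; without it, the freshly registered interface can report a stale "link up" operstate until the PHY delivers its first link-status update. A hedged sketch of the probe-time ordering (context simplified):

    rc = register_netdev(dev);
    if (rc < 0)
            goto err_out;
    netif_carrier_off(dev);     /* start with operstate down until the PHY reports link */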
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c index 34a0af3837f9..0e5f03135b50 100644 --- a/drivers/net/stmmac/stmmac_main.c +++ b/drivers/net/stmmac/stmmac_main.c | |||
@@ -1560,8 +1560,10 @@ static int stmmac_mac_device_setup(struct net_device *dev) | |||
1560 | 1560 | ||
1561 | priv->hw = device; | 1561 | priv->hw = device; |
1562 | 1562 | ||
1563 | if (device_can_wakeup(priv->device)) | 1563 | if (device_can_wakeup(priv->device)) { |
1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ | 1564 | priv->wolopts = WAKE_MAGIC; /* Magic Frame as default */ |
1565 | enable_irq_wake(dev->irq); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | return 0; | 1568 | return 0; |
1567 | } | 1569 | } |
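The stmmac change arms the MAC interrupt as a system wakeup source whenever the platform marks the device wakeup-capable: setting wolopts alone selects the Wake-on-LAN policy but does not let the IRQ actually bring the system out of suspend. A minimal, hedged restatement of the pairing:

    if (device_can_wakeup(priv->device)) {
            priv->wolopts = WAKE_MAGIC;     /* default WoL policy */
            enable_irq_wake(dev->irq);      /* allow this IRQ to wake the system */
    }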
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 93b32d366611..06c0e5033656 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c | |||
@@ -11158,7 +11158,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11158 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11159 | break; /* We have no PHY */ | 11159 | break; /* We have no PHY */ |
11160 | 11160 | ||
11161 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11161 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11162 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11163 | !netif_running(dev))) | ||
11162 | return -EAGAIN; | 11164 | return -EAGAIN; |
11163 | 11165 | ||
11164 | spin_lock_bh(&tp->lock); | 11166 | spin_lock_bh(&tp->lock); |
@@ -11174,7 +11176,9 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) | |||
11174 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) | 11176 | if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) |
11175 | break; /* We have no PHY */ | 11177 | break; /* We have no PHY */ |
11176 | 11178 | ||
11177 | if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) | 11179 | if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) || |
11180 | ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && | ||
11181 | !netif_running(dev))) | ||
11178 | return -EAGAIN; | 11182 | return -EAGAIN; |
11179 | 11183 | ||
11180 | spin_lock_bh(&tp->lock); | 11184 | spin_lock_bh(&tp->lock); |
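The tg3 hunks extend the existing low-power guard in the MDIO ioctl paths: when ASF management firmware is enabled and the interface is down, the firmware owns the PHY, so register access is deferred with -EAGAIN rather than attempted. Sketch of the combined condition, using the flags as they appear in the hunk:

    if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) ||
        ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) && !netif_running(dev)))
            return -EAGAIN;     /* PHY is not safely accessible right now */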
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c index bed8fcedff49..6d83812603b6 100644 --- a/drivers/net/usb/hso.c +++ b/drivers/net/usb/hso.c | |||
@@ -2628,15 +2628,15 @@ exit: | |||
2628 | 2628 | ||
2629 | static void hso_free_tiomget(struct hso_serial *serial) | 2629 | static void hso_free_tiomget(struct hso_serial *serial) |
2630 | { | 2630 | { |
2631 | struct hso_tiocmget *tiocmget = serial->tiocmget; | 2631 | struct hso_tiocmget *tiocmget; |
2632 | if (!serial) | ||
2633 | return; | ||
2634 | tiocmget = serial->tiocmget; | ||
2632 | if (tiocmget) { | 2635 | if (tiocmget) { |
2633 | if (tiocmget->urb) { | 2636 | usb_free_urb(tiocmget->urb); |
2634 | usb_free_urb(tiocmget->urb); | 2637 | tiocmget->urb = NULL; |
2635 | tiocmget->urb = NULL; | ||
2636 | } | ||
2637 | serial->tiocmget = NULL; | 2638 | serial->tiocmget = NULL; |
2638 | kfree(tiocmget); | 2639 | kfree(tiocmget); |
2639 | |||
2640 | } | 2640 | } |
2641 | } | 2641 | } |
2642 | 2642 | ||
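The hso fix above addresses a classic check-after-use bug: the old code dereferenced serial in the initializer and only then tested it for NULL, so the guard could never help. The rewrite declares the local first, checks the pointer, then loads the member; it also drops the redundant inner test because usb_free_urb() already accepts NULL. A hedged sketch of the corrected shape:

    static void example_free(struct hso_serial *serial)
    {
            struct hso_tiocmget *tiocmget;

            if (!serial)                    /* validate before any dereference */
                    return;
            tiocmget = serial->tiocmget;
            if (tiocmget) {
                    usb_free_urb(tiocmget->urb);    /* NULL-safe */
                    tiocmget->urb = NULL;
                    serial->tiocmget = NULL;
                    kfree(tiocmget);
            }
    }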
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index ed9a41643ff4..95c41d56631c 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c | |||
@@ -931,8 +931,10 @@ fail_halt: | |||
931 | if (urb != NULL) { | 931 | if (urb != NULL) { |
932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); | 932 | clear_bit (EVENT_RX_MEMORY, &dev->flags); |
933 | status = usb_autopm_get_interface(dev->intf); | 933 | status = usb_autopm_get_interface(dev->intf); |
934 | if (status < 0) | 934 | if (status < 0) { |
935 | usb_free_urb(urb); | ||
935 | goto fail_lowmem; | 936 | goto fail_lowmem; |
937 | } | ||
936 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) | 938 | if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK) |
937 | resched = 0; | 939 | resched = 0; |
938 | usb_autopm_put_interface(dev->intf); | 940 | usb_autopm_put_interface(dev->intf); |
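The usbnet hunk plugs a memory leak on an error path: if usb_autopm_get_interface() fails, the URB allocated just above was abandoned on the jump to fail_lowmem. The fix frees it before bailing out. A hedged sketch of the pattern:

    urb = usb_alloc_urb(0, GFP_KERNEL);
    if (!urb)
            goto fail_lowmem;
    status = usb_autopm_get_interface(dev->intf);
    if (status < 0) {
            usb_free_urb(urb);      /* release what this path allocated */
            goto fail_lowmem;
    }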
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c index a9b852be4509..39b6f16c87fa 100644 --- a/drivers/net/wireless/iwlwifi/iwl-3945.c +++ b/drivers/net/wireless/iwlwifi/iwl-3945.c | |||
@@ -402,72 +402,6 @@ static void iwl3945_accumulative_statistics(struct iwl_priv *priv, | |||
402 | } | 402 | } |
403 | #endif | 403 | #endif |
404 | 404 | ||
405 | /** | ||
406 | * iwl3945_good_plcp_health - checks for plcp error. | ||
407 | * | ||
408 | * When the plcp error is exceeding the thresholds, reset the radio | ||
409 | * to improve the throughput. | ||
410 | */ | ||
411 | static bool iwl3945_good_plcp_health(struct iwl_priv *priv, | ||
412 | struct iwl_rx_packet *pkt) | ||
413 | { | ||
414 | bool rc = true; | ||
415 | struct iwl3945_notif_statistics current_stat; | ||
416 | int combined_plcp_delta; | ||
417 | unsigned int plcp_msec; | ||
418 | unsigned long plcp_received_jiffies; | ||
419 | |||
420 | if (priv->cfg->base_params->plcp_delta_threshold == | ||
421 | IWL_MAX_PLCP_ERR_THRESHOLD_DISABLE) { | ||
422 | IWL_DEBUG_RADIO(priv, "plcp_err check disabled\n"); | ||
423 | return rc; | ||
424 | } | ||
425 | memcpy(¤t_stat, pkt->u.raw, sizeof(struct | ||
426 | iwl3945_notif_statistics)); | ||
427 | /* | ||
428 | * check for plcp_err and trigger radio reset if it exceeds | ||
429 | * the plcp error threshold plcp_delta. | ||
430 | */ | ||
431 | plcp_received_jiffies = jiffies; | ||
432 | plcp_msec = jiffies_to_msecs((long) plcp_received_jiffies - | ||
433 | (long) priv->plcp_jiffies); | ||
434 | priv->plcp_jiffies = plcp_received_jiffies; | ||
435 | /* | ||
436 | * check to make sure plcp_msec is not 0 to prevent division | ||
437 | * by zero. | ||
438 | */ | ||
439 | if (plcp_msec) { | ||
440 | combined_plcp_delta = | ||
441 | (le32_to_cpu(current_stat.rx.ofdm.plcp_err) - | ||
442 | le32_to_cpu(priv->_3945.statistics.rx.ofdm.plcp_err)); | ||
443 | |||
444 | if ((combined_plcp_delta > 0) && | ||
445 | ((combined_plcp_delta * 100) / plcp_msec) > | ||
446 | priv->cfg->base_params->plcp_delta_threshold) { | ||
447 | /* | ||
448 | * if plcp_err exceed the threshold, the following | ||
449 | * data is printed in csv format: | ||
450 | * Text: plcp_err exceeded %d, | ||
451 | * Received ofdm.plcp_err, | ||
452 | * Current ofdm.plcp_err, | ||
453 | * combined_plcp_delta, | ||
454 | * plcp_msec | ||
455 | */ | ||
456 | IWL_DEBUG_RADIO(priv, "plcp_err exceeded %u, " | ||
457 | "%u, %d, %u mSecs\n", | ||
458 | priv->cfg->base_params->plcp_delta_threshold, | ||
459 | le32_to_cpu(current_stat.rx.ofdm.plcp_err), | ||
460 | combined_plcp_delta, plcp_msec); | ||
461 | /* | ||
462 | * Reset the RF radio due to the high plcp | ||
463 | * error rate | ||
464 | */ | ||
465 | rc = false; | ||
466 | } | ||
467 | } | ||
468 | return rc; | ||
469 | } | ||
470 | |||
471 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, | 405 | void iwl3945_hw_rx_statistics(struct iwl_priv *priv, |
472 | struct iwl_rx_mem_buffer *rxb) | 406 | struct iwl_rx_mem_buffer *rxb) |
473 | { | 407 | { |
@@ -2734,7 +2668,6 @@ static struct iwl_lib_ops iwl3945_lib = { | |||
2734 | .isr_ops = { | 2668 | .isr_ops = { |
2735 | .isr = iwl_isr_legacy, | 2669 | .isr = iwl_isr_legacy, |
2736 | }, | 2670 | }, |
2737 | .check_plcp_health = iwl3945_good_plcp_health, | ||
2738 | 2671 | ||
2739 | .debugfs_ops = { | 2672 | .debugfs_ops = { |
2740 | .rx_stats_read = iwl3945_ucode_rx_stats_read, | 2673 | .rx_stats_read = iwl3945_ucode_rx_stats_read, |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index d163bc2e2b9e..a59af5b24f0a 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -227,7 +227,7 @@ config SONYPI_COMPAT | |||
227 | config IDEAPAD_LAPTOP | 227 | config IDEAPAD_LAPTOP |
228 | tristate "Lenovo IdeaPad Laptop Extras" | 228 | tristate "Lenovo IdeaPad Laptop Extras" |
229 | depends on ACPI | 229 | depends on ACPI |
230 | depends on RFKILL | 230 | depends on RFKILL && INPUT |
231 | select INPUT_SPARSEKMAP | 231 | select INPUT_SPARSEKMAP |
232 | help | 232 | help |
233 | This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. | 233 | This is a driver for the rfkill switches on Lenovo IdeaPad netbooks. |
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index c5c4b8c32eb8..38b34a73866a 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c | |||
@@ -84,7 +84,7 @@ MODULE_LICENSE("GPL"); | |||
84 | */ | 84 | */ |
85 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" | 85 | #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" |
86 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" | 86 | #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" |
87 | #define WMID_GUID1 "6AF4F258-B401-42fd-BE91-3D4AC2D7C0D3" | 87 | #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" |
88 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" | 88 | #define WMID_GUID2 "95764E09-FB56-4e83-B31A-37761F60994A" |
89 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" | 89 | #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" |
90 | 90 | ||
@@ -1280,7 +1280,7 @@ static ssize_t set_bool_threeg(struct device *dev, | |||
1280 | return -EINVAL; | 1280 | return -EINVAL; |
1281 | return count; | 1281 | return count; |
1282 | } | 1282 | } |
1283 | static DEVICE_ATTR(threeg, S_IWUGO | S_IRUGO | S_IWUSR, show_bool_threeg, | 1283 | static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, |
1284 | set_bool_threeg); | 1284 | set_bool_threeg); |
1285 | 1285 | ||
1286 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, | 1286 | static ssize_t show_interface(struct device *dev, struct device_attribute *attr, |
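Both the acer-wmi and tc1100-wmi hunks drop S_IWUGO from sysfs attribute modes. S_IWUGO makes the attribute world-writable, which is rejected for files that change hardware state, and combining it with S_IWUSR was redundant since S_IWUGO already includes the owner write bit. The result is a 0644 attribute, readable by everyone and writable by root only; an illustrative declaration:

    /* world-readable, owner-writable sysfs attribute (name is illustrative) */
    static DEVICE_ATTR(example, S_IRUGO | S_IWUSR, show_example, set_example);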
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c index 4633fd8532cc..fe495939c307 100644 --- a/drivers/platform/x86/asus_acpi.c +++ b/drivers/platform/x86/asus_acpi.c | |||
@@ -1081,14 +1081,8 @@ static int asus_hotk_add_fs(struct acpi_device *device) | |||
1081 | struct proc_dir_entry *proc; | 1081 | struct proc_dir_entry *proc; |
1082 | mode_t mode; | 1082 | mode_t mode; |
1083 | 1083 | ||
1084 | /* | ||
1085 | * If parameter uid or gid is not changed, keep the default setting for | ||
1086 | * our proc entries (-rw-rw-rw-) else, it means we care about security, | ||
1087 | * and then set to -rw-rw---- | ||
1088 | */ | ||
1089 | |||
1090 | if ((asus_uid == 0) && (asus_gid == 0)) { | 1084 | if ((asus_uid == 0) && (asus_gid == 0)) { |
1091 | mode = S_IFREG | S_IRUGO | S_IWUGO; | 1085 | mode = S_IFREG | S_IRUGO | S_IWUSR | S_IWGRP; |
1092 | } else { | 1086 | } else { |
1093 | mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; | 1087 | mode = S_IFREG | S_IRUSR | S_IRGRP | S_IWUSR | S_IWGRP; |
1094 | printk(KERN_WARNING " asus_uid and asus_gid parameters are " | 1088 | printk(KERN_WARNING " asus_uid and asus_gid parameters are " |
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c index 34657f96b5a5..ad24ef36f9f7 100644 --- a/drivers/platform/x86/dell-laptop.c +++ b/drivers/platform/x86/dell-laptop.c | |||
@@ -290,9 +290,12 @@ static int dell_rfkill_set(void *data, bool blocked) | |||
290 | dell_send_request(buffer, 17, 11); | 290 | dell_send_request(buffer, 17, 11); |
291 | 291 | ||
292 | /* If the hardware switch controls this radio, and the hardware | 292 | /* If the hardware switch controls this radio, and the hardware |
293 | switch is disabled, don't allow changing the software state */ | 293 | switch is disabled, don't allow changing the software state. |
294 | If the hardware switch is reported as not supported, always | ||
295 | fire the SMI to toggle the killswitch. */ | ||
294 | if ((hwswitch_state & BIT(hwswitch_bit)) && | 296 | if ((hwswitch_state & BIT(hwswitch_bit)) && |
295 | !(buffer->output[1] & BIT(16))) { | 297 | !(buffer->output[1] & BIT(16)) && |
298 | (buffer->output[1] & BIT(0))) { | ||
296 | ret = -EINVAL; | 299 | ret = -EINVAL; |
297 | goto out; | 300 | goto out; |
298 | } | 301 | } |
@@ -398,6 +401,23 @@ static const struct file_operations dell_debugfs_fops = { | |||
398 | 401 | ||
399 | static void dell_update_rfkill(struct work_struct *ignored) | 402 | static void dell_update_rfkill(struct work_struct *ignored) |
400 | { | 403 | { |
404 | int status; | ||
405 | |||
406 | get_buffer(); | ||
407 | dell_send_request(buffer, 17, 11); | ||
408 | status = buffer->output[1]; | ||
409 | release_buffer(); | ||
410 | |||
411 | /* if hardware rfkill is not supported, set it explicitly */ | ||
412 | if (!(status & BIT(0))) { | ||
413 | if (wifi_rfkill) | ||
414 | dell_rfkill_set((void *)1, !((status & BIT(17)) >> 17)); | ||
415 | if (bluetooth_rfkill) | ||
416 | dell_rfkill_set((void *)2, !((status & BIT(18)) >> 18)); | ||
417 | if (wwan_rfkill) | ||
418 | dell_rfkill_set((void *)3, !((status & BIT(19)) >> 19)); | ||
419 | } | ||
420 | |||
401 | if (wifi_rfkill) | 421 | if (wifi_rfkill) |
402 | dell_rfkill_query(wifi_rfkill, (void *)1); | 422 | dell_rfkill_query(wifi_rfkill, (void *)1); |
403 | if (bluetooth_rfkill) | 423 | if (bluetooth_rfkill) |
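The dell-laptop changes handle BIOSes that report no hardware kill switch: dell_rfkill_set() now only refuses a software change when the switch both controls that radio and is reported as supported (BIT(0) of the status word), and dell_update_rfkill() pushes the current software state out explicitly when hardware rfkill is absent. A hedged sketch of the status-word decoding, with bit meanings as implied by the hunk:

    status = buffer->output[1];
    if (!(status & BIT(0))) {                       /* no hardware switch support */
            bool wifi_blocked = !((status & BIT(17)) >> 17);
            dell_rfkill_set((void *)1, wifi_blocked);       /* sync software state */
    }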
diff --git a/drivers/platform/x86/intel_pmic_gpio.c b/drivers/platform/x86/intel_pmic_gpio.c index 930e62762365..61433d492862 100644 --- a/drivers/platform/x86/intel_pmic_gpio.c +++ b/drivers/platform/x86/intel_pmic_gpio.c | |||
@@ -60,69 +60,20 @@ enum pmic_gpio_register { | |||
60 | #define GPOSW_DOU 0x08 | 60 | #define GPOSW_DOU 0x08 |
61 | #define GPOSW_RDRV 0x30 | 61 | #define GPOSW_RDRV 0x30 |
62 | 62 | ||
63 | #define GPIO_UPDATE_TYPE 0x80000000 | ||
63 | 64 | ||
64 | #define NUM_GPIO 24 | 65 | #define NUM_GPIO 24 |
65 | 66 | ||
66 | struct pmic_gpio_irq { | ||
67 | spinlock_t lock; | ||
68 | u32 trigger[NUM_GPIO]; | ||
69 | u32 dirty; | ||
70 | struct work_struct work; | ||
71 | }; | ||
72 | |||
73 | |||
74 | struct pmic_gpio { | 67 | struct pmic_gpio { |
68 | struct mutex buslock; | ||
75 | struct gpio_chip chip; | 69 | struct gpio_chip chip; |
76 | struct pmic_gpio_irq irqtypes; | ||
77 | void *gpiointr; | 70 | void *gpiointr; |
78 | int irq; | 71 | int irq; |
79 | unsigned irq_base; | 72 | unsigned irq_base; |
73 | unsigned int update_type; | ||
74 | u32 trigger_type; | ||
80 | }; | 75 | }; |
81 | 76 | ||
82 | static void pmic_program_irqtype(int gpio, int type) | ||
83 | { | ||
84 | if (type & IRQ_TYPE_EDGE_RISING) | ||
85 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x20, 0x20); | ||
86 | else | ||
87 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x20); | ||
88 | |||
89 | if (type & IRQ_TYPE_EDGE_FALLING) | ||
90 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x10, 0x10); | ||
91 | else | ||
92 | intel_scu_ipc_update_register(GPIO0 + gpio, 0x00, 0x10); | ||
93 | }; | ||
94 | |||
95 | static void pmic_irqtype_work(struct work_struct *work) | ||
96 | { | ||
97 | struct pmic_gpio_irq *t = | ||
98 | container_of(work, struct pmic_gpio_irq, work); | ||
99 | unsigned long flags; | ||
100 | int i; | ||
101 | u16 type; | ||
102 | |||
103 | spin_lock_irqsave(&t->lock, flags); | ||
104 | /* As we drop the lock, we may need multiple scans if we race the | ||
105 | pmic_irq_type function */ | ||
106 | while (t->dirty) { | ||
107 | /* | ||
108 | * For each pin that has the dirty bit set send an IPC | ||
109 | * message to configure the hardware via the PMIC | ||
110 | */ | ||
111 | for (i = 0; i < NUM_GPIO; i++) { | ||
112 | if (!(t->dirty & (1 << i))) | ||
113 | continue; | ||
114 | t->dirty &= ~(1 << i); | ||
115 | /* We can't trust the array entry or dirty | ||
116 | once the lock is dropped */ | ||
117 | type = t->trigger[i]; | ||
118 | spin_unlock_irqrestore(&t->lock, flags); | ||
119 | pmic_program_irqtype(i, type); | ||
120 | spin_lock_irqsave(&t->lock, flags); | ||
121 | } | ||
122 | } | ||
123 | spin_unlock_irqrestore(&t->lock, flags); | ||
124 | } | ||
125 | |||
126 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) | 77 | static int pmic_gpio_direction_input(struct gpio_chip *chip, unsigned offset) |
127 | { | 78 | { |
128 | if (offset > 8) { | 79 | if (offset > 8) { |
@@ -190,25 +141,24 @@ static void pmic_gpio_set(struct gpio_chip *chip, unsigned offset, int value) | |||
190 | 1 << (offset - 16)); | 141 | 1 << (offset - 16)); |
191 | } | 142 | } |
192 | 143 | ||
193 | static int pmic_irq_type(unsigned irq, unsigned type) | 144 | /* |
145 | * This is called from genirq with pg->buslock locked and | ||
146 | * irq_desc->lock held. We can not access the scu bus here, so we | ||
147 | * store the change and update in the bus_sync_unlock() function below | ||
148 | */ | ||
149 | static int pmic_irq_type(struct irq_data *data, unsigned type) | ||
194 | { | 150 | { |
195 | struct pmic_gpio *pg = get_irq_chip_data(irq); | 151 | struct pmic_gpio *pg = irq_data_get_irq_chip_data(data); |
196 | u32 gpio = irq - pg->irq_base; | 152 | u32 gpio = data->irq - pg->irq_base; |
197 | unsigned long flags; | ||
198 | 153 | ||
199 | if (gpio >= pg->chip.ngpio) | 154 | if (gpio >= pg->chip.ngpio) |
200 | return -EINVAL; | 155 | return -EINVAL; |
201 | 156 | ||
202 | spin_lock_irqsave(&pg->irqtypes.lock, flags); | 157 | pg->trigger_type = type; |
203 | pg->irqtypes.trigger[gpio] = type; | 158 | pg->update_type = gpio | GPIO_UPDATE_TYPE; |
204 | pg->irqtypes.dirty |= (1 << gpio); | ||
205 | spin_unlock_irqrestore(&pg->irqtypes.lock, flags); | ||
206 | schedule_work(&pg->irqtypes.work); | ||
207 | return 0; | 159 | return 0; |
208 | } | 160 | } |
209 | 161 | ||
210 | |||
211 | |||
212 | static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | 162 | static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) |
213 | { | 163 | { |
214 | struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); | 164 | struct pmic_gpio *pg = container_of(chip, struct pmic_gpio, chip); |
@@ -217,38 +167,32 @@ static int pmic_gpio_to_irq(struct gpio_chip *chip, unsigned offset) | |||
217 | } | 167 | } |
218 | 168 | ||
219 | /* the gpiointr register is read-clear, so just do nothing. */ | 169 | /* the gpiointr register is read-clear, so just do nothing. */ |
220 | static void pmic_irq_unmask(unsigned irq) | 170 | static void pmic_irq_unmask(struct irq_data *data) { } |
221 | { | ||
222 | }; | ||
223 | 171 | ||
224 | static void pmic_irq_mask(unsigned irq) | 172 | static void pmic_irq_mask(struct irq_data *data) { } |
225 | { | ||
226 | }; | ||
227 | 173 | ||
228 | static struct irq_chip pmic_irqchip = { | 174 | static struct irq_chip pmic_irqchip = { |
229 | .name = "PMIC-GPIO", | 175 | .name = "PMIC-GPIO", |
230 | .mask = pmic_irq_mask, | 176 | .irq_mask = pmic_irq_mask, |
231 | .unmask = pmic_irq_unmask, | 177 | .irq_unmask = pmic_irq_unmask, |
232 | .set_type = pmic_irq_type, | 178 | .irq_set_type = pmic_irq_type, |
233 | }; | 179 | }; |
234 | 180 | ||
235 | static void pmic_irq_handler(unsigned irq, struct irq_desc *desc) | 181 | static irqreturn_t pmic_irq_handler(int irq, void *data) |
236 | { | 182 | { |
237 | struct pmic_gpio *pg = (struct pmic_gpio *)get_irq_data(irq); | 183 | struct pmic_gpio *pg = data; |
238 | u8 intsts = *((u8 *)pg->gpiointr + 4); | 184 | u8 intsts = *((u8 *)pg->gpiointr + 4); |
239 | int gpio; | 185 | int gpio; |
186 | irqreturn_t ret = IRQ_NONE; | ||
240 | 187 | ||
241 | for (gpio = 0; gpio < 8; gpio++) { | 188 | for (gpio = 0; gpio < 8; gpio++) { |
242 | if (intsts & (1 << gpio)) { | 189 | if (intsts & (1 << gpio)) { |
243 | pr_debug("pmic pin %d triggered\n", gpio); | 190 | pr_debug("pmic pin %d triggered\n", gpio); |
244 | generic_handle_irq(pg->irq_base + gpio); | 191 | generic_handle_irq(pg->irq_base + gpio); |
192 | ret = IRQ_HANDLED; | ||
245 | } | 193 | } |
246 | } | 194 | } |
247 | 195 | return ret; | |
248 | if (desc->chip->irq_eoi) | ||
249 | desc->chip->irq_eoi(irq_get_irq_data(irq)); | ||
250 | else | ||
251 | dev_warn(pg->chip.dev, "missing EOI handler for irq %d\n", irq); | ||
252 | } | 196 | } |
253 | 197 | ||
254 | static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | 198 | static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) |
@@ -297,8 +241,7 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
297 | pg->chip.can_sleep = 1; | 241 | pg->chip.can_sleep = 1; |
298 | pg->chip.dev = dev; | 242 | pg->chip.dev = dev; |
299 | 243 | ||
300 | INIT_WORK(&pg->irqtypes.work, pmic_irqtype_work); | 244 | mutex_init(&pg->buslock); |
301 | spin_lock_init(&pg->irqtypes.lock); | ||
302 | 245 | ||
303 | pg->chip.dev = dev; | 246 | pg->chip.dev = dev; |
304 | retval = gpiochip_add(&pg->chip); | 247 | retval = gpiochip_add(&pg->chip); |
@@ -306,8 +249,13 @@ static int __devinit platform_pmic_gpio_probe(struct platform_device *pdev) | |||
306 | printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); | 249 | printk(KERN_ERR "%s: Can not add pmic gpio chip.\n", __func__); |
307 | goto err; | 250 | goto err; |
308 | } | 251 | } |
309 | set_irq_data(pg->irq, pg); | 252 | |
310 | set_irq_chained_handler(pg->irq, pmic_irq_handler); | 253 | retval = request_irq(pg->irq, pmic_irq_handler, 0, "pmic", pg); |
254 | if (retval) { | ||
255 | printk(KERN_WARNING "pmic: Interrupt request failed\n"); | ||
256 | goto err; | ||
257 | } | ||
258 | |||
311 | for (i = 0; i < 8; i++) { | 259 | for (i = 0; i < 8; i++) { |
312 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, | 260 | set_irq_chip_and_handler_name(i + pg->irq_base, &pmic_irqchip, |
313 | handle_simple_irq, "demux"); | 261 | handle_simple_irq, "demux"); |
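The intel_pmic_gpio rewrite above replaces the deferred-work trigger programming and the chained handler with the newer genirq interfaces: irq_chip callbacks now take a struct irq_data pointer (irq_mask/irq_unmask/irq_set_type), the trigger change is only recorded under the chip's buslock and is meant to be pushed to the PMIC from bus_sync_unlock() where sleeping bus access is allowed, and the demux handler becomes an ordinary request_irq() handler returning IRQ_HANDLED/IRQ_NONE. A hedged sketch of an irq_data-style chip with illustrative names:

    static void example_irq_mask(struct irq_data *data) { }
    static void example_irq_unmask(struct irq_data *data) { }

    static int example_irq_set_type(struct irq_data *data, unsigned int type)
    {
            struct example_chip *chip = irq_data_get_irq_chip_data(data);

            chip->trigger_type = type;      /* flushed later from bus_sync_unlock() */
            return 0;
    }

    static struct irq_chip example_irqchip = {
            .name           = "example",
            .irq_mask       = example_irq_mask,
            .irq_unmask     = example_irq_unmask,
            .irq_set_type   = example_irq_set_type,
    };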
diff --git a/drivers/platform/x86/tc1100-wmi.c b/drivers/platform/x86/tc1100-wmi.c index 1fe0f1feff71..865ef78d6f1a 100644 --- a/drivers/platform/x86/tc1100-wmi.c +++ b/drivers/platform/x86/tc1100-wmi.c | |||
@@ -162,7 +162,7 @@ set_bool_##value(struct device *dev, struct device_attribute *attr, \ | |||
162 | return -EINVAL; \ | 162 | return -EINVAL; \ |
163 | return count; \ | 163 | return count; \ |
164 | } \ | 164 | } \ |
165 | static DEVICE_ATTR(value, S_IWUGO | S_IRUGO | S_IWUSR, \ | 165 | static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ |
166 | show_bool_##value, set_bool_##value); | 166 | show_bool_##value, set_bool_##value); |
167 | 167 | ||
168 | show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); | 168 | show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index dd599585c6a9..eb9922385ef8 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -2275,16 +2275,12 @@ static void tpacpi_input_send_key(const unsigned int scancode) | |||
2275 | if (keycode != KEY_RESERVED) { | 2275 | if (keycode != KEY_RESERVED) { |
2276 | mutex_lock(&tpacpi_inputdev_send_mutex); | 2276 | mutex_lock(&tpacpi_inputdev_send_mutex); |
2277 | 2277 | ||
2278 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); | ||
2278 | input_report_key(tpacpi_inputdev, keycode, 1); | 2279 | input_report_key(tpacpi_inputdev, keycode, 1); |
2279 | if (keycode == KEY_UNKNOWN) | ||
2280 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, | ||
2281 | scancode); | ||
2282 | input_sync(tpacpi_inputdev); | 2280 | input_sync(tpacpi_inputdev); |
2283 | 2281 | ||
2282 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, scancode); | ||
2284 | input_report_key(tpacpi_inputdev, keycode, 0); | 2283 | input_report_key(tpacpi_inputdev, keycode, 0); |
2285 | if (keycode == KEY_UNKNOWN) | ||
2286 | input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN, | ||
2287 | scancode); | ||
2288 | input_sync(tpacpi_inputdev); | 2284 | input_sync(tpacpi_inputdev); |
2289 | 2285 | ||
2290 | mutex_unlock(&tpacpi_inputdev_send_mutex); | 2286 | mutex_unlock(&tpacpi_inputdev_send_mutex); |
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cdd97192dc69..4941cade319f 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -97,6 +97,18 @@ config RTC_INTF_DEV | |||
97 | 97 | ||
98 | If unsure, say Y. | 98 | If unsure, say Y. |
99 | 99 | ||
100 | config RTC_INTF_DEV_UIE_EMUL | ||
101 | bool "RTC UIE emulation on dev interface" | ||
102 | depends on RTC_INTF_DEV | ||
103 | help | ||
104 | Provides an emulation for RTC_UIE if the underlying rtc chip | ||
105 | driver does not expose RTC_UIE ioctls. Those requests generate | ||
106 | once-per-second update interrupts, used for synchronization. | ||
107 | |||
108 | The emulation code will read the time from the hardware | ||
109 | clock several times per second, please enable this option | ||
110 | only if you know that you really need it. | ||
111 | |||
100 | config RTC_DRV_TEST | 112 | config RTC_DRV_TEST |
101 | tristate "Test driver/device" | 113 | tristate "Test driver/device" |
102 | help | 114 | help |
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index a0c01967244d..cb2f0728fd70 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c | |||
@@ -209,9 +209,8 @@ int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
209 | } | 209 | } |
210 | 210 | ||
211 | if (err) | 211 | if (err) |
212 | return err; | 212 | /* nothing */; |
213 | 213 | else if (!rtc->ops) | |
214 | if (!rtc->ops) | ||
215 | err = -ENODEV; | 214 | err = -ENODEV; |
216 | else if (!rtc->ops->alarm_irq_enable) | 215 | else if (!rtc->ops->alarm_irq_enable) |
217 | err = -EINVAL; | 216 | err = -EINVAL; |
@@ -229,6 +228,12 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
229 | if (err) | 228 | if (err) |
230 | return err; | 229 | return err; |
231 | 230 | ||
231 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
232 | if (enabled == 0 && rtc->uie_irq_active) { | ||
233 | mutex_unlock(&rtc->ops_lock); | ||
234 | return rtc_dev_update_irq_enable_emul(rtc, 0); | ||
235 | } | ||
236 | #endif | ||
232 | /* make sure we're changing state */ | 237 | /* make sure we're changing state */ |
233 | if (rtc->uie_rtctimer.enabled == enabled) | 238 | if (rtc->uie_rtctimer.enabled == enabled) |
234 | goto out; | 239 | goto out; |
@@ -248,6 +253,16 @@ int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled) | |||
248 | 253 | ||
249 | out: | 254 | out: |
250 | mutex_unlock(&rtc->ops_lock); | 255 | mutex_unlock(&rtc->ops_lock); |
256 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
257 | /* | ||
258 | * Enable emulation if the driver did not provide | ||
259 | * the update_irq_enable function pointer or if returned | ||
260 | * -EINVAL to signal that it has been configured without | ||
261 | * interrupts or that are not available at the moment. | ||
262 | */ | ||
263 | if (err == -EINVAL) | ||
264 | err = rtc_dev_update_irq_enable_emul(rtc, enabled); | ||
265 | #endif | ||
251 | return err; | 266 | return err; |
252 | 267 | ||
253 | } | 268 | } |
@@ -263,7 +278,7 @@ EXPORT_SYMBOL_GPL(rtc_update_irq_enable); | |||
263 | * | 278 | * |
264 | * Triggers the registered irq_task function callback. | 279 | * Triggers the registered irq_task function callback. |
265 | */ | 280 | */ |
266 | static void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) | 281 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode) |
267 | { | 282 | { |
268 | unsigned long flags; | 283 | unsigned long flags; |
269 | 284 | ||
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index 37c3cc1b3dd5..d0e06edb14c5 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -46,6 +46,105 @@ static int rtc_dev_open(struct inode *inode, struct file *file) | |||
46 | return err; | 46 | return err; |
47 | } | 47 | } |
48 | 48 | ||
49 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
50 | /* | ||
51 | * Routine to poll RTC seconds field for change as often as possible, | ||
52 | * after first RTC_UIE use timer to reduce polling | ||
53 | */ | ||
54 | static void rtc_uie_task(struct work_struct *work) | ||
55 | { | ||
56 | struct rtc_device *rtc = | ||
57 | container_of(work, struct rtc_device, uie_task); | ||
58 | struct rtc_time tm; | ||
59 | int num = 0; | ||
60 | int err; | ||
61 | |||
62 | err = rtc_read_time(rtc, &tm); | ||
63 | |||
64 | spin_lock_irq(&rtc->irq_lock); | ||
65 | if (rtc->stop_uie_polling || err) { | ||
66 | rtc->uie_task_active = 0; | ||
67 | } else if (rtc->oldsecs != tm.tm_sec) { | ||
68 | num = (tm.tm_sec + 60 - rtc->oldsecs) % 60; | ||
69 | rtc->oldsecs = tm.tm_sec; | ||
70 | rtc->uie_timer.expires = jiffies + HZ - (HZ/10); | ||
71 | rtc->uie_timer_active = 1; | ||
72 | rtc->uie_task_active = 0; | ||
73 | add_timer(&rtc->uie_timer); | ||
74 | } else if (schedule_work(&rtc->uie_task) == 0) { | ||
75 | rtc->uie_task_active = 0; | ||
76 | } | ||
77 | spin_unlock_irq(&rtc->irq_lock); | ||
78 | if (num) | ||
79 | rtc_handle_legacy_irq(rtc, num, RTC_UF); | ||
80 | } | ||
81 | static void rtc_uie_timer(unsigned long data) | ||
82 | { | ||
83 | struct rtc_device *rtc = (struct rtc_device *)data; | ||
84 | unsigned long flags; | ||
85 | |||
86 | spin_lock_irqsave(&rtc->irq_lock, flags); | ||
87 | rtc->uie_timer_active = 0; | ||
88 | rtc->uie_task_active = 1; | ||
89 | if ((schedule_work(&rtc->uie_task) == 0)) | ||
90 | rtc->uie_task_active = 0; | ||
91 | spin_unlock_irqrestore(&rtc->irq_lock, flags); | ||
92 | } | ||
93 | |||
94 | static int clear_uie(struct rtc_device *rtc) | ||
95 | { | ||
96 | spin_lock_irq(&rtc->irq_lock); | ||
97 | if (rtc->uie_irq_active) { | ||
98 | rtc->stop_uie_polling = 1; | ||
99 | if (rtc->uie_timer_active) { | ||
100 | spin_unlock_irq(&rtc->irq_lock); | ||
101 | del_timer_sync(&rtc->uie_timer); | ||
102 | spin_lock_irq(&rtc->irq_lock); | ||
103 | rtc->uie_timer_active = 0; | ||
104 | } | ||
105 | if (rtc->uie_task_active) { | ||
106 | spin_unlock_irq(&rtc->irq_lock); | ||
107 | flush_scheduled_work(); | ||
108 | spin_lock_irq(&rtc->irq_lock); | ||
109 | } | ||
110 | rtc->uie_irq_active = 0; | ||
111 | } | ||
112 | spin_unlock_irq(&rtc->irq_lock); | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int set_uie(struct rtc_device *rtc) | ||
117 | { | ||
118 | struct rtc_time tm; | ||
119 | int err; | ||
120 | |||
121 | err = rtc_read_time(rtc, &tm); | ||
122 | if (err) | ||
123 | return err; | ||
124 | spin_lock_irq(&rtc->irq_lock); | ||
125 | if (!rtc->uie_irq_active) { | ||
126 | rtc->uie_irq_active = 1; | ||
127 | rtc->stop_uie_polling = 0; | ||
128 | rtc->oldsecs = tm.tm_sec; | ||
129 | rtc->uie_task_active = 1; | ||
130 | if (schedule_work(&rtc->uie_task) == 0) | ||
131 | rtc->uie_task_active = 0; | ||
132 | } | ||
133 | rtc->irq_data = 0; | ||
134 | spin_unlock_irq(&rtc->irq_lock); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, unsigned int enabled) | ||
139 | { | ||
140 | if (enabled) | ||
141 | return set_uie(rtc); | ||
142 | else | ||
143 | return clear_uie(rtc); | ||
144 | } | ||
145 | EXPORT_SYMBOL(rtc_dev_update_irq_enable_emul); | ||
146 | |||
147 | #endif /* CONFIG_RTC_INTF_DEV_UIE_EMUL */ | ||
49 | 148 | ||
50 | static ssize_t | 149 | static ssize_t |
51 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) | 150 | rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) |
@@ -387,6 +486,11 @@ void rtc_dev_prepare(struct rtc_device *rtc) | |||
387 | 486 | ||
388 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); | 487 | rtc->dev.devt = MKDEV(MAJOR(rtc_devt), rtc->id); |
389 | 488 | ||
489 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
490 | INIT_WORK(&rtc->uie_task, rtc_uie_task); | ||
491 | setup_timer(&rtc->uie_timer, rtc_uie_timer, (unsigned long)rtc); | ||
492 | #endif | ||
493 | |||
390 | cdev_init(&rtc->char_dev, &rtc_dev_fops); | 494 | cdev_init(&rtc->char_dev, &rtc_dev_fops); |
391 | rtc->char_dev.owner = rtc->owner; | 495 | rtc->char_dev.owner = rtc->owner; |
392 | } | 496 | } |
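The RTC changes reintroduce CONFIG_RTC_INTF_DEV_UIE_EMUL: when a driver has no hardware update interrupt (rtc_update_irq_enable() ends up returning -EINVAL), the dev interface polls the seconds field from a work/timer pair and synthesizes RTC_UF events, so RTC_UIE_ON keeps working from userspace. A hedged userspace sketch of the interface this emulation serves:

    /* Userspace sketch: wait for one (real or emulated) 1 Hz update interrupt. */
    #include <fcntl.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/rtc.h>

    int wait_one_second_tick(void)
    {
            unsigned long data;
            int fd = open("/dev/rtc0", O_RDONLY);

            if (fd < 0)
                    return -1;
            if (ioctl(fd, RTC_UIE_ON, 0) < 0) {     /* enable update interrupts */
                    close(fd);
                    return -1;
            }
            read(fd, &data, sizeof(data));          /* blocks until the next tick */
            ioctl(fd, RTC_UIE_OFF, 0);
            close(fd);
            return 0;
    }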
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 318672d05563..a9fe23d5bd0f 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -72,7 +72,7 @@ static struct dasd_discipline dasd_eckd_discipline; | |||
72 | static struct ccw_device_id dasd_eckd_ids[] = { | 72 | static struct ccw_device_id dasd_eckd_ids[] = { |
73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, | 73 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, |
74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, | 74 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, |
75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, | 75 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3380, 0), .driver_info = 0x3}, |
76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, | 76 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, |
77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, | 77 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, |
78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, | 78 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, |
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index 44578b56ad0a..d3e58d763b43 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c | |||
@@ -1561,6 +1561,7 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
1561 | { | 1561 | { |
1562 | struct Scsi_Host *host = rport_to_shost(rport); | 1562 | struct Scsi_Host *host = rport_to_shost(rport); |
1563 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; | 1563 | fc_port_t *fcport = *(fc_port_t **)rport->dd_data; |
1564 | unsigned long flags; | ||
1564 | 1565 | ||
1565 | if (!fcport) | 1566 | if (!fcport) |
1566 | return; | 1567 | return; |
@@ -1573,10 +1574,10 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) | |||
1573 | * Transport has effectively 'deleted' the rport, clear | 1574 | * Transport has effectively 'deleted' the rport, clear |
1574 | * all local references. | 1575 | * all local references. |
1575 | */ | 1576 | */ |
1576 | spin_lock_irq(host->host_lock); | 1577 | spin_lock_irqsave(host->host_lock, flags); |
1577 | fcport->rport = fcport->drport = NULL; | 1578 | fcport->rport = fcport->drport = NULL; |
1578 | *((fc_port_t **)rport->dd_data) = NULL; | 1579 | *((fc_port_t **)rport->dd_data) = NULL; |
1579 | spin_unlock_irq(host->host_lock); | 1580 | spin_unlock_irqrestore(host->host_lock, flags); |
1580 | 1581 | ||
1581 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) | 1582 | if (test_bit(ABORT_ISP_ACTIVE, &fcport->vha->dpc_flags)) |
1582 | return; | 1583 | return; |
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f948e1a73aec..d9479c3fe5f8 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c | |||
@@ -2505,11 +2505,12 @@ qla2x00_rport_del(void *data) | |||
2505 | { | 2505 | { |
2506 | fc_port_t *fcport = data; | 2506 | fc_port_t *fcport = data; |
2507 | struct fc_rport *rport; | 2507 | struct fc_rport *rport; |
2508 | unsigned long flags; | ||
2508 | 2509 | ||
2509 | spin_lock_irq(fcport->vha->host->host_lock); | 2510 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
2510 | rport = fcport->drport ? fcport->drport: fcport->rport; | 2511 | rport = fcport->drport ? fcport->drport: fcport->rport; |
2511 | fcport->drport = NULL; | 2512 | fcport->drport = NULL; |
2512 | spin_unlock_irq(fcport->vha->host->host_lock); | 2513 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
2513 | if (rport) | 2514 | if (rport) |
2514 | fc_remote_port_delete(rport); | 2515 | fc_remote_port_delete(rport); |
2515 | } | 2516 | } |
@@ -2879,6 +2880,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2879 | struct fc_rport_identifiers rport_ids; | 2880 | struct fc_rport_identifiers rport_ids; |
2880 | struct fc_rport *rport; | 2881 | struct fc_rport *rport; |
2881 | struct qla_hw_data *ha = vha->hw; | 2882 | struct qla_hw_data *ha = vha->hw; |
2883 | unsigned long flags; | ||
2882 | 2884 | ||
2883 | qla2x00_rport_del(fcport); | 2885 | qla2x00_rport_del(fcport); |
2884 | 2886 | ||
@@ -2893,9 +2895,9 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport) | |||
2893 | "Unable to allocate fc remote port!\n"); | 2895 | "Unable to allocate fc remote port!\n"); |
2894 | return; | 2896 | return; |
2895 | } | 2897 | } |
2896 | spin_lock_irq(fcport->vha->host->host_lock); | 2898 | spin_lock_irqsave(fcport->vha->host->host_lock, flags); |
2897 | *((fc_port_t **)rport->dd_data) = fcport; | 2899 | *((fc_port_t **)rport->dd_data) = fcport; |
2898 | spin_unlock_irq(fcport->vha->host->host_lock); | 2900 | spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); |
2899 | 2901 | ||
2900 | rport->supported_classes = fcport->supported_classes; | 2902 | rport->supported_classes = fcport->supported_classes; |
2901 | 2903 | ||
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index c194c23ca1fb..f27724d76cf6 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c | |||
@@ -562,7 +562,6 @@ qla2xxx_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *) | |||
562 | } | 562 | } |
563 | if (atomic_read(&fcport->state) != FCS_ONLINE) { | 563 | if (atomic_read(&fcport->state) != FCS_ONLINE) { |
564 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || | 564 | if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD || |
565 | atomic_read(&fcport->state) == FCS_DEVICE_LOST || | ||
566 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { | 565 | atomic_read(&base_vha->loop_state) == LOOP_DEAD) { |
567 | cmd->result = DID_NO_CONNECT << 16; | 566 | cmd->result = DID_NO_CONNECT << 16; |
568 | goto qc24_fail_command; | 567 | goto qc24_fail_command; |
@@ -2513,6 +2512,7 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
2513 | { | 2512 | { |
2514 | struct fc_rport *rport; | 2513 | struct fc_rport *rport; |
2515 | scsi_qla_host_t *base_vha; | 2514 | scsi_qla_host_t *base_vha; |
2515 | unsigned long flags; | ||
2516 | 2516 | ||
2517 | if (!fcport->rport) | 2517 | if (!fcport->rport) |
2518 | return; | 2518 | return; |
@@ -2520,9 +2520,9 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport, | |||
2520 | rport = fcport->rport; | 2520 | rport = fcport->rport; |
2521 | if (defer) { | 2521 | if (defer) { |
2522 | base_vha = pci_get_drvdata(vha->hw->pdev); | 2522 | base_vha = pci_get_drvdata(vha->hw->pdev); |
2523 | spin_lock_irq(vha->host->host_lock); | 2523 | spin_lock_irqsave(vha->host->host_lock, flags); |
2524 | fcport->drport = rport; | 2524 | fcport->drport = rport; |
2525 | spin_unlock_irq(vha->host->host_lock); | 2525 | spin_unlock_irqrestore(vha->host->host_lock, flags); |
2526 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); | 2526 | set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); |
2527 | qla2xxx_wake_dpc(base_vha); | 2527 | qla2xxx_wake_dpc(base_vha); |
2528 | } else | 2528 | } else |
@@ -3282,10 +3282,10 @@ qla2x00_do_dpc(void *data) | |||
3282 | 3282 | ||
3283 | set_user_nice(current, -20); | 3283 | set_user_nice(current, -20); |
3284 | 3284 | ||
3285 | set_current_state(TASK_INTERRUPTIBLE); | ||
3285 | while (!kthread_should_stop()) { | 3286 | while (!kthread_should_stop()) { |
3286 | DEBUG3(printk("qla2x00: DPC handler sleeping\n")); | 3287 | DEBUG3(printk("qla2x00: DPC handler sleeping\n")); |
3287 | 3288 | ||
3288 | set_current_state(TASK_INTERRUPTIBLE); | ||
3289 | schedule(); | 3289 | schedule(); |
3290 | __set_current_state(TASK_RUNNING); | 3290 | __set_current_state(TASK_RUNNING); |
3291 | 3291 | ||
@@ -3454,7 +3454,9 @@ qla2x00_do_dpc(void *data) | |||
3454 | qla2x00_do_dpc_all_vps(base_vha); | 3454 | qla2x00_do_dpc_all_vps(base_vha); |
3455 | 3455 | ||
3456 | ha->dpc_active = 0; | 3456 | ha->dpc_active = 0; |
3457 | set_current_state(TASK_INTERRUPTIBLE); | ||
3457 | } /* End of while(1) */ | 3458 | } /* End of while(1) */ |
3459 | __set_current_state(TASK_RUNNING); | ||
3458 | 3460 | ||
3459 | DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); | 3461 | DEBUG(printk("scsi(%ld): DPC handler exiting\n", base_vha->host_no)); |
3460 | 3462 | ||
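The qla2xxx hunks convert host_lock uses from spin_lock_irq()/spin_unlock_irq() to the irqsave/irqrestore variants. The _irq form unconditionally re-enables interrupts on unlock, which is wrong when the caller may already run with interrupts disabled; saving and restoring the previous flags is safe from any context. A minimal sketch of the difference:

    unsigned long flags;

    spin_lock_irqsave(host->host_lock, flags);      /* remembers the prior IRQ state */
    /* ... touch rport/fcport bookkeeping ... */
    spin_unlock_irqrestore(host->host_lock, flags); /* restores it exactly */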
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 7b310934efed..a6b2d72022fc 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c | |||
@@ -1671,7 +1671,7 @@ static int do_device_access(struct scsi_cmnd *scmd, | |||
1671 | unsigned long long lba, unsigned int num, int write) | 1671 | unsigned long long lba, unsigned int num, int write) |
1672 | { | 1672 | { |
1673 | int ret; | 1673 | int ret; |
1674 | unsigned int block, rest = 0; | 1674 | unsigned long long block, rest = 0; |
1675 | int (*func)(struct scsi_cmnd *, unsigned char *, int); | 1675 | int (*func)(struct scsi_cmnd *, unsigned char *, int); |
1676 | 1676 | ||
1677 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; | 1677 | func = write ? fetch_to_dev_buffer : fill_from_dev_buffer; |
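The scsi_debug change widens block and rest to unsigned long long so LBAs past 2^32 sectors are not truncated when the virtual device is configured larger than 2 TiB (at 512-byte sectors). A tiny sketch of the truncation being avoided:

    unsigned long long lba = 0x100000000ULL;    /* sector 2^32 */
    unsigned int as_u32 = lba;                  /* silently wraps to 0 */
    unsigned long long as_u64 = lba;            /* keeps the full value */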
diff --git a/drivers/spi/pxa2xx_spi_pci.c b/drivers/spi/pxa2xx_spi_pci.c index 351d8a375b57..19752b09e155 100644 --- a/drivers/spi/pxa2xx_spi_pci.c +++ b/drivers/spi/pxa2xx_spi_pci.c | |||
@@ -7,10 +7,9 @@ | |||
7 | #include <linux/of_device.h> | 7 | #include <linux/of_device.h> |
8 | #include <linux/spi/pxa2xx_spi.h> | 8 | #include <linux/spi/pxa2xx_spi.h> |
9 | 9 | ||
10 | struct awesome_struct { | 10 | struct ce4100_info { |
11 | struct ssp_device ssp; | 11 | struct ssp_device ssp; |
12 | struct platform_device spi_pdev; | 12 | struct platform_device *spi_pdev; |
13 | struct pxa2xx_spi_master spi_pdata; | ||
14 | }; | 13 | }; |
15 | 14 | ||
16 | static DEFINE_MUTEX(ssp_lock); | 15 | static DEFINE_MUTEX(ssp_lock); |
@@ -51,23 +50,15 @@ void pxa_ssp_free(struct ssp_device *ssp) | |||
51 | } | 50 | } |
52 | EXPORT_SYMBOL_GPL(pxa_ssp_free); | 51 | EXPORT_SYMBOL_GPL(pxa_ssp_free); |
53 | 52 | ||
54 | static void plat_dev_release(struct device *dev) | ||
55 | { | ||
56 | struct awesome_struct *as = container_of(dev, | ||
57 | struct awesome_struct, spi_pdev.dev); | ||
58 | |||
59 | of_device_node_put(&as->spi_pdev.dev); | ||
60 | } | ||
61 | |||
62 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, | 53 | static int __devinit ce4100_spi_probe(struct pci_dev *dev, |
63 | const struct pci_device_id *ent) | 54 | const struct pci_device_id *ent) |
64 | { | 55 | { |
65 | int ret; | 56 | int ret; |
66 | resource_size_t phys_beg; | 57 | resource_size_t phys_beg; |
67 | resource_size_t phys_len; | 58 | resource_size_t phys_len; |
68 | struct awesome_struct *spi_info; | 59 | struct ce4100_info *spi_info; |
69 | struct platform_device *pdev; | 60 | struct platform_device *pdev; |
70 | struct pxa2xx_spi_master *spi_pdata; | 61 | struct pxa2xx_spi_master spi_pdata; |
71 | struct ssp_device *ssp; | 62 | struct ssp_device *ssp; |
72 | 63 | ||
73 | ret = pci_enable_device(dev); | 64 | ret = pci_enable_device(dev); |
@@ -84,33 +75,30 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
84 | return ret; | 75 | return ret; |
85 | } | 76 | } |
86 | 77 | ||
78 | pdev = platform_device_alloc("pxa2xx-spi", dev->devfn); | ||
87 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); | 79 | spi_info = kzalloc(sizeof(*spi_info), GFP_KERNEL); |
88 | if (!spi_info) { | 80 | if (!pdev || !spi_info ) { |
89 | ret = -ENOMEM; | 81 | ret = -ENOMEM; |
90 | goto err_kz; | 82 | goto err_nomem; |
91 | } | 83 | } |
92 | ssp = &spi_info->ssp; | 84 | memset(&spi_pdata, 0, sizeof(spi_pdata)); |
93 | pdev = &spi_info->spi_pdev; | 85 | spi_pdata.num_chipselect = dev->devfn; |
94 | spi_pdata = &spi_info->spi_pdata; | ||
95 | 86 | ||
96 | pdev->name = "pxa2xx-spi"; | 87 | ret = platform_device_add_data(pdev, &spi_pdata, sizeof(spi_pdata)); |
97 | pdev->id = dev->devfn; | 88 | if (ret) |
98 | pdev->dev.parent = &dev->dev; | 89 | goto err_nomem; |
99 | pdev->dev.platform_data = &spi_info->spi_pdata; | ||
100 | 90 | ||
91 | pdev->dev.parent = &dev->dev; | ||
101 | #ifdef CONFIG_OF | 92 | #ifdef CONFIG_OF |
102 | pdev->dev.of_node = dev->dev.of_node; | 93 | pdev->dev.of_node = dev->dev.of_node; |
103 | #endif | 94 | #endif |
104 | pdev->dev.release = plat_dev_release; | 95 | ssp = &spi_info->ssp; |
105 | |||
106 | spi_pdata->num_chipselect = dev->devfn; | ||
107 | |||
108 | ssp->phys_base = pci_resource_start(dev, 0); | 96 | ssp->phys_base = pci_resource_start(dev, 0); |
109 | ssp->mmio_base = ioremap(phys_beg, phys_len); | 97 | ssp->mmio_base = ioremap(phys_beg, phys_len); |
110 | if (!ssp->mmio_base) { | 98 | if (!ssp->mmio_base) { |
111 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); | 99 | dev_err(&pdev->dev, "failed to ioremap() registers\n"); |
112 | ret = -EIO; | 100 | ret = -EIO; |
113 | goto err_remap; | 101 | goto err_nomem; |
114 | } | 102 | } |
115 | ssp->irq = dev->irq; | 103 | ssp->irq = dev->irq; |
116 | ssp->port_id = pdev->id; | 104 | ssp->port_id = pdev->id; |
@@ -122,7 +110,7 @@ static int __devinit ce4100_spi_probe(struct pci_dev *dev, | |||
122 | 110 | ||
123 | pci_set_drvdata(dev, spi_info); | 111 | pci_set_drvdata(dev, spi_info); |
124 | 112 | ||
125 | ret = platform_device_register(pdev); | 113 | ret = platform_device_add(pdev); |
126 | if (ret) | 114 | if (ret) |
127 | goto err_dev_add; | 115 | goto err_dev_add; |
128 | 116 | ||
@@ -135,27 +123,21 @@ err_dev_add: | |||
135 | mutex_unlock(&ssp_lock); | 123 | mutex_unlock(&ssp_lock); |
136 | iounmap(ssp->mmio_base); | 124 | iounmap(ssp->mmio_base); |
137 | 125 | ||
138 | err_remap: | 126 | err_nomem: |
139 | kfree(spi_info); | ||
140 | |||
141 | err_kz: | ||
142 | release_mem_region(phys_beg, phys_len); | 127 | release_mem_region(phys_beg, phys_len); |
143 | 128 | platform_device_put(pdev); | |
129 | kfree(spi_info); | ||
144 | return ret; | 130 | return ret; |
145 | } | 131 | } |
146 | 132 | ||
147 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) | 133 | static void __devexit ce4100_spi_remove(struct pci_dev *dev) |
148 | { | 134 | { |
149 | struct awesome_struct *spi_info; | 135 | struct ce4100_info *spi_info; |
150 | struct platform_device *pdev; | ||
151 | struct ssp_device *ssp; | 136 | struct ssp_device *ssp; |
152 | 137 | ||
153 | spi_info = pci_get_drvdata(dev); | 138 | spi_info = pci_get_drvdata(dev); |
154 | |||
155 | ssp = &spi_info->ssp; | 139 | ssp = &spi_info->ssp; |
156 | pdev = &spi_info->spi_pdev; | 140 | platform_device_unregister(spi_info->spi_pdev); |
157 | |||
158 | platform_device_unregister(pdev); | ||
159 | 141 | ||
160 | iounmap(ssp->mmio_base); | 142 | iounmap(ssp->mmio_base); |
161 | release_mem_region(pci_resource_start(dev, 0), | 143 | release_mem_region(pci_resource_start(dev, 0), |
@@ -171,7 +153,6 @@ static void __devexit ce4100_spi_remove(struct pci_dev *dev) | |||
171 | } | 153 | } |
172 | 154 | ||
173 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { | 155 | static struct pci_device_id ce4100_spi_devices[] __devinitdata = { |
174 | |||
175 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, | 156 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e6a) }, |
176 | { }, | 157 | { }, |
177 | }; | 158 | }; |
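The ce4100 SPI glue above stops embedding a struct platform_device inside its private data (which broke device refcounting and required the release hack) and switches to the dynamic helpers: allocate with platform_device_alloc(), attach platform data by copy with platform_device_add_data(), register with platform_device_add(), and drop the reference with platform_device_put() on error. A hedged sketch of that lifecycle, with error handling compressed and an illustrative chip-select count:

    struct platform_device *pdev;
    struct pxa2xx_spi_master pdata = { .num_chipselect = 1 };  /* illustrative value */
    int ret;

    pdev = platform_device_alloc("pxa2xx-spi", 0);
    if (!pdev)
            return -ENOMEM;
    ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));    /* data is copied */
    if (!ret)
            ret = platform_device_add(pdev);
    if (ret)
            platform_device_put(pdev);  /* undo alloc; after a successful add, use _unregister() */
    return ret;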
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 5cfd70819f08..973bb190ef57 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile | |||
@@ -13,8 +13,7 @@ target_core_mod-y := target_core_configfs.o \ | |||
13 | target_core_transport.o \ | 13 | target_core_transport.o \ |
14 | target_core_cdb.o \ | 14 | target_core_cdb.o \ |
15 | target_core_ua.o \ | 15 | target_core_ua.o \ |
16 | target_core_rd.o \ | 16 | target_core_rd.o |
17 | target_core_mib.o | ||
18 | 17 | ||
19 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o | 18 | obj-$(CONFIG_TARGET_CORE) += target_core_mod.o |
20 | 19 | ||
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 2764510798b0..caf8dc18ee0a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/parser.h> | 37 | #include <linux/parser.h> |
38 | #include <linux/syscalls.h> | 38 | #include <linux/syscalls.h> |
39 | #include <linux/configfs.h> | 39 | #include <linux/configfs.h> |
40 | #include <linux/proc_fs.h> | ||
41 | 40 | ||
42 | #include <target/target_core_base.h> | 41 | #include <target/target_core_base.h> |
43 | #include <target/target_core_device.h> | 42 | #include <target/target_core_device.h> |
@@ -1971,13 +1970,35 @@ static void target_core_dev_release(struct config_item *item) | |||
1971 | { | 1970 | { |
1972 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), | 1971 | struct se_subsystem_dev *se_dev = container_of(to_config_group(item), |
1973 | struct se_subsystem_dev, se_dev_group); | 1972 | struct se_subsystem_dev, se_dev_group); |
1974 | struct config_group *dev_cg; | 1973 | struct se_hba *hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
1975 | 1974 | struct se_subsystem_api *t = hba->transport; | |
1976 | if (!(se_dev)) | 1975 | struct config_group *dev_cg = &se_dev->se_dev_group; |
1977 | return; | ||
1978 | 1976 | ||
1979 | dev_cg = &se_dev->se_dev_group; | ||
1980 | kfree(dev_cg->default_groups); | 1977 | kfree(dev_cg->default_groups); |
1978 | /* | ||
1979 | * This pointer will set when the storage is enabled with: | ||
1980 | *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | ||
1981 | */ | ||
1982 | if (se_dev->se_dev_ptr) { | ||
1983 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | ||
1984 | "virtual_device() for se_dev_ptr: %p\n", | ||
1985 | se_dev->se_dev_ptr); | ||
1986 | |||
1987 | se_free_virtual_device(se_dev->se_dev_ptr, hba); | ||
1988 | } else { | ||
1989 | /* | ||
1990 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | ||
1991 | */ | ||
1992 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | ||
1993 | "device() for se_dev_su_ptr: %p\n", | ||
1994 | se_dev->se_dev_su_ptr); | ||
1995 | |||
1996 | t->free_device(se_dev->se_dev_su_ptr); | ||
1997 | } | ||
1998 | |||
1999 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | ||
2000 | "_dev_t: %p\n", se_dev); | ||
2001 | kfree(se_dev); | ||
1981 | } | 2002 | } |
1982 | 2003 | ||
1983 | static ssize_t target_core_dev_show(struct config_item *item, | 2004 | static ssize_t target_core_dev_show(struct config_item *item, |
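The hunk above is the heart of this change: freeing the se_subsystem_dev (and calling se_free_virtual_device() or t->free_device()) moves out of the configfs drop path and into the item's ->release() callback, which configfs invokes only after the last reference to the config_item has been dropped. A minimal sketch of the pattern, using hypothetical example_* names rather than the real target_core types:

#include <linux/configfs.h>
#include <linux/slab.h>

struct example_item {
	struct config_group group;
	/* ... resources owned by this item ... */
};

static void example_release(struct config_item *item)
{
	struct example_item *ei = container_of(to_config_group(item),
			struct example_item, group);

	/* Free here: no other reference to the item can still exist. */
	kfree(ei);
}

static struct configfs_item_operations example_item_ops = {
	.release	= example_release,
};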
@@ -2140,7 +2161,16 @@ static struct configfs_attribute *target_core_alua_lu_gp_attrs[] = { | |||
2140 | NULL, | 2161 | NULL, |
2141 | }; | 2162 | }; |
2142 | 2163 | ||
2164 | static void target_core_alua_lu_gp_release(struct config_item *item) | ||
2165 | { | ||
2166 | struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item), | ||
2167 | struct t10_alua_lu_gp, lu_gp_group); | ||
2168 | |||
2169 | core_alua_free_lu_gp(lu_gp); | ||
2170 | } | ||
2171 | |||
2143 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { | 2172 | static struct configfs_item_operations target_core_alua_lu_gp_ops = { |
2173 | .release = target_core_alua_lu_gp_release, | ||
2144 | .show_attribute = target_core_alua_lu_gp_attr_show, | 2174 | .show_attribute = target_core_alua_lu_gp_attr_show, |
2145 | .store_attribute = target_core_alua_lu_gp_attr_store, | 2175 | .store_attribute = target_core_alua_lu_gp_attr_store, |
2146 | }; | 2176 | }; |
@@ -2191,9 +2221,11 @@ static void target_core_alua_drop_lu_gp( | |||
2191 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" | 2221 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit" |
2192 | " Group: core/alua/lu_gps/%s, ID: %hu\n", | 2222 | " Group: core/alua/lu_gps/%s, ID: %hu\n", |
2193 | config_item_name(item), lu_gp->lu_gp_id); | 2223 | config_item_name(item), lu_gp->lu_gp_id); |
2194 | 2224 | /* | |
2225 | * core_alua_free_lu_gp() is called from target_core_alua_lu_gp_ops->release() | ||
2226 | * -> target_core_alua_lu_gp_release() | ||
2227 | */ | ||
2195 | config_item_put(item); | 2228 | config_item_put(item); |
2196 | core_alua_free_lu_gp(lu_gp); | ||
2197 | } | 2229 | } |
2198 | 2230 | ||
2199 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { | 2231 | static struct configfs_group_operations target_core_alua_lu_gps_group_ops = { |
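With the free moved into ->release(), the matching ->drop_item() callbacks shrink to a bare config_item_put(), as the hunk above shows for the ALUA LU groups. Continuing the same hypothetical sketch:

static void example_drop_item(struct config_group *group,
			      struct config_item *item)
{
	/*
	 * Nothing is freed here; config_item_put() only drops the
	 * reference, and example_release() above runs once the
	 * reference count reaches zero.
	 */
	config_item_put(item);
}

static struct configfs_group_operations example_group_ops = {
	.drop_item	= example_drop_item,
};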
@@ -2549,7 +2581,16 @@ static struct configfs_attribute *target_core_alua_tg_pt_gp_attrs[] = { | |||
2549 | NULL, | 2581 | NULL, |
2550 | }; | 2582 | }; |
2551 | 2583 | ||
2584 | static void target_core_alua_tg_pt_gp_release(struct config_item *item) | ||
2585 | { | ||
2586 | struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item), | ||
2587 | struct t10_alua_tg_pt_gp, tg_pt_gp_group); | ||
2588 | |||
2589 | core_alua_free_tg_pt_gp(tg_pt_gp); | ||
2590 | } | ||
2591 | |||
2552 | static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { | 2592 | static struct configfs_item_operations target_core_alua_tg_pt_gp_ops = { |
2593 | .release = target_core_alua_tg_pt_gp_release, | ||
2553 | .show_attribute = target_core_alua_tg_pt_gp_attr_show, | 2594 | .show_attribute = target_core_alua_tg_pt_gp_attr_show, |
2554 | .store_attribute = target_core_alua_tg_pt_gp_attr_store, | 2595 | .store_attribute = target_core_alua_tg_pt_gp_attr_store, |
2555 | }; | 2596 | }; |
@@ -2602,9 +2643,11 @@ static void target_core_alua_drop_tg_pt_gp( | |||
2602 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" | 2643 | printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port" |
2603 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", | 2644 | " Group: alua/tg_pt_gps/%s, ID: %hu\n", |
2604 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); | 2645 | config_item_name(item), tg_pt_gp->tg_pt_gp_id); |
2605 | 2646 | /* | |
2647 | * core_alua_free_tg_pt_gp() is called from target_core_alua_tg_pt_gp_ops->release() | ||
2648 | * -> target_core_alua_tg_pt_gp_release(). | ||
2649 | */ | ||
2606 | config_item_put(item); | 2650 | config_item_put(item); |
2607 | core_alua_free_tg_pt_gp(tg_pt_gp); | ||
2608 | } | 2651 | } |
2609 | 2652 | ||
2610 | static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { | 2653 | static struct configfs_group_operations target_core_alua_tg_pt_gps_group_ops = { |
@@ -2771,13 +2814,11 @@ static void target_core_drop_subdev( | |||
2771 | struct se_subsystem_api *t; | 2814 | struct se_subsystem_api *t; |
2772 | struct config_item *df_item; | 2815 | struct config_item *df_item; |
2773 | struct config_group *dev_cg, *tg_pt_gp_cg; | 2816 | struct config_group *dev_cg, *tg_pt_gp_cg; |
2774 | int i, ret; | 2817 | int i; |
2775 | 2818 | ||
2776 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); | 2819 | hba = item_to_hba(&se_dev->se_dev_hba->hba_group.cg_item); |
2777 | 2820 | ||
2778 | if (mutex_lock_interruptible(&hba->hba_access_mutex)) | 2821 | mutex_lock(&hba->hba_access_mutex); |
2779 | goto out; | ||
2780 | |||
2781 | t = hba->transport; | 2822 | t = hba->transport; |
2782 | 2823 | ||
2783 | spin_lock(&se_global->g_device_lock); | 2824 | spin_lock(&se_global->g_device_lock); |
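Replacing mutex_lock_interruptible() with mutex_lock() here also closes a subtle hole: target_core_drop_subdev() returns void, so when the interruptible lock returned early the function jumped straight to its exit label and the teardown below was silently skipped, with no way to report the failure back to configfs.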
@@ -2791,7 +2832,10 @@ static void target_core_drop_subdev( | |||
2791 | config_item_put(df_item); | 2832 | config_item_put(df_item); |
2792 | } | 2833 | } |
2793 | kfree(tg_pt_gp_cg->default_groups); | 2834 | kfree(tg_pt_gp_cg->default_groups); |
2794 | core_alua_free_tg_pt_gp(T10_ALUA(se_dev)->default_tg_pt_gp); | 2835 | /* |
2836 | * core_alua_free_tg_pt_gp() is called from ->default_tg_pt_gp | ||
2837 | * directly from target_core_alua_tg_pt_gp_release(). | ||
2838 | */ | ||
2795 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; | 2839 | T10_ALUA(se_dev)->default_tg_pt_gp = NULL; |
2796 | 2840 | ||
2797 | dev_cg = &se_dev->se_dev_group; | 2841 | dev_cg = &se_dev->se_dev_group; |
@@ -2800,38 +2844,12 @@ static void target_core_drop_subdev( | |||
2800 | dev_cg->default_groups[i] = NULL; | 2844 | dev_cg->default_groups[i] = NULL; |
2801 | config_item_put(df_item); | 2845 | config_item_put(df_item); |
2802 | } | 2846 | } |
2803 | |||
2804 | config_item_put(item); | ||
2805 | /* | 2847 | /* |
2806 | * This pointer will set when the storage is enabled with: | 2848 | * The releasing of se_dev and associated se_dev->se_dev_ptr is done |
2807 | * `echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable` | 2849 | * from target_core_dev_item_ops->release() ->target_core_dev_release(). |
2808 | */ | 2850 | */ |
2809 | if (se_dev->se_dev_ptr) { | 2851 | config_item_put(item); |
2810 | printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_" | ||
2811 | "virtual_device() for se_dev_ptr: %p\n", | ||
2812 | se_dev->se_dev_ptr); | ||
2813 | |||
2814 | ret = se_free_virtual_device(se_dev->se_dev_ptr, hba); | ||
2815 | if (ret < 0) | ||
2816 | goto hba_out; | ||
2817 | } else { | ||
2818 | /* | ||
2819 | * Release struct se_subsystem_dev->se_dev_su_ptr.. | ||
2820 | */ | ||
2821 | printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_" | ||
2822 | "device() for se_dev_su_ptr: %p\n", | ||
2823 | se_dev->se_dev_su_ptr); | ||
2824 | |||
2825 | t->free_device(se_dev->se_dev_su_ptr); | ||
2826 | } | ||
2827 | |||
2828 | printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem" | ||
2829 | "_dev_t: %p\n", se_dev); | ||
2830 | |||
2831 | hba_out: | ||
2832 | mutex_unlock(&hba->hba_access_mutex); | 2852 | mutex_unlock(&hba->hba_access_mutex); |
2833 | out: | ||
2834 | kfree(se_dev); | ||
2835 | } | 2853 | } |
2836 | 2854 | ||
2837 | static struct configfs_group_operations target_core_hba_group_ops = { | 2855 | static struct configfs_group_operations target_core_hba_group_ops = { |
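The block removed here is not lost: the se_free_virtual_device()/t->free_device() branch and the final kfree(se_dev) are exactly what was added to target_core_dev_release() earlier in this file, so the device is now torn down when its configfs item is released rather than when the directory is removed.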
@@ -2914,6 +2932,13 @@ SE_HBA_ATTR(hba_mode, S_IRUGO | S_IWUSR); | |||
2914 | 2932 | ||
2915 | CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); | 2933 | CONFIGFS_EATTR_OPS(target_core_hba, se_hba, hba_group); |
2916 | 2934 | ||
2935 | static void target_core_hba_release(struct config_item *item) | ||
2936 | { | ||
2937 | struct se_hba *hba = container_of(to_config_group(item), | ||
2938 | struct se_hba, hba_group); | ||
2939 | core_delete_hba(hba); | ||
2940 | } | ||
2941 | |||
2917 | static struct configfs_attribute *target_core_hba_attrs[] = { | 2942 | static struct configfs_attribute *target_core_hba_attrs[] = { |
2918 | &target_core_hba_hba_info.attr, | 2943 | &target_core_hba_hba_info.attr, |
2919 | &target_core_hba_hba_mode.attr, | 2944 | &target_core_hba_hba_mode.attr, |
@@ -2921,6 +2946,7 @@ static struct configfs_attribute *target_core_hba_attrs[] = { | |||
2921 | }; | 2946 | }; |
2922 | 2947 | ||
2923 | static struct configfs_item_operations target_core_hba_item_ops = { | 2948 | static struct configfs_item_operations target_core_hba_item_ops = { |
2949 | .release = target_core_hba_release, | ||
2924 | .show_attribute = target_core_hba_attr_show, | 2950 | .show_attribute = target_core_hba_attr_show, |
2925 | .store_attribute = target_core_hba_attr_store, | 2951 | .store_attribute = target_core_hba_attr_store, |
2926 | }; | 2952 | }; |
@@ -2997,10 +3023,11 @@ static void target_core_call_delhbafromtarget( | |||
2997 | struct config_group *group, | 3023 | struct config_group *group, |
2998 | struct config_item *item) | 3024 | struct config_item *item) |
2999 | { | 3025 | { |
3000 | struct se_hba *hba = item_to_hba(item); | 3026 | /* |
3001 | 3027 | * core_delete_hba() is called from target_core_hba_item_ops->release() | |
3028 | * -> target_core_hba_release() | ||
3029 | */ | ||
3002 | config_item_put(item); | 3030 | config_item_put(item); |
3003 | core_delete_hba(hba); | ||
3004 | } | 3031 | } |
3005 | 3032 | ||
3006 | static struct configfs_group_operations target_core_group_ops = { | 3033 | static struct configfs_group_operations target_core_group_ops = { |
@@ -3022,7 +3049,6 @@ static int target_core_init_configfs(void) | |||
3022 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; | 3049 | struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL; |
3023 | struct config_group *lu_gp_cg = NULL; | 3050 | struct config_group *lu_gp_cg = NULL; |
3024 | struct configfs_subsystem *subsys; | 3051 | struct configfs_subsystem *subsys; |
3025 | struct proc_dir_entry *scsi_target_proc = NULL; | ||
3026 | struct t10_alua_lu_gp *lu_gp; | 3052 | struct t10_alua_lu_gp *lu_gp; |
3027 | int ret; | 3053 | int ret; |
3028 | 3054 | ||
@@ -3128,21 +3154,10 @@ static int target_core_init_configfs(void) | |||
3128 | if (core_dev_setup_virtual_lun0() < 0) | 3154 | if (core_dev_setup_virtual_lun0() < 0) |
3129 | goto out; | 3155 | goto out; |
3130 | 3156 | ||
3131 | scsi_target_proc = proc_mkdir("scsi_target", 0); | ||
3132 | if (!(scsi_target_proc)) { | ||
3133 | printk(KERN_ERR "proc_mkdir(scsi_target, 0) failed\n"); | ||
3134 | goto out; | ||
3135 | } | ||
3136 | ret = init_scsi_target_mib(); | ||
3137 | if (ret < 0) | ||
3138 | goto out; | ||
3139 | |||
3140 | return 0; | 3157 | return 0; |
3141 | 3158 | ||
3142 | out: | 3159 | out: |
3143 | configfs_unregister_subsystem(subsys); | 3160 | configfs_unregister_subsystem(subsys); |
3144 | if (scsi_target_proc) | ||
3145 | remove_proc_entry("scsi_target", 0); | ||
3146 | core_dev_release_virtual_lun0(); | 3161 | core_dev_release_virtual_lun0(); |
3147 | rd_module_exit(); | 3162 | rd_module_exit(); |
3148 | out_global: | 3163 | out_global: |
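Dropping the proc_mkdir()/init_scsi_target_mib() step also removes one rung from the init function's goto-based unwind ladder. A generic sketch of that style (hypothetical register_part_*() helpers), showing why deleting a setup step deletes its matching unwind branch:

#include <linux/init.h>

int register_part_a(void);	/* e.g. configfs subsystem */
int register_part_b(void);	/* e.g. virtual lun0 setup */
void unregister_part_a(void);

static int __init example_init(void)
{
	int ret;

	ret = register_part_a();
	if (ret < 0)
		return ret;

	ret = register_part_b();
	if (ret < 0)
		goto out_a;	/* undo only what already succeeded */

	return 0;

out_a:
	unregister_part_a();
	return ret;
}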
@@ -3178,8 +3193,7 @@ static void target_core_exit_configfs(void) | |||
3178 | config_item_put(item); | 3193 | config_item_put(item); |
3179 | } | 3194 | } |
3180 | kfree(lu_gp_cg->default_groups); | 3195 | kfree(lu_gp_cg->default_groups); |
3181 | core_alua_free_lu_gp(se_global->default_lu_gp); | 3196 | lu_gp_cg->default_groups = NULL; |
3182 | se_global->default_lu_gp = NULL; | ||
3183 | 3197 | ||
3184 | alua_cg = &se_global->alua_group; | 3198 | alua_cg = &se_global->alua_group; |
3185 | for (i = 0; alua_cg->default_groups[i]; i++) { | 3199 | for (i = 0; alua_cg->default_groups[i]; i++) { |
@@ -3188,6 +3202,7 @@ static void target_core_exit_configfs(void) | |||
3188 | config_item_put(item); | 3202 | config_item_put(item); |
3189 | } | 3203 | } |
3190 | kfree(alua_cg->default_groups); | 3204 | kfree(alua_cg->default_groups); |
3205 | alua_cg->default_groups = NULL; | ||
3191 | 3206 | ||
3192 | hba_cg = &se_global->target_core_hbagroup; | 3207 | hba_cg = &se_global->target_core_hbagroup; |
3193 | for (i = 0; hba_cg->default_groups[i]; i++) { | 3208 | for (i = 0; hba_cg->default_groups[i]; i++) { |
@@ -3196,20 +3211,20 @@ static void target_core_exit_configfs(void) | |||
3196 | config_item_put(item); | 3211 | config_item_put(item); |
3197 | } | 3212 | } |
3198 | kfree(hba_cg->default_groups); | 3213 | kfree(hba_cg->default_groups); |
3199 | 3214 | hba_cg->default_groups = NULL; | |
3200 | for (i = 0; subsys->su_group.default_groups[i]; i++) { | 3215 | /* |
3201 | item = &subsys->su_group.default_groups[i]->cg_item; | 3216 | * We expect subsys->su_group.default_groups to be released |
3202 | subsys->su_group.default_groups[i] = NULL; | 3217 | * by configfs subsystem provider logic.. |
3203 | config_item_put(item); | 3218 | */ |
3204 | } | 3219 | configfs_unregister_subsystem(subsys); |
3205 | kfree(subsys->su_group.default_groups); | 3220 | kfree(subsys->su_group.default_groups); |
3206 | 3221 | ||
3207 | configfs_unregister_subsystem(subsys); | 3222 | core_alua_free_lu_gp(se_global->default_lu_gp); |
3223 | se_global->default_lu_gp = NULL; | ||
3224 | |||
3208 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" | 3225 | printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric" |
3209 | " Infrastructure\n"); | 3226 | " Infrastructure\n"); |
3210 | 3227 | ||
3211 | remove_scsi_target_mib(); | ||
3212 | remove_proc_entry("scsi_target", 0); | ||
3213 | core_dev_release_virtual_lun0(); | 3228 | core_dev_release_virtual_lun0(); |
3214 | rd_module_exit(); | 3229 | rd_module_exit(); |
3215 | release_se_global(); | 3230 | release_se_global(); |
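Two things change in module exit: the default ALUA LU group is freed only after configfs_unregister_subsystem() has run (its configfs item, like the others, is now released through the ->release() callbacks), and the loop that manually put the subsystem's default groups is gone, since unregistering the subsystem is expected to drop those references itself.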
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 317ce58d426d..5da051a07fa3 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c | |||
@@ -373,11 +373,11 @@ int core_update_device_list_for_node( | |||
373 | /* | 373 | /* |
374 | * deve->se_lun_acl will be NULL for demo-mode created LUNs | 374 | * deve->se_lun_acl will be NULL for demo-mode created LUNs |
375 | * that have not been explictly concerted to MappedLUNs -> | 375 | * that have not been explictly concerted to MappedLUNs -> |
376 | * struct se_lun_acl. | 376 | * struct se_lun_acl, but we remove deve->alua_port_list from |
377 | * port->sep_alua_list. This also means that active UAs and | ||
378 | * NodeACL context specific PR metadata for demo-mode | ||
379 | * MappedLUN *deve will be released below.. | ||
377 | */ | 380 | */ |
378 | if (!(deve->se_lun_acl)) | ||
379 | return 0; | ||
380 | |||
381 | spin_lock_bh(&port->sep_alua_lock); | 381 | spin_lock_bh(&port->sep_alua_lock); |
382 | list_del(&deve->alua_port_list); | 382 | list_del(&deve->alua_port_list); |
383 | spin_unlock_bh(&port->sep_alua_lock); | 383 | spin_unlock_bh(&port->sep_alua_lock); |
@@ -395,12 +395,14 @@ int core_update_device_list_for_node( | |||
395 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" | 395 | printk(KERN_ERR "struct se_dev_entry->se_lun_acl" |
396 | " already set for demo mode -> explict" | 396 | " already set for demo mode -> explict" |
397 | " LUN ACL transition\n"); | 397 | " LUN ACL transition\n"); |
398 | spin_unlock_irq(&nacl->device_list_lock); | ||
398 | return -1; | 399 | return -1; |
399 | } | 400 | } |
400 | if (deve->se_lun != lun) { | 401 | if (deve->se_lun != lun) { |
401 | printk(KERN_ERR "struct se_dev_entry->se_lun does" | 402 | printk(KERN_ERR "struct se_dev_entry->se_lun does" |
402 | " match passed struct se_lun for demo mode" | 403 | " match passed struct se_lun for demo mode" |
403 | " -> explict LUN ACL transition\n"); | 404 | " -> explict LUN ACL transition\n"); |
405 | spin_unlock_irq(&nacl->device_list_lock); | ||
404 | return -1; | 406 | return -1; |
405 | } | 407 | } |
406 | deve->se_lun_acl = lun_acl; | 408 | deve->se_lun_acl = lun_acl; |
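The two added spin_unlock_irq() calls fix error paths that previously returned with nacl->device_list_lock still held. A minimal sketch of the rule being enforced, with hypothetical example_* names: every return taken after the lock is acquired must release it first.

#include <linux/spinlock.h>

struct example_ctx {
	spinlock_t lock;
	int busy;
	int state;
};

static int example_update(struct example_ctx *e)
{
	spin_lock_irq(&e->lock);
	if (e->busy) {
		spin_unlock_irq(&e->lock);	/* unlock on the early error path too */
		return -1;
	}
	e->state = 1;
	spin_unlock_irq(&e->lock);
	return 0;
}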
@@ -865,9 +867,6 @@ static void se_dev_stop(struct se_device *dev) | |||
865 | } | 867 | } |
866 | } | 868 | } |
867 | spin_unlock(&hba->device_lock); | 869 | spin_unlock(&hba->device_lock); |
868 | |||
869 | while (atomic_read(&hba->dev_mib_access_count)) | ||
870 | cpu_relax(); | ||
871 | } | 870 | } |
872 | 871 | ||
873 | int se_dev_check_online(struct se_device *dev) | 872 | int se_dev_check_online(struct se_device *dev) |
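The busy-wait on hba->dev_mib_access_count can go as well: the /proc MIB readers removed by this patch were what took that reference, so with target_core_mib.c deleted there is nothing left for se_dev_stop() to wait for.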
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index 32b148d7e261..b65d1c8e7740 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c | |||
@@ -214,12 +214,22 @@ TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); | |||
214 | 214 | ||
215 | CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); | 215 | CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); |
216 | 216 | ||
217 | static void target_fabric_mappedlun_release(struct config_item *item) | ||
218 | { | ||
219 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
220 | struct se_lun_acl, se_lun_group); | ||
221 | struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; | ||
222 | |||
223 | core_dev_free_initiator_node_lun_acl(se_tpg, lacl); | ||
224 | } | ||
225 | |||
217 | static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { | 226 | static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { |
218 | &target_fabric_mappedlun_write_protect.attr, | 227 | &target_fabric_mappedlun_write_protect.attr, |
219 | NULL, | 228 | NULL, |
220 | }; | 229 | }; |
221 | 230 | ||
222 | static struct configfs_item_operations target_fabric_mappedlun_item_ops = { | 231 | static struct configfs_item_operations target_fabric_mappedlun_item_ops = { |
232 | .release = target_fabric_mappedlun_release, | ||
223 | .show_attribute = target_fabric_mappedlun_attr_show, | 233 | .show_attribute = target_fabric_mappedlun_attr_show, |
224 | .store_attribute = target_fabric_mappedlun_attr_store, | 234 | .store_attribute = target_fabric_mappedlun_attr_store, |
225 | .allow_link = target_fabric_mappedlun_link, | 235 | .allow_link = target_fabric_mappedlun_link, |
@@ -337,15 +347,21 @@ static void target_fabric_drop_mappedlun( | |||
337 | struct config_group *group, | 347 | struct config_group *group, |
338 | struct config_item *item) | 348 | struct config_item *item) |
339 | { | 349 | { |
340 | struct se_lun_acl *lacl = container_of(to_config_group(item), | ||
341 | struct se_lun_acl, se_lun_group); | ||
342 | struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; | ||
343 | |||
344 | config_item_put(item); | 350 | config_item_put(item); |
345 | core_dev_free_initiator_node_lun_acl(se_tpg, lacl); | 351 | } |
352 | |||
353 | static void target_fabric_nacl_base_release(struct config_item *item) | ||
354 | { | ||
355 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | ||
356 | struct se_node_acl, acl_group); | ||
357 | struct se_portal_group *se_tpg = se_nacl->se_tpg; | ||
358 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
359 | |||
360 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | ||
346 | } | 361 | } |
347 | 362 | ||
348 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { | 363 | static struct configfs_item_operations target_fabric_nacl_base_item_ops = { |
364 | .release = target_fabric_nacl_base_release, | ||
349 | .show_attribute = target_fabric_nacl_base_attr_show, | 365 | .show_attribute = target_fabric_nacl_base_attr_show, |
350 | .store_attribute = target_fabric_nacl_base_attr_store, | 366 | .store_attribute = target_fabric_nacl_base_attr_store, |
351 | }; | 367 | }; |
@@ -404,9 +420,6 @@ static void target_fabric_drop_nodeacl( | |||
404 | struct config_group *group, | 420 | struct config_group *group, |
405 | struct config_item *item) | 421 | struct config_item *item) |
406 | { | 422 | { |
407 | struct se_portal_group *se_tpg = container_of(group, | ||
408 | struct se_portal_group, tpg_acl_group); | ||
409 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
410 | struct se_node_acl *se_nacl = container_of(to_config_group(item), | 423 | struct se_node_acl *se_nacl = container_of(to_config_group(item), |
411 | struct se_node_acl, acl_group); | 424 | struct se_node_acl, acl_group); |
412 | struct config_item *df_item; | 425 | struct config_item *df_item; |
@@ -419,9 +432,10 @@ static void target_fabric_drop_nodeacl( | |||
419 | nacl_cg->default_groups[i] = NULL; | 432 | nacl_cg->default_groups[i] = NULL; |
420 | config_item_put(df_item); | 433 | config_item_put(df_item); |
421 | } | 434 | } |
422 | 435 | /* | |
436 | * struct se_node_acl free is done in target_fabric_nacl_base_release() | ||
437 | */ | ||
423 | config_item_put(item); | 438 | config_item_put(item); |
424 | tf->tf_ops.fabric_drop_nodeacl(se_nacl); | ||
425 | } | 439 | } |
426 | 440 | ||
427 | static struct configfs_group_operations target_fabric_nacl_group_ops = { | 441 | static struct configfs_group_operations target_fabric_nacl_group_ops = { |
@@ -437,7 +451,18 @@ TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); | |||
437 | 451 | ||
438 | CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); | 452 | CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); |
439 | 453 | ||
454 | static void target_fabric_np_base_release(struct config_item *item) | ||
455 | { | ||
456 | struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), | ||
457 | struct se_tpg_np, tpg_np_group); | ||
458 | struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; | ||
459 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | ||
460 | |||
461 | tf->tf_ops.fabric_drop_np(se_tpg_np); | ||
462 | } | ||
463 | |||
440 | static struct configfs_item_operations target_fabric_np_base_item_ops = { | 464 | static struct configfs_item_operations target_fabric_np_base_item_ops = { |
465 | .release = target_fabric_np_base_release, | ||
441 | .show_attribute = target_fabric_np_base_attr_show, | 466 | .show_attribute = target_fabric_np_base_attr_show, |
442 | .store_attribute = target_fabric_np_base_attr_store, | 467 | .store_attribute = target_fabric_np_base_attr_store, |
443 | }; | 468 | }; |
@@ -466,6 +491,7 @@ static struct config_group *target_fabric_make_np( | |||
466 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) | 491 | if (!(se_tpg_np) || IS_ERR(se_tpg_np)) |
467 | return ERR_PTR(-EINVAL); | 492 | return ERR_PTR(-EINVAL); |
468 | 493 | ||
494 | se_tpg_np->tpg_np_parent = se_tpg; | ||
469 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, | 495 | config_group_init_type_name(&se_tpg_np->tpg_np_group, name, |
470 | &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); | 496 | &TF_CIT_TMPL(tf)->tfc_tpg_np_base_cit); |
471 | 497 | ||
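The new se_tpg_np->tpg_np_parent assignment exists for the release path: once target_fabric_drop_np() no longer receives the parent group, the ->release() callback must be able to reach the owning se_portal_group from the item alone. A generic sketch of that creation-time back-pointer idiom (hypothetical example_* types):

#include <linux/slab.h>

struct example_parent;

struct example_child {
	struct example_parent *parent;	/* set once, at creation time */
};

static struct example_child *example_make_child(struct example_parent *p)
{
	struct example_child *c;

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c)
		return NULL;
	c->parent = p;	/* the release callback later sees only the child */
	return c;
}

static void example_release_child(struct example_child *c)
{
	/* reach the parent through the stored back-pointer, then free */
	(void)c->parent;
	kfree(c);
}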
@@ -476,14 +502,10 @@ static void target_fabric_drop_np( | |||
476 | struct config_group *group, | 502 | struct config_group *group, |
477 | struct config_item *item) | 503 | struct config_item *item) |
478 | { | 504 | { |
479 | struct se_portal_group *se_tpg = container_of(group, | 505 | /* |
480 | struct se_portal_group, tpg_np_group); | 506 | * struct se_tpg_np is released via target_fabric_np_base_release() |
481 | struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; | 507 | */ |
482 | struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), | ||
483 | struct se_tpg_np, tpg_np_group); | ||
484 | |||
485 | config_item_put(item); | 508 | config_item_put(item); |
486 | tf->tf_ops.fabric_drop_np(se_tpg_np); | ||
487 | } | 509 | } |
488 | 510 | ||
489 | static struct configfs_group_operations target_fabric_np_group_ops = { | 511 | static struct configfs_group_operations target_fabric_np_group_ops = { |
@@ -814,7 +836,18 @@ TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); | |||
814 | */ | 836 | */ |
815 | CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); | 837 | CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); |
816 | 838 | ||
839 | static void target_fabric_tpg_release(struct config_item *item) | ||
840 | { | ||
841 | struct se_portal_group *se_tpg = container_of(to_config_group(item), | ||
842 | struct se_portal_group, tpg_group); | ||
843 | struct se_wwn *wwn = se_tpg->se_tpg_wwn; | ||
844 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
845 | |||
846 | tf->tf_ops.fabric_drop_tpg(se_tpg); | ||
847 | } | ||
848 | |||
817 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { | 849 | static struct configfs_item_operations target_fabric_tpg_base_item_ops = { |
850 | .release = target_fabric_tpg_release, | ||
818 | .show_attribute = target_fabric_tpg_attr_show, | 851 | .show_attribute = target_fabric_tpg_attr_show, |
819 | .store_attribute = target_fabric_tpg_attr_store, | 852 | .store_attribute = target_fabric_tpg_attr_store, |
820 | }; | 853 | }; |
@@ -872,8 +905,6 @@ static void target_fabric_drop_tpg( | |||
872 | struct config_group *group, | 905 | struct config_group *group, |
873 | struct config_item *item) | 906 | struct config_item *item) |
874 | { | 907 | { |
875 | struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); | ||
876 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
877 | struct se_portal_group *se_tpg = container_of(to_config_group(item), | 908 | struct se_portal_group *se_tpg = container_of(to_config_group(item), |
878 | struct se_portal_group, tpg_group); | 909 | struct se_portal_group, tpg_group); |
879 | struct config_group *tpg_cg = &se_tpg->tpg_group; | 910 | struct config_group *tpg_cg = &se_tpg->tpg_group; |
@@ -890,15 +921,28 @@ static void target_fabric_drop_tpg( | |||
890 | } | 921 | } |
891 | 922 | ||
892 | config_item_put(item); | 923 | config_item_put(item); |
893 | tf->tf_ops.fabric_drop_tpg(se_tpg); | ||
894 | } | 924 | } |
895 | 925 | ||
926 | static void target_fabric_release_wwn(struct config_item *item) | ||
927 | { | ||
928 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
929 | struct se_wwn, wwn_group); | ||
930 | struct target_fabric_configfs *tf = wwn->wwn_tf; | ||
931 | |||
932 | tf->tf_ops.fabric_drop_wwn(wwn); | ||
933 | } | ||
934 | |||
935 | static struct configfs_item_operations target_fabric_tpg_item_ops = { | ||
936 | .release = target_fabric_release_wwn, | ||
937 | }; | ||
938 | |||
896 | static struct configfs_group_operations target_fabric_tpg_group_ops = { | 939 | static struct configfs_group_operations target_fabric_tpg_group_ops = { |
897 | .make_group = target_fabric_make_tpg, | 940 | .make_group = target_fabric_make_tpg, |
898 | .drop_item = target_fabric_drop_tpg, | 941 | .drop_item = target_fabric_drop_tpg, |
899 | }; | 942 | }; |
900 | 943 | ||
901 | TF_CIT_SETUP(tpg, NULL, &target_fabric_tpg_group_ops, NULL); | 944 | TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, |
945 | NULL); | ||
902 | 946 | ||
903 | /* End of tfc_tpg_cit */ | 947 | /* End of tfc_tpg_cit */ |
904 | 948 | ||
@@ -932,13 +976,7 @@ static void target_fabric_drop_wwn( | |||
932 | struct config_group *group, | 976 | struct config_group *group, |
933 | struct config_item *item) | 977 | struct config_item *item) |
934 | { | 978 | { |
935 | struct target_fabric_configfs *tf = container_of(group, | ||
936 | struct target_fabric_configfs, tf_group); | ||
937 | struct se_wwn *wwn = container_of(to_config_group(item), | ||
938 | struct se_wwn, wwn_group); | ||
939 | |||
940 | config_item_put(item); | 979 | config_item_put(item); |
941 | tf->tf_ops.fabric_drop_wwn(wwn); | ||
942 | } | 980 | } |
943 | 981 | ||
944 | static struct configfs_group_operations target_fabric_wwn_group_ops = { | 982 | static struct configfs_group_operations target_fabric_wwn_group_ops = { |
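The same pattern applies to the WWN endpoints: target_fabric_drop_wwn() is reduced to config_item_put(), and the struct se_wwn is handed back to the fabric module from target_fabric_release_wwn(), which this patch wires up as the ->release() of the newly added target_fabric_tpg_item_ops.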
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c6e0d757e76e..67f0c09983c8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c | |||
@@ -154,7 +154,7 @@ static struct se_device *iblock_create_virtdevice( | |||
154 | 154 | ||
155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, | 155 | bd = blkdev_get_by_path(ib_dev->ibd_udev_path, |
156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); | 156 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, ib_dev); |
157 | if (!(bd)) | 157 | if (IS_ERR(bd)) |
158 | goto failed; | 158 | goto failed; |
159 | /* | 159 | /* |
160 | * Setup the local scope queue_limits from struct request_queue->limits | 160 | * Setup the local scope queue_limits from struct request_queue->limits |
@@ -220,8 +220,10 @@ static void iblock_free_device(void *p) | |||
220 | { | 220 | { |
221 | struct iblock_dev *ib_dev = p; | 221 | struct iblock_dev *ib_dev = p; |
222 | 222 | ||
223 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); | 223 | if (ib_dev->ibd_bd != NULL) |
224 | bioset_free(ib_dev->ibd_bio_set); | 224 | blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); |
225 | if (ib_dev->ibd_bio_set != NULL) | ||
226 | bioset_free(ib_dev->ibd_bio_set); | ||
225 | kfree(ib_dev); | 227 | kfree(ib_dev); |
226 | } | 228 | } |
227 | 229 | ||
diff --git a/drivers/target/target_core_mib.c b/drivers/target/target_core_mib.c deleted file mode 100644 index d5a48aa0d2d1..000000000000 --- a/drivers/target/target_core_mib.c +++ /dev/null | |||
@@ -1,1078 +0,0 @@ | |||
1 | /******************************************************************************* | ||
2 | * Filename: target_core_mib.c | ||
3 | * | ||
4 | * Copyright (c) 2006-2007 SBE, Inc. All Rights Reserved. | ||
5 | * Copyright (c) 2007-2010 Rising Tide Systems | ||
6 | * Copyright (c) 2008-2010 Linux-iSCSI.org | ||
7 | * | ||
8 | * Nicholas A. Bellinger <nab@linux-iscsi.org> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, | ||
16 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | * GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
23 | * | ||
24 | ******************************************************************************/ | ||
25 | |||
26 | |||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/delay.h> | ||
30 | #include <linux/timer.h> | ||
31 | #include <linux/string.h> | ||
32 | #include <linux/version.h> | ||
33 | #include <generated/utsrelease.h> | ||
34 | #include <linux/utsname.h> | ||
35 | #include <linux/proc_fs.h> | ||
36 | #include <linux/seq_file.h> | ||
37 | #include <linux/blkdev.h> | ||
38 | #include <scsi/scsi.h> | ||
39 | #include <scsi/scsi_device.h> | ||
40 | #include <scsi/scsi_host.h> | ||
41 | |||
42 | #include <target/target_core_base.h> | ||
43 | #include <target/target_core_transport.h> | ||
44 | #include <target/target_core_fabric_ops.h> | ||
45 | #include <target/target_core_configfs.h> | ||
46 | |||
47 | #include "target_core_hba.h" | ||
48 | #include "target_core_mib.h" | ||
49 | |||
50 | /* SCSI mib table index */ | ||
51 | static struct scsi_index_table scsi_index_table; | ||
52 | |||
53 | #ifndef INITIAL_JIFFIES | ||
54 | #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) | ||
55 | #endif | ||
56 | |||
57 | /* SCSI Instance Table */ | ||
58 | #define SCSI_INST_SW_INDEX 1 | ||
59 | #define SCSI_TRANSPORT_INDEX 1 | ||
60 | |||
61 | #define NONE "None" | ||
62 | #define ISPRINT(a) ((a >= ' ') && (a <= '~')) | ||
63 | |||
64 | static inline int list_is_first(const struct list_head *list, | ||
65 | const struct list_head *head) | ||
66 | { | ||
67 | return list->prev == head; | ||
68 | } | ||
69 | |||
70 | static void *locate_hba_start( | ||
71 | struct seq_file *seq, | ||
72 | loff_t *pos) | ||
73 | { | ||
74 | spin_lock(&se_global->g_device_lock); | ||
75 | return seq_list_start(&se_global->g_se_dev_list, *pos); | ||
76 | } | ||
77 | |||
78 | static void *locate_hba_next( | ||
79 | struct seq_file *seq, | ||
80 | void *v, | ||
81 | loff_t *pos) | ||
82 | { | ||
83 | return seq_list_next(v, &se_global->g_se_dev_list, pos); | ||
84 | } | ||
85 | |||
86 | static void locate_hba_stop(struct seq_file *seq, void *v) | ||
87 | { | ||
88 | spin_unlock(&se_global->g_device_lock); | ||
89 | } | ||
90 | |||
91 | /**************************************************************************** | ||
92 | * SCSI MIB Tables | ||
93 | ****************************************************************************/ | ||
94 | |||
95 | /* | ||
96 | * SCSI Instance Table | ||
97 | */ | ||
98 | static void *scsi_inst_seq_start( | ||
99 | struct seq_file *seq, | ||
100 | loff_t *pos) | ||
101 | { | ||
102 | spin_lock(&se_global->hba_lock); | ||
103 | return seq_list_start(&se_global->g_hba_list, *pos); | ||
104 | } | ||
105 | |||
106 | static void *scsi_inst_seq_next( | ||
107 | struct seq_file *seq, | ||
108 | void *v, | ||
109 | loff_t *pos) | ||
110 | { | ||
111 | return seq_list_next(v, &se_global->g_hba_list, pos); | ||
112 | } | ||
113 | |||
114 | static void scsi_inst_seq_stop(struct seq_file *seq, void *v) | ||
115 | { | ||
116 | spin_unlock(&se_global->hba_lock); | ||
117 | } | ||
118 | |||
119 | static int scsi_inst_seq_show(struct seq_file *seq, void *v) | ||
120 | { | ||
121 | struct se_hba *hba = list_entry(v, struct se_hba, hba_list); | ||
122 | |||
123 | if (list_is_first(&hba->hba_list, &se_global->g_hba_list)) | ||
124 | seq_puts(seq, "inst sw_indx\n"); | ||
125 | |||
126 | seq_printf(seq, "%u %u\n", hba->hba_index, SCSI_INST_SW_INDEX); | ||
127 | seq_printf(seq, "plugin: %s version: %s\n", | ||
128 | hba->transport->name, TARGET_CORE_VERSION); | ||
129 | |||
130 | return 0; | ||
131 | } | ||
132 | |||
133 | static const struct seq_operations scsi_inst_seq_ops = { | ||
134 | .start = scsi_inst_seq_start, | ||
135 | .next = scsi_inst_seq_next, | ||
136 | .stop = scsi_inst_seq_stop, | ||
137 | .show = scsi_inst_seq_show | ||
138 | }; | ||
139 | |||
140 | static int scsi_inst_seq_open(struct inode *inode, struct file *file) | ||
141 | { | ||
142 | return seq_open(file, &scsi_inst_seq_ops); | ||
143 | } | ||
144 | |||
145 | static const struct file_operations scsi_inst_seq_fops = { | ||
146 | .owner = THIS_MODULE, | ||
147 | .open = scsi_inst_seq_open, | ||
148 | .read = seq_read, | ||
149 | .llseek = seq_lseek, | ||
150 | .release = seq_release, | ||
151 | }; | ||
152 | |||
153 | /* | ||
154 | * SCSI Device Table | ||
155 | */ | ||
156 | static void *scsi_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
157 | { | ||
158 | return locate_hba_start(seq, pos); | ||
159 | } | ||
160 | |||
161 | static void *scsi_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
162 | { | ||
163 | return locate_hba_next(seq, v, pos); | ||
164 | } | ||
165 | |||
166 | static void scsi_dev_seq_stop(struct seq_file *seq, void *v) | ||
167 | { | ||
168 | locate_hba_stop(seq, v); | ||
169 | } | ||
170 | |||
171 | static int scsi_dev_seq_show(struct seq_file *seq, void *v) | ||
172 | { | ||
173 | struct se_hba *hba; | ||
174 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
175 | g_se_dev_list); | ||
176 | struct se_device *dev = se_dev->se_dev_ptr; | ||
177 | char str[28]; | ||
178 | int k; | ||
179 | |||
180 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
181 | seq_puts(seq, "inst indx role ports\n"); | ||
182 | |||
183 | if (!(dev)) | ||
184 | return 0; | ||
185 | |||
186 | hba = dev->se_hba; | ||
187 | if (!(hba)) { | ||
188 | /* Log error ? */ | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | seq_printf(seq, "%u %u %s %u\n", hba->hba_index, | ||
193 | dev->dev_index, "Target", dev->dev_port_count); | ||
194 | |||
195 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
196 | |||
197 | /* vendor */ | ||
198 | for (k = 0; k < 8; k++) | ||
199 | str[k] = ISPRINT(DEV_T10_WWN(dev)->vendor[k]) ? | ||
200 | DEV_T10_WWN(dev)->vendor[k] : 0x20; | ||
201 | str[k] = 0x20; | ||
202 | |||
203 | /* model */ | ||
204 | for (k = 0; k < 16; k++) | ||
205 | str[k+9] = ISPRINT(DEV_T10_WWN(dev)->model[k]) ? | ||
206 | DEV_T10_WWN(dev)->model[k] : 0x20; | ||
207 | str[k + 9] = 0; | ||
208 | |||
209 | seq_printf(seq, "dev_alias: %s\n", str); | ||
210 | |||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static const struct seq_operations scsi_dev_seq_ops = { | ||
215 | .start = scsi_dev_seq_start, | ||
216 | .next = scsi_dev_seq_next, | ||
217 | .stop = scsi_dev_seq_stop, | ||
218 | .show = scsi_dev_seq_show | ||
219 | }; | ||
220 | |||
221 | static int scsi_dev_seq_open(struct inode *inode, struct file *file) | ||
222 | { | ||
223 | return seq_open(file, &scsi_dev_seq_ops); | ||
224 | } | ||
225 | |||
226 | static const struct file_operations scsi_dev_seq_fops = { | ||
227 | .owner = THIS_MODULE, | ||
228 | .open = scsi_dev_seq_open, | ||
229 | .read = seq_read, | ||
230 | .llseek = seq_lseek, | ||
231 | .release = seq_release, | ||
232 | }; | ||
233 | |||
234 | /* | ||
235 | * SCSI Port Table | ||
236 | */ | ||
237 | static void *scsi_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
238 | { | ||
239 | return locate_hba_start(seq, pos); | ||
240 | } | ||
241 | |||
242 | static void *scsi_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
243 | { | ||
244 | return locate_hba_next(seq, v, pos); | ||
245 | } | ||
246 | |||
247 | static void scsi_port_seq_stop(struct seq_file *seq, void *v) | ||
248 | { | ||
249 | locate_hba_stop(seq, v); | ||
250 | } | ||
251 | |||
252 | static int scsi_port_seq_show(struct seq_file *seq, void *v) | ||
253 | { | ||
254 | struct se_hba *hba; | ||
255 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
256 | g_se_dev_list); | ||
257 | struct se_device *dev = se_dev->se_dev_ptr; | ||
258 | struct se_port *sep, *sep_tmp; | ||
259 | |||
260 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
261 | seq_puts(seq, "inst device indx role busy_count\n"); | ||
262 | |||
263 | if (!(dev)) | ||
264 | return 0; | ||
265 | |||
266 | hba = dev->se_hba; | ||
267 | if (!(hba)) { | ||
268 | /* Log error ? */ | ||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | /* FIXME: scsiPortBusyStatuses count */ | ||
273 | spin_lock(&dev->se_port_lock); | ||
274 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
275 | seq_printf(seq, "%u %u %u %s%u %u\n", hba->hba_index, | ||
276 | dev->dev_index, sep->sep_index, "Device", | ||
277 | dev->dev_index, 0); | ||
278 | } | ||
279 | spin_unlock(&dev->se_port_lock); | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | static const struct seq_operations scsi_port_seq_ops = { | ||
285 | .start = scsi_port_seq_start, | ||
286 | .next = scsi_port_seq_next, | ||
287 | .stop = scsi_port_seq_stop, | ||
288 | .show = scsi_port_seq_show | ||
289 | }; | ||
290 | |||
291 | static int scsi_port_seq_open(struct inode *inode, struct file *file) | ||
292 | { | ||
293 | return seq_open(file, &scsi_port_seq_ops); | ||
294 | } | ||
295 | |||
296 | static const struct file_operations scsi_port_seq_fops = { | ||
297 | .owner = THIS_MODULE, | ||
298 | .open = scsi_port_seq_open, | ||
299 | .read = seq_read, | ||
300 | .llseek = seq_lseek, | ||
301 | .release = seq_release, | ||
302 | }; | ||
303 | |||
304 | /* | ||
305 | * SCSI Transport Table | ||
306 | */ | ||
307 | static void *scsi_transport_seq_start(struct seq_file *seq, loff_t *pos) | ||
308 | { | ||
309 | return locate_hba_start(seq, pos); | ||
310 | } | ||
311 | |||
312 | static void *scsi_transport_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
313 | { | ||
314 | return locate_hba_next(seq, v, pos); | ||
315 | } | ||
316 | |||
317 | static void scsi_transport_seq_stop(struct seq_file *seq, void *v) | ||
318 | { | ||
319 | locate_hba_stop(seq, v); | ||
320 | } | ||
321 | |||
322 | static int scsi_transport_seq_show(struct seq_file *seq, void *v) | ||
323 | { | ||
324 | struct se_hba *hba; | ||
325 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
326 | g_se_dev_list); | ||
327 | struct se_device *dev = se_dev->se_dev_ptr; | ||
328 | struct se_port *se, *se_tmp; | ||
329 | struct se_portal_group *tpg; | ||
330 | struct t10_wwn *wwn; | ||
331 | char buf[64]; | ||
332 | |||
333 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
334 | seq_puts(seq, "inst device indx dev_name\n"); | ||
335 | |||
336 | if (!(dev)) | ||
337 | return 0; | ||
338 | |||
339 | hba = dev->se_hba; | ||
340 | if (!(hba)) { | ||
341 | /* Log error ? */ | ||
342 | return 0; | ||
343 | } | ||
344 | |||
345 | wwn = DEV_T10_WWN(dev); | ||
346 | |||
347 | spin_lock(&dev->se_port_lock); | ||
348 | list_for_each_entry_safe(se, se_tmp, &dev->dev_sep_list, sep_list) { | ||
349 | tpg = se->sep_tpg; | ||
350 | sprintf(buf, "scsiTransport%s", | ||
351 | TPG_TFO(tpg)->get_fabric_name()); | ||
352 | |||
353 | seq_printf(seq, "%u %s %u %s+%s\n", | ||
354 | hba->hba_index, /* scsiTransportIndex */ | ||
355 | buf, /* scsiTransportType */ | ||
356 | (TPG_TFO(tpg)->tpg_get_inst_index != NULL) ? | ||
357 | TPG_TFO(tpg)->tpg_get_inst_index(tpg) : | ||
358 | 0, | ||
359 | TPG_TFO(tpg)->tpg_get_wwn(tpg), | ||
360 | (strlen(wwn->unit_serial)) ? | ||
361 | /* scsiTransportDevName */ | ||
362 | wwn->unit_serial : wwn->vendor); | ||
363 | } | ||
364 | spin_unlock(&dev->se_port_lock); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static const struct seq_operations scsi_transport_seq_ops = { | ||
370 | .start = scsi_transport_seq_start, | ||
371 | .next = scsi_transport_seq_next, | ||
372 | .stop = scsi_transport_seq_stop, | ||
373 | .show = scsi_transport_seq_show | ||
374 | }; | ||
375 | |||
376 | static int scsi_transport_seq_open(struct inode *inode, struct file *file) | ||
377 | { | ||
378 | return seq_open(file, &scsi_transport_seq_ops); | ||
379 | } | ||
380 | |||
381 | static const struct file_operations scsi_transport_seq_fops = { | ||
382 | .owner = THIS_MODULE, | ||
383 | .open = scsi_transport_seq_open, | ||
384 | .read = seq_read, | ||
385 | .llseek = seq_lseek, | ||
386 | .release = seq_release, | ||
387 | }; | ||
388 | |||
389 | /* | ||
390 | * SCSI Target Device Table | ||
391 | */ | ||
392 | static void *scsi_tgt_dev_seq_start(struct seq_file *seq, loff_t *pos) | ||
393 | { | ||
394 | return locate_hba_start(seq, pos); | ||
395 | } | ||
396 | |||
397 | static void *scsi_tgt_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
398 | { | ||
399 | return locate_hba_next(seq, v, pos); | ||
400 | } | ||
401 | |||
402 | static void scsi_tgt_dev_seq_stop(struct seq_file *seq, void *v) | ||
403 | { | ||
404 | locate_hba_stop(seq, v); | ||
405 | } | ||
406 | |||
407 | |||
408 | #define LU_COUNT 1 /* for now */ | ||
409 | static int scsi_tgt_dev_seq_show(struct seq_file *seq, void *v) | ||
410 | { | ||
411 | struct se_hba *hba; | ||
412 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
413 | g_se_dev_list); | ||
414 | struct se_device *dev = se_dev->se_dev_ptr; | ||
415 | int non_accessible_lus = 0; | ||
416 | char status[16]; | ||
417 | |||
418 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
419 | seq_puts(seq, "inst indx num_LUs status non_access_LUs" | ||
420 | " resets\n"); | ||
421 | |||
422 | if (!(dev)) | ||
423 | return 0; | ||
424 | |||
425 | hba = dev->se_hba; | ||
426 | if (!(hba)) { | ||
427 | /* Log error ? */ | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | switch (dev->dev_status) { | ||
432 | case TRANSPORT_DEVICE_ACTIVATED: | ||
433 | strcpy(status, "activated"); | ||
434 | break; | ||
435 | case TRANSPORT_DEVICE_DEACTIVATED: | ||
436 | strcpy(status, "deactivated"); | ||
437 | non_accessible_lus = 1; | ||
438 | break; | ||
439 | case TRANSPORT_DEVICE_SHUTDOWN: | ||
440 | strcpy(status, "shutdown"); | ||
441 | non_accessible_lus = 1; | ||
442 | break; | ||
443 | case TRANSPORT_DEVICE_OFFLINE_ACTIVATED: | ||
444 | case TRANSPORT_DEVICE_OFFLINE_DEACTIVATED: | ||
445 | strcpy(status, "offline"); | ||
446 | non_accessible_lus = 1; | ||
447 | break; | ||
448 | default: | ||
449 | sprintf(status, "unknown(%d)", dev->dev_status); | ||
450 | non_accessible_lus = 1; | ||
451 | } | ||
452 | |||
453 | seq_printf(seq, "%u %u %u %s %u %u\n", | ||
454 | hba->hba_index, dev->dev_index, LU_COUNT, | ||
455 | status, non_accessible_lus, dev->num_resets); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static const struct seq_operations scsi_tgt_dev_seq_ops = { | ||
461 | .start = scsi_tgt_dev_seq_start, | ||
462 | .next = scsi_tgt_dev_seq_next, | ||
463 | .stop = scsi_tgt_dev_seq_stop, | ||
464 | .show = scsi_tgt_dev_seq_show | ||
465 | }; | ||
466 | |||
467 | static int scsi_tgt_dev_seq_open(struct inode *inode, struct file *file) | ||
468 | { | ||
469 | return seq_open(file, &scsi_tgt_dev_seq_ops); | ||
470 | } | ||
471 | |||
472 | static const struct file_operations scsi_tgt_dev_seq_fops = { | ||
473 | .owner = THIS_MODULE, | ||
474 | .open = scsi_tgt_dev_seq_open, | ||
475 | .read = seq_read, | ||
476 | .llseek = seq_lseek, | ||
477 | .release = seq_release, | ||
478 | }; | ||
479 | |||
480 | /* | ||
481 | * SCSI Target Port Table | ||
482 | */ | ||
483 | static void *scsi_tgt_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
484 | { | ||
485 | return locate_hba_start(seq, pos); | ||
486 | } | ||
487 | |||
488 | static void *scsi_tgt_port_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
489 | { | ||
490 | return locate_hba_next(seq, v, pos); | ||
491 | } | ||
492 | |||
493 | static void scsi_tgt_port_seq_stop(struct seq_file *seq, void *v) | ||
494 | { | ||
495 | locate_hba_stop(seq, v); | ||
496 | } | ||
497 | |||
498 | static int scsi_tgt_port_seq_show(struct seq_file *seq, void *v) | ||
499 | { | ||
500 | struct se_hba *hba; | ||
501 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
502 | g_se_dev_list); | ||
503 | struct se_device *dev = se_dev->se_dev_ptr; | ||
504 | struct se_port *sep, *sep_tmp; | ||
505 | struct se_portal_group *tpg; | ||
506 | u32 rx_mbytes, tx_mbytes; | ||
507 | unsigned long long num_cmds; | ||
508 | char buf[64]; | ||
509 | |||
510 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
511 | seq_puts(seq, "inst device indx name port_index in_cmds" | ||
512 | " write_mbytes read_mbytes hs_in_cmds\n"); | ||
513 | |||
514 | if (!(dev)) | ||
515 | return 0; | ||
516 | |||
517 | hba = dev->se_hba; | ||
518 | if (!(hba)) { | ||
519 | /* Log error ? */ | ||
520 | return 0; | ||
521 | } | ||
522 | |||
523 | spin_lock(&dev->se_port_lock); | ||
524 | list_for_each_entry_safe(sep, sep_tmp, &dev->dev_sep_list, sep_list) { | ||
525 | tpg = sep->sep_tpg; | ||
526 | sprintf(buf, "%sPort#", | ||
527 | TPG_TFO(tpg)->get_fabric_name()); | ||
528 | |||
529 | seq_printf(seq, "%u %u %u %s%d %s%s%d ", | ||
530 | hba->hba_index, | ||
531 | dev->dev_index, | ||
532 | sep->sep_index, | ||
533 | buf, sep->sep_index, | ||
534 | TPG_TFO(tpg)->tpg_get_wwn(tpg), "+t+", | ||
535 | TPG_TFO(tpg)->tpg_get_tag(tpg)); | ||
536 | |||
537 | spin_lock(&sep->sep_lun->lun_sep_lock); | ||
538 | num_cmds = sep->sep_stats.cmd_pdus; | ||
539 | rx_mbytes = (sep->sep_stats.rx_data_octets >> 20); | ||
540 | tx_mbytes = (sep->sep_stats.tx_data_octets >> 20); | ||
541 | spin_unlock(&sep->sep_lun->lun_sep_lock); | ||
542 | |||
543 | seq_printf(seq, "%llu %u %u %u\n", num_cmds, | ||
544 | rx_mbytes, tx_mbytes, 0); | ||
545 | } | ||
546 | spin_unlock(&dev->se_port_lock); | ||
547 | |||
548 | return 0; | ||
549 | } | ||
550 | |||
551 | static const struct seq_operations scsi_tgt_port_seq_ops = { | ||
552 | .start = scsi_tgt_port_seq_start, | ||
553 | .next = scsi_tgt_port_seq_next, | ||
554 | .stop = scsi_tgt_port_seq_stop, | ||
555 | .show = scsi_tgt_port_seq_show | ||
556 | }; | ||
557 | |||
558 | static int scsi_tgt_port_seq_open(struct inode *inode, struct file *file) | ||
559 | { | ||
560 | return seq_open(file, &scsi_tgt_port_seq_ops); | ||
561 | } | ||
562 | |||
563 | static const struct file_operations scsi_tgt_port_seq_fops = { | ||
564 | .owner = THIS_MODULE, | ||
565 | .open = scsi_tgt_port_seq_open, | ||
566 | .read = seq_read, | ||
567 | .llseek = seq_lseek, | ||
568 | .release = seq_release, | ||
569 | }; | ||
570 | |||
571 | /* | ||
572 | * SCSI Authorized Initiator Table: | ||
573 | * It contains the SCSI Initiators authorized to be attached to one of the | ||
574 | * local Target ports. | ||
575 | * Iterates through all active TPGs and extracts the info from the ACLs | ||
576 | */ | ||
577 | static void *scsi_auth_intr_seq_start(struct seq_file *seq, loff_t *pos) | ||
578 | { | ||
579 | spin_lock_bh(&se_global->se_tpg_lock); | ||
580 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
581 | } | ||
582 | |||
583 | static void *scsi_auth_intr_seq_next(struct seq_file *seq, void *v, | ||
584 | loff_t *pos) | ||
585 | { | ||
586 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
587 | } | ||
588 | |||
589 | static void scsi_auth_intr_seq_stop(struct seq_file *seq, void *v) | ||
590 | { | ||
591 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
592 | } | ||
593 | |||
594 | static int scsi_auth_intr_seq_show(struct seq_file *seq, void *v) | ||
595 | { | ||
596 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
597 | se_tpg_list); | ||
598 | struct se_dev_entry *deve; | ||
599 | struct se_lun *lun; | ||
600 | struct se_node_acl *se_nacl; | ||
601 | int j; | ||
602 | |||
603 | if (list_is_first(&se_tpg->se_tpg_list, | ||
604 | &se_global->g_se_tpg_list)) | ||
605 | seq_puts(seq, "inst dev port indx dev_or_port intr_name " | ||
606 | "map_indx att_count num_cmds read_mbytes " | ||
607 | "write_mbytes hs_num_cmds creation_time row_status\n"); | ||
608 | |||
609 | if (!(se_tpg)) | ||
610 | return 0; | ||
611 | |||
612 | spin_lock(&se_tpg->acl_node_lock); | ||
613 | list_for_each_entry(se_nacl, &se_tpg->acl_node_list, acl_list) { | ||
614 | |||
615 | atomic_inc(&se_nacl->mib_ref_count); | ||
616 | smp_mb__after_atomic_inc(); | ||
617 | spin_unlock(&se_tpg->acl_node_lock); | ||
618 | |||
619 | spin_lock_irq(&se_nacl->device_list_lock); | ||
620 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
621 | deve = &se_nacl->device_list[j]; | ||
622 | if (!(deve->lun_flags & | ||
623 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
624 | (!deve->se_lun)) | ||
625 | continue; | ||
626 | lun = deve->se_lun; | ||
627 | if (!lun->lun_se_dev) | ||
628 | continue; | ||
629 | |||
630 | seq_printf(seq, "%u %u %u %u %u %s %u %u %u %u %u %u" | ||
631 | " %u %s\n", | ||
632 | /* scsiInstIndex */ | ||
633 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
634 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
635 | 0, | ||
636 | /* scsiDeviceIndex */ | ||
637 | lun->lun_se_dev->dev_index, | ||
638 | /* scsiAuthIntrTgtPortIndex */ | ||
639 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
640 | /* scsiAuthIntrIndex */ | ||
641 | se_nacl->acl_index, | ||
642 | /* scsiAuthIntrDevOrPort */ | ||
643 | 1, | ||
644 | /* scsiAuthIntrName */ | ||
645 | se_nacl->initiatorname[0] ? | ||
646 | se_nacl->initiatorname : NONE, | ||
647 | /* FIXME: scsiAuthIntrLunMapIndex */ | ||
648 | 0, | ||
649 | /* scsiAuthIntrAttachedTimes */ | ||
650 | deve->attach_count, | ||
651 | /* scsiAuthIntrOutCommands */ | ||
652 | deve->total_cmds, | ||
653 | /* scsiAuthIntrReadMegaBytes */ | ||
654 | (u32)(deve->read_bytes >> 20), | ||
655 | /* scsiAuthIntrWrittenMegaBytes */ | ||
656 | (u32)(deve->write_bytes >> 20), | ||
657 | /* FIXME: scsiAuthIntrHSOutCommands */ | ||
658 | 0, | ||
659 | /* scsiAuthIntrLastCreation */ | ||
660 | (u32)(((u32)deve->creation_time - | ||
661 | INITIAL_JIFFIES) * 100 / HZ), | ||
662 | /* FIXME: scsiAuthIntrRowStatus */ | ||
663 | "Ready"); | ||
664 | } | ||
665 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
666 | |||
667 | spin_lock(&se_tpg->acl_node_lock); | ||
668 | atomic_dec(&se_nacl->mib_ref_count); | ||
669 | smp_mb__after_atomic_dec(); | ||
670 | } | ||
671 | spin_unlock(&se_tpg->acl_node_lock); | ||
672 | |||
673 | return 0; | ||
674 | } | ||
675 | |||
676 | static const struct seq_operations scsi_auth_intr_seq_ops = { | ||
677 | .start = scsi_auth_intr_seq_start, | ||
678 | .next = scsi_auth_intr_seq_next, | ||
679 | .stop = scsi_auth_intr_seq_stop, | ||
680 | .show = scsi_auth_intr_seq_show | ||
681 | }; | ||
682 | |||
683 | static int scsi_auth_intr_seq_open(struct inode *inode, struct file *file) | ||
684 | { | ||
685 | return seq_open(file, &scsi_auth_intr_seq_ops); | ||
686 | } | ||
687 | |||
688 | static const struct file_operations scsi_auth_intr_seq_fops = { | ||
689 | .owner = THIS_MODULE, | ||
690 | .open = scsi_auth_intr_seq_open, | ||
691 | .read = seq_read, | ||
692 | .llseek = seq_lseek, | ||
693 | .release = seq_release, | ||
694 | }; | ||
695 | |||
696 | /* | ||
697 | * SCSI Attached Initiator Port Table: | ||
698 | * It lists the SCSI Initiators attached to one of the local Target ports. | ||
699 | * Iterates through all active TPGs and use active sessions from each TPG | ||
700 | * to list the info fo this table. | ||
701 | */ | ||
702 | static void *scsi_att_intr_port_seq_start(struct seq_file *seq, loff_t *pos) | ||
703 | { | ||
704 | spin_lock_bh(&se_global->se_tpg_lock); | ||
705 | return seq_list_start(&se_global->g_se_tpg_list, *pos); | ||
706 | } | ||
707 | |||
708 | static void *scsi_att_intr_port_seq_next(struct seq_file *seq, void *v, | ||
709 | loff_t *pos) | ||
710 | { | ||
711 | return seq_list_next(v, &se_global->g_se_tpg_list, pos); | ||
712 | } | ||
713 | |||
714 | static void scsi_att_intr_port_seq_stop(struct seq_file *seq, void *v) | ||
715 | { | ||
716 | spin_unlock_bh(&se_global->se_tpg_lock); | ||
717 | } | ||
718 | |||
719 | static int scsi_att_intr_port_seq_show(struct seq_file *seq, void *v) | ||
720 | { | ||
721 | struct se_portal_group *se_tpg = list_entry(v, struct se_portal_group, | ||
722 | se_tpg_list); | ||
723 | struct se_dev_entry *deve; | ||
724 | struct se_lun *lun; | ||
725 | struct se_node_acl *se_nacl; | ||
726 | struct se_session *se_sess; | ||
727 | unsigned char buf[64]; | ||
728 | int j; | ||
729 | |||
730 | if (list_is_first(&se_tpg->se_tpg_list, | ||
731 | &se_global->g_se_tpg_list)) | ||
732 | seq_puts(seq, "inst dev port indx port_auth_indx port_name" | ||
733 | " port_ident\n"); | ||
734 | |||
735 | if (!(se_tpg)) | ||
736 | return 0; | ||
737 | |||
738 | spin_lock(&se_tpg->session_lock); | ||
739 | list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { | ||
740 | if ((TPG_TFO(se_tpg)->sess_logged_in(se_sess)) || | ||
741 | (!se_sess->se_node_acl) || | ||
742 | (!se_sess->se_node_acl->device_list)) | ||
743 | continue; | ||
744 | |||
745 | atomic_inc(&se_sess->mib_ref_count); | ||
746 | smp_mb__after_atomic_inc(); | ||
747 | se_nacl = se_sess->se_node_acl; | ||
748 | atomic_inc(&se_nacl->mib_ref_count); | ||
749 | smp_mb__after_atomic_inc(); | ||
750 | spin_unlock(&se_tpg->session_lock); | ||
751 | |||
752 | spin_lock_irq(&se_nacl->device_list_lock); | ||
753 | for (j = 0; j < TRANSPORT_MAX_LUNS_PER_TPG; j++) { | ||
754 | deve = &se_nacl->device_list[j]; | ||
755 | if (!(deve->lun_flags & | ||
756 | TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) || | ||
757 | (!deve->se_lun)) | ||
758 | continue; | ||
759 | |||
760 | lun = deve->se_lun; | ||
761 | if (!lun->lun_se_dev) | ||
762 | continue; | ||
763 | |||
764 | memset(buf, 0, 64); | ||
765 | if (TPG_TFO(se_tpg)->sess_get_initiator_sid != NULL) | ||
766 | TPG_TFO(se_tpg)->sess_get_initiator_sid( | ||
767 | se_sess, (unsigned char *)&buf[0], 64); | ||
768 | |||
769 | seq_printf(seq, "%u %u %u %u %u %s+i+%s\n", | ||
770 | /* scsiInstIndex */ | ||
771 | (TPG_TFO(se_tpg)->tpg_get_inst_index != NULL) ? | ||
772 | TPG_TFO(se_tpg)->tpg_get_inst_index(se_tpg) : | ||
773 | 0, | ||
774 | /* scsiDeviceIndex */ | ||
775 | lun->lun_se_dev->dev_index, | ||
776 | /* scsiPortIndex */ | ||
777 | TPG_TFO(se_tpg)->tpg_get_tag(se_tpg), | ||
778 | /* scsiAttIntrPortIndex */ | ||
779 | (TPG_TFO(se_tpg)->sess_get_index != NULL) ? | ||
780 | TPG_TFO(se_tpg)->sess_get_index(se_sess) : | ||
781 | 0, | ||
782 | /* scsiAttIntrPortAuthIntrIdx */ | ||
783 | se_nacl->acl_index, | ||
784 | /* scsiAttIntrPortName */ | ||
785 | se_nacl->initiatorname[0] ? | ||
786 | se_nacl->initiatorname : NONE, | ||
787 | /* scsiAttIntrPortIdentifier */ | ||
788 | buf); | ||
789 | } | ||
790 | spin_unlock_irq(&se_nacl->device_list_lock); | ||
791 | |||
792 | spin_lock(&se_tpg->session_lock); | ||
793 | atomic_dec(&se_nacl->mib_ref_count); | ||
794 | smp_mb__after_atomic_dec(); | ||
795 | atomic_dec(&se_sess->mib_ref_count); | ||
796 | smp_mb__after_atomic_dec(); | ||
797 | } | ||
798 | spin_unlock(&se_tpg->session_lock); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static const struct seq_operations scsi_att_intr_port_seq_ops = { | ||
804 | .start = scsi_att_intr_port_seq_start, | ||
805 | .next = scsi_att_intr_port_seq_next, | ||
806 | .stop = scsi_att_intr_port_seq_stop, | ||
807 | .show = scsi_att_intr_port_seq_show | ||
808 | }; | ||
809 | |||
810 | static int scsi_att_intr_port_seq_open(struct inode *inode, struct file *file) | ||
811 | { | ||
812 | return seq_open(file, &scsi_att_intr_port_seq_ops); | ||
813 | } | ||
814 | |||
815 | static const struct file_operations scsi_att_intr_port_seq_fops = { | ||
816 | .owner = THIS_MODULE, | ||
817 | .open = scsi_att_intr_port_seq_open, | ||
818 | .read = seq_read, | ||
819 | .llseek = seq_lseek, | ||
820 | .release = seq_release, | ||
821 | }; | ||
822 | |||
823 | /* | ||
824 | * SCSI Logical Unit Table | ||
825 | */ | ||
826 | static void *scsi_lu_seq_start(struct seq_file *seq, loff_t *pos) | ||
827 | { | ||
828 | return locate_hba_start(seq, pos); | ||
829 | } | ||
830 | |||
831 | static void *scsi_lu_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
832 | { | ||
833 | return locate_hba_next(seq, v, pos); | ||
834 | } | ||
835 | |||
836 | static void scsi_lu_seq_stop(struct seq_file *seq, void *v) | ||
837 | { | ||
838 | locate_hba_stop(seq, v); | ||
839 | } | ||
840 | |||
841 | #define SCSI_LU_INDEX 1 | ||
842 | static int scsi_lu_seq_show(struct seq_file *seq, void *v) | ||
843 | { | ||
844 | struct se_hba *hba; | ||
845 | struct se_subsystem_dev *se_dev = list_entry(v, struct se_subsystem_dev, | ||
846 | g_se_dev_list); | ||
847 | struct se_device *dev = se_dev->se_dev_ptr; | ||
848 | int j; | ||
849 | char str[28]; | ||
850 | |||
851 | if (list_is_first(&se_dev->g_se_dev_list, &se_global->g_se_dev_list)) | ||
852 | seq_puts(seq, "inst dev indx LUN lu_name vend prod rev" | ||
853 | " dev_type status state-bit num_cmds read_mbytes" | ||
854 | " write_mbytes resets full_stat hs_num_cmds creation_time\n"); | ||
855 | |||
856 | if (!(dev)) | ||
857 | return 0; | ||
858 | |||
859 | hba = dev->se_hba; | ||
860 | if (!(hba)) { | ||
861 | /* Log error ? */ | ||
862 | return 0; | ||
863 | } | ||
864 | |||
865 | /* Fix LU state, if we can read it from the device */ | ||
866 | seq_printf(seq, "%u %u %u %llu %s", hba->hba_index, | ||
867 | dev->dev_index, SCSI_LU_INDEX, | ||
868 | (unsigned long long)0, /* FIXME: scsiLuDefaultLun */ | ||
869 | (strlen(DEV_T10_WWN(dev)->unit_serial)) ? | ||
870 | /* scsiLuWwnName */ | ||
871 | (char *)&DEV_T10_WWN(dev)->unit_serial[0] : | ||
872 | "None"); | ||
873 | |||
874 | memcpy(&str[0], (void *)DEV_T10_WWN(dev), 28); | ||
875 | /* scsiLuVendorId */ | ||
876 | for (j = 0; j < 8; j++) | ||
877 | str[j] = ISPRINT(DEV_T10_WWN(dev)->vendor[j]) ? | ||
878 | DEV_T10_WWN(dev)->vendor[j] : 0x20; | ||
879 | str[8] = 0; | ||
880 | seq_printf(seq, " %s", str); | ||
881 | |||
882 | /* scsiLuProductId */ | ||
883 | for (j = 0; j < 16; j++) | ||
884 | str[j] = ISPRINT(DEV_T10_WWN(dev)->model[j]) ? | ||
885 | DEV_T10_WWN(dev)->model[j] : 0x20; | ||
886 | str[16] = 0; | ||
887 | seq_printf(seq, " %s", str); | ||
888 | |||
889 | /* scsiLuRevisionId */ | ||
890 | for (j = 0; j < 4; j++) | ||
891 | str[j] = ISPRINT(DEV_T10_WWN(dev)->revision[j]) ? | ||
892 | DEV_T10_WWN(dev)->revision[j] : 0x20; | ||
893 | str[4] = 0; | ||
894 | seq_printf(seq, " %s", str); | ||
895 | |||
896 | seq_printf(seq, " %u %s %s %llu %u %u %u %u %u %u\n", | ||
897 | /* scsiLuPeripheralType */ | ||
898 | TRANSPORT(dev)->get_device_type(dev), | ||
899 | (dev->dev_status == TRANSPORT_DEVICE_ACTIVATED) ? | ||
900 | "available" : "notavailable", /* scsiLuStatus */ | ||
901 | "exposed", /* scsiLuState */ | ||
902 | (unsigned long long)dev->num_cmds, | ||
903 | /* scsiLuReadMegaBytes */ | ||
904 | (u32)(dev->read_bytes >> 20), | ||
905 | /* scsiLuWrittenMegaBytes */ | ||
906 | (u32)(dev->write_bytes >> 20), | ||
907 | dev->num_resets, /* scsiLuInResets */ | ||
908 | 0, /* scsiLuOutTaskSetFullStatus */ | ||
909 | 0, /* scsiLuHSInCommands */ | ||
910 | (u32)(((u32)dev->creation_time - INITIAL_JIFFIES) * | ||
911 | 100 / HZ)); | ||
912 | |||
913 | return 0; | ||
914 | } | ||
915 | |||
916 | static const struct seq_operations scsi_lu_seq_ops = { | ||
917 | .start = scsi_lu_seq_start, | ||
918 | .next = scsi_lu_seq_next, | ||
919 | .stop = scsi_lu_seq_stop, | ||
920 | .show = scsi_lu_seq_show | ||
921 | }; | ||
922 | |||
923 | static int scsi_lu_seq_open(struct inode *inode, struct file *file) | ||
924 | { | ||
925 | return seq_open(file, &scsi_lu_seq_ops); | ||
926 | } | ||
927 | |||
928 | static const struct file_operations scsi_lu_seq_fops = { | ||
929 | .owner = THIS_MODULE, | ||
930 | .open = scsi_lu_seq_open, | ||
931 | .read = seq_read, | ||
932 | .llseek = seq_lseek, | ||
933 | .release = seq_release, | ||
934 | }; | ||
935 | |||
936 | /****************************************************************************/ | ||
937 | |||
938 | /* | ||
939 | * Remove proc fs entries | ||
940 | */ | ||
941 | void remove_scsi_target_mib(void) | ||
942 | { | ||
943 | remove_proc_entry("scsi_target/mib/scsi_inst", NULL); | ||
944 | remove_proc_entry("scsi_target/mib/scsi_dev", NULL); | ||
945 | remove_proc_entry("scsi_target/mib/scsi_port", NULL); | ||
946 | remove_proc_entry("scsi_target/mib/scsi_transport", NULL); | ||
947 | remove_proc_entry("scsi_target/mib/scsi_tgt_dev", NULL); | ||
948 | remove_proc_entry("scsi_target/mib/scsi_tgt_port", NULL); | ||
949 | remove_proc_entry("scsi_target/mib/scsi_auth_intr", NULL); | ||
950 | remove_proc_entry("scsi_target/mib/scsi_att_intr_port", NULL); | ||
951 | remove_proc_entry("scsi_target/mib/scsi_lu", NULL); | ||
952 | remove_proc_entry("scsi_target/mib", NULL); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Create proc fs entries for the mib tables | ||
957 | */ | ||
958 | int init_scsi_target_mib(void) | ||
959 | { | ||
960 | struct proc_dir_entry *dir_entry; | ||
961 | struct proc_dir_entry *scsi_inst_entry; | ||
962 | struct proc_dir_entry *scsi_dev_entry; | ||
963 | struct proc_dir_entry *scsi_port_entry; | ||
964 | struct proc_dir_entry *scsi_transport_entry; | ||
965 | struct proc_dir_entry *scsi_tgt_dev_entry; | ||
966 | struct proc_dir_entry *scsi_tgt_port_entry; | ||
967 | struct proc_dir_entry *scsi_auth_intr_entry; | ||
968 | struct proc_dir_entry *scsi_att_intr_port_entry; | ||
969 | struct proc_dir_entry *scsi_lu_entry; | ||
970 | |||
971 | dir_entry = proc_mkdir("scsi_target/mib", NULL); | ||
972 | if (!(dir_entry)) { | ||
973 | printk(KERN_ERR "proc_mkdir() failed.\n"); | ||
974 | return -1; | ||
975 | } | ||
976 | |||
977 | scsi_inst_entry = | ||
978 | create_proc_entry("scsi_target/mib/scsi_inst", 0, NULL); | ||
979 | if (scsi_inst_entry) | ||
980 | scsi_inst_entry->proc_fops = &scsi_inst_seq_fops; | ||
981 | else | ||
982 | goto error; | ||
983 | |||
984 | scsi_dev_entry = | ||
985 | create_proc_entry("scsi_target/mib/scsi_dev", 0, NULL); | ||
986 | if (scsi_dev_entry) | ||
987 | scsi_dev_entry->proc_fops = &scsi_dev_seq_fops; | ||
988 | else | ||
989 | goto error; | ||
990 | |||
991 | scsi_port_entry = | ||
992 | create_proc_entry("scsi_target/mib/scsi_port", 0, NULL); | ||
993 | if (scsi_port_entry) | ||
994 | scsi_port_entry->proc_fops = &scsi_port_seq_fops; | ||
995 | else | ||
996 | goto error; | ||
997 | |||
998 | scsi_transport_entry = | ||
999 | create_proc_entry("scsi_target/mib/scsi_transport", 0, NULL); | ||
1000 | if (scsi_transport_entry) | ||
1001 | scsi_transport_entry->proc_fops = &scsi_transport_seq_fops; | ||
1002 | else | ||
1003 | goto error; | ||
1004 | |||
1005 | scsi_tgt_dev_entry = | ||
1006 | create_proc_entry("scsi_target/mib/scsi_tgt_dev", 0, NULL); | ||
1007 | if (scsi_tgt_dev_entry) | ||
1008 | scsi_tgt_dev_entry->proc_fops = &scsi_tgt_dev_seq_fops; | ||
1009 | else | ||
1010 | goto error; | ||
1011 | |||
1012 | scsi_tgt_port_entry = | ||
1013 | create_proc_entry("scsi_target/mib/scsi_tgt_port", 0, NULL); | ||
1014 | if (scsi_tgt_port_entry) | ||
1015 | scsi_tgt_port_entry->proc_fops = &scsi_tgt_port_seq_fops; | ||
1016 | else | ||
1017 | goto error; | ||
1018 | |||
1019 | scsi_auth_intr_entry = | ||
1020 | create_proc_entry("scsi_target/mib/scsi_auth_intr", 0, NULL); | ||
1021 | if (scsi_auth_intr_entry) | ||
1022 | scsi_auth_intr_entry->proc_fops = &scsi_auth_intr_seq_fops; | ||
1023 | else | ||
1024 | goto error; | ||
1025 | |||
1026 | scsi_att_intr_port_entry = | ||
1027 | create_proc_entry("scsi_target/mib/scsi_att_intr_port", 0, NULL); | ||
1028 | if (scsi_att_intr_port_entry) | ||
1029 | scsi_att_intr_port_entry->proc_fops = | ||
1030 | &scsi_att_intr_port_seq_fops; | ||
1031 | else | ||
1032 | goto error; | ||
1033 | |||
1034 | scsi_lu_entry = create_proc_entry("scsi_target/mib/scsi_lu", 0, NULL); | ||
1035 | if (scsi_lu_entry) | ||
1036 | scsi_lu_entry->proc_fops = &scsi_lu_seq_fops; | ||
1037 | else | ||
1038 | goto error; | ||
1039 | |||
1040 | return 0; | ||
1041 | |||
1042 | error: | ||
1043 | printk(KERN_ERR "create_proc_entry() failed.\n"); | ||
1044 | remove_scsi_target_mib(); | ||
1045 | return -1; | ||
1046 | } | ||
1047 | |||
1048 | /* | ||
1049 | * Initialize the index table for allocating unique row indexes to various mib | ||
1050 | * tables | ||
1051 | */ | ||
1052 | void init_scsi_index_table(void) | ||
1053 | { | ||
1054 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
1055 | spin_lock_init(&scsi_index_table.lock); | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * Allocate a new row index for the entry type specified | ||
1060 | */ | ||
1061 | u32 scsi_get_new_index(scsi_index_t type) | ||
1062 | { | ||
1063 | u32 new_index; | ||
1064 | |||
1065 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
1066 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
1067 | return -1; | ||
1068 | } | ||
1069 | |||
1070 | spin_lock(&scsi_index_table.lock); | ||
1071 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1072 | if (new_index == 0) | ||
1073 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
1074 | spin_unlock(&scsi_index_table.lock); | ||
1075 | |||
1076 | return new_index; | ||
1077 | } | ||
1078 | EXPORT_SYMBOL(scsi_get_new_index); | ||
diff --git a/drivers/target/target_core_mib.h b/drivers/target/target_core_mib.h deleted file mode 100644 index 277204633850..000000000000 --- a/drivers/target/target_core_mib.h +++ /dev/null | |||
@@ -1,28 +0,0 @@ | |||
1 | #ifndef TARGET_CORE_MIB_H | ||
2 | #define TARGET_CORE_MIB_H | ||
3 | |||
4 | typedef enum { | ||
5 | SCSI_INST_INDEX, | ||
6 | SCSI_DEVICE_INDEX, | ||
7 | SCSI_AUTH_INTR_INDEX, | ||
8 | SCSI_INDEX_TYPE_MAX | ||
9 | } scsi_index_t; | ||
10 | |||
11 | struct scsi_index_table { | ||
12 | spinlock_t lock; | ||
13 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
14 | } ____cacheline_aligned; | ||
15 | |||
16 | /* SCSI Port stats */ | ||
17 | struct scsi_port_stats { | ||
18 | u64 cmd_pdus; | ||
19 | u64 tx_data_octets; | ||
20 | u64 rx_data_octets; | ||
21 | } ____cacheline_aligned; | ||
22 | |||
23 | extern int init_scsi_target_mib(void); | ||
24 | extern void remove_scsi_target_mib(void); | ||
25 | extern void init_scsi_index_table(void); | ||
26 | extern u32 scsi_get_new_index(scsi_index_t); | ||
27 | |||
28 | #endif /*** TARGET_CORE_MIB_H ***/ | ||
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 742d24609a9b..f2a08477a68c 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c | |||
@@ -462,8 +462,8 @@ static struct se_device *pscsi_create_type_disk( | |||
462 | */ | 462 | */ |
463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, | 463 | bd = blkdev_get_by_path(se_dev->se_dev_udev_path, |
464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); | 464 | FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); |
465 | if (!(bd)) { | 465 | if (IS_ERR(bd)) { |
466 | printk("pSCSI: blkdev_get_by_path() failed\n"); | 466 | printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n"); |
467 | scsi_device_put(sd); | 467 | scsi_device_put(sd); |
468 | return NULL; | 468 | return NULL; |
469 | } | 469 | } |
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index abfa81a57115..c26f67467623 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c | |||
@@ -275,7 +275,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl( | |||
275 | spin_lock_init(&acl->device_list_lock); | 275 | spin_lock_init(&acl->device_list_lock); |
276 | spin_lock_init(&acl->nacl_sess_lock); | 276 | spin_lock_init(&acl->nacl_sess_lock); |
277 | atomic_set(&acl->acl_pr_ref_count, 0); | 277 | atomic_set(&acl->acl_pr_ref_count, 0); |
278 | atomic_set(&acl->mib_ref_count, 0); | ||
279 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); | 278 | acl->queue_depth = TPG_TFO(tpg)->tpg_get_default_depth(tpg); |
280 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); | 279 | snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname); |
281 | acl->se_tpg = tpg; | 280 | acl->se_tpg = tpg; |
@@ -318,12 +317,6 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *nacl) | |||
318 | cpu_relax(); | 317 | cpu_relax(); |
319 | } | 318 | } |
320 | 319 | ||
321 | void core_tpg_wait_for_mib_ref(struct se_node_acl *nacl) | ||
322 | { | ||
323 | while (atomic_read(&nacl->mib_ref_count) != 0) | ||
324 | cpu_relax(); | ||
325 | } | ||
326 | |||
327 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) | 320 | void core_tpg_clear_object_luns(struct se_portal_group *tpg) |
328 | { | 321 | { |
329 | int i, ret; | 322 | int i, ret; |
@@ -480,7 +473,6 @@ int core_tpg_del_initiator_node_acl( | |||
480 | spin_unlock_bh(&tpg->session_lock); | 473 | spin_unlock_bh(&tpg->session_lock); |
481 | 474 | ||
482 | core_tpg_wait_for_nacl_pr_ref(acl); | 475 | core_tpg_wait_for_nacl_pr_ref(acl); |
483 | core_tpg_wait_for_mib_ref(acl); | ||
484 | core_clear_initiator_node_from_tpg(acl, tpg); | 476 | core_clear_initiator_node_from_tpg(acl, tpg); |
485 | core_free_device_list_for_node(acl, tpg); | 477 | core_free_device_list_for_node(acl, tpg); |
486 | 478 | ||
@@ -701,6 +693,8 @@ EXPORT_SYMBOL(core_tpg_register); | |||
701 | 693 | ||
702 | int core_tpg_deregister(struct se_portal_group *se_tpg) | 694 | int core_tpg_deregister(struct se_portal_group *se_tpg) |
703 | { | 695 | { |
696 | struct se_node_acl *nacl, *nacl_tmp; | ||
697 | |||
704 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" | 698 | printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group" |
705 | " for endpoint: %s Portal Tag %u\n", | 699 | " for endpoint: %s Portal Tag %u\n", |
706 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? | 700 | (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ? |
@@ -714,6 +708,25 @@ int core_tpg_deregister(struct se_portal_group *se_tpg) | |||
714 | 708 | ||
715 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) | 709 | while (atomic_read(&se_tpg->tpg_pr_ref_count) != 0) |
716 | cpu_relax(); | 710 | cpu_relax(); |
711 | /* | ||
712 | * Release any remaining demo-mode generated se_node_acl that have | ||
713 | * not been released because of TFO->tpg_check_demo_mode_cache() == 1 | ||
714 | * in transport_deregister_session(). | ||
715 | */ | ||
716 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
717 | list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list, | ||
718 | acl_list) { | ||
719 | list_del(&nacl->acl_list); | ||
720 | se_tpg->num_node_acls--; | ||
721 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
722 | |||
723 | core_tpg_wait_for_nacl_pr_ref(nacl); | ||
724 | core_free_device_list_for_node(nacl, se_tpg); | ||
725 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, nacl); | ||
726 | |||
727 | spin_lock_bh(&se_tpg->acl_node_lock); | ||
728 | } | ||
729 | spin_unlock_bh(&se_tpg->acl_node_lock); | ||
717 | 730 | ||
718 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) | 731 | if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) |
719 | core_tpg_release_virtual_lun0(se_tpg); | 732 | core_tpg_release_virtual_lun0(se_tpg); |
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 28b6292ff298..236e22d8cfae 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -379,6 +379,40 @@ void release_se_global(void) | |||
379 | se_global = NULL; | 379 | se_global = NULL; |
380 | } | 380 | } |
381 | 381 | ||
382 | /* SCSI statistics table index */ | ||
383 | static struct scsi_index_table scsi_index_table; | ||
384 | |||
385 | /* | ||
386 | * Initialize the index table for allocating unique row indexes to various mib | ||
387 | * tables. | ||
388 | */ | ||
389 | void init_scsi_index_table(void) | ||
390 | { | ||
391 | memset(&scsi_index_table, 0, sizeof(struct scsi_index_table)); | ||
392 | spin_lock_init(&scsi_index_table.lock); | ||
393 | } | ||
394 | |||
395 | /* | ||
396 | * Allocate a new row index for the entry type specified | ||
397 | */ | ||
398 | u32 scsi_get_new_index(scsi_index_t type) | ||
399 | { | ||
400 | u32 new_index; | ||
401 | |||
402 | if ((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)) { | ||
403 | printk(KERN_ERR "Invalid index type %d\n", type); | ||
404 | return -EINVAL; | ||
405 | } | ||
406 | |||
407 | spin_lock(&scsi_index_table.lock); | ||
408 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
409 | if (new_index == 0) | ||
410 | new_index = ++scsi_index_table.scsi_mib_index[type]; | ||
411 | spin_unlock(&scsi_index_table.lock); | ||
412 | |||
413 | return new_index; | ||
414 | } | ||
415 | |||
382 | void transport_init_queue_obj(struct se_queue_obj *qobj) | 416 | void transport_init_queue_obj(struct se_queue_obj *qobj) |
383 | { | 417 | { |
384 | atomic_set(&qobj->queue_cnt, 0); | 418 | atomic_set(&qobj->queue_cnt, 0); |
@@ -437,7 +471,6 @@ struct se_session *transport_init_session(void) | |||
437 | } | 471 | } |
438 | INIT_LIST_HEAD(&se_sess->sess_list); | 472 | INIT_LIST_HEAD(&se_sess->sess_list); |
439 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 473 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
440 | atomic_set(&se_sess->mib_ref_count, 0); | ||
441 | 474 | ||
442 | return se_sess; | 475 | return se_sess; |
443 | } | 476 | } |
@@ -546,12 +579,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
546 | transport_free_session(se_sess); | 579 | transport_free_session(se_sess); |
547 | return; | 580 | return; |
548 | } | 581 | } |
549 | /* | ||
550 | * Wait for possible reference in drivers/target/target_core_mib.c: | ||
551 | * scsi_att_intr_port_seq_show() | ||
552 | */ | ||
553 | while (atomic_read(&se_sess->mib_ref_count) != 0) | ||
554 | cpu_relax(); | ||
555 | 582 | ||
556 | spin_lock_bh(&se_tpg->session_lock); | 583 | spin_lock_bh(&se_tpg->session_lock); |
557 | list_del(&se_sess->sess_list); | 584 | list_del(&se_sess->sess_list); |
@@ -574,7 +601,6 @@ void transport_deregister_session(struct se_session *se_sess) | |||
574 | spin_unlock_bh(&se_tpg->acl_node_lock); | 601 | spin_unlock_bh(&se_tpg->acl_node_lock); |
575 | 602 | ||
576 | core_tpg_wait_for_nacl_pr_ref(se_nacl); | 603 | core_tpg_wait_for_nacl_pr_ref(se_nacl); |
577 | core_tpg_wait_for_mib_ref(se_nacl); | ||
578 | core_free_device_list_for_node(se_nacl, se_tpg); | 604 | core_free_device_list_for_node(se_nacl, se_tpg); |
579 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, | 605 | TPG_TFO(se_tpg)->tpg_release_fabric_acl(se_tpg, |
580 | se_nacl); | 606 | se_nacl); |
@@ -4827,6 +4853,8 @@ static int transport_do_se_mem_map( | |||
4827 | 4853 | ||
4828 | return ret; | 4854 | return ret; |
4829 | } | 4855 | } |
4856 | |||
4857 | BUG_ON(list_empty(se_mem_list)); | ||
4830 | /* | 4858 | /* |
4831 | * This is the normal path for all normal non BIDI and BIDI-COMMAND | 4859 | * This is the normal path for all normal non BIDI and BIDI-COMMAND |
4832 | * WRITE payloads.. If we need to do BIDI READ passthrough for | 4860 | * WRITE payloads.. If we need to do BIDI READ passthrough for |
@@ -5008,7 +5036,9 @@ transport_map_control_cmd_to_task(struct se_cmd *cmd) | |||
5008 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; | 5036 | struct se_mem *se_mem = NULL, *se_mem_lout = NULL; |
5009 | u32 se_mem_cnt = 0, task_offset = 0; | 5037 | u32 se_mem_cnt = 0, task_offset = 0; |
5010 | 5038 | ||
5011 | BUG_ON(list_empty(cmd->t_task->t_mem_list)); | 5039 | if (!list_empty(T_TASK(cmd)->t_mem_list)) |
5040 | se_mem = list_entry(T_TASK(cmd)->t_mem_list->next, | ||
5041 | struct se_mem, se_list); | ||
5012 | 5042 | ||
5013 | ret = transport_do_se_mem_map(dev, task, | 5043 | ret = transport_do_se_mem_map(dev, task, |
5014 | cmd->t_task->t_mem_list, NULL, se_mem, | 5044 | cmd->t_task->t_mem_list, NULL, se_mem, |
diff --git a/drivers/tty/serial/max3100.c b/drivers/tty/serial/max3100.c index beb1afa27d8d..7b951adac54b 100644 --- a/drivers/tty/serial/max3100.c +++ b/drivers/tty/serial/max3100.c | |||
@@ -601,7 +601,7 @@ static int max3100_startup(struct uart_port *port) | |||
601 | s->rts = 0; | 601 | s->rts = 0; |
602 | 602 | ||
603 | sprintf(b, "max3100-%d", s->minor); | 603 | sprintf(b, "max3100-%d", s->minor); |
604 | s->workqueue = create_freezeable_workqueue(b); | 604 | s->workqueue = create_freezable_workqueue(b); |
605 | if (!s->workqueue) { | 605 | if (!s->workqueue) { |
606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); | 606 | dev_warn(&s->spi->dev, "cannot create workqueue\n"); |
607 | return -EBUSY; | 607 | return -EBUSY; |
diff --git a/drivers/tty/serial/max3107.c b/drivers/tty/serial/max3107.c index 910870edf708..750b4f627315 100644 --- a/drivers/tty/serial/max3107.c +++ b/drivers/tty/serial/max3107.c | |||
@@ -833,7 +833,7 @@ static int max3107_startup(struct uart_port *port) | |||
833 | struct max3107_port *s = container_of(port, struct max3107_port, port); | 833 | struct max3107_port *s = container_of(port, struct max3107_port, port); |
834 | 834 | ||
835 | /* Initialize work queue */ | 835 | /* Initialize work queue */ |
836 | s->workqueue = create_freezeable_workqueue("max3107"); | 836 | s->workqueue = create_freezable_workqueue("max3107"); |
837 | if (!s->workqueue) { | 837 | if (!s->workqueue) { |
838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); | 838 | dev_err(&s->spi->dev, "Workqueue creation failed\n"); |
839 | return -EBUSY; | 839 | return -EBUSY; |
diff --git a/drivers/xen/events.c b/drivers/xen/events.c index 74681478100a..89987a7bf26f 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c | |||
@@ -277,7 +277,7 @@ static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu) | |||
277 | 277 | ||
278 | BUG_ON(irq == -1); | 278 | BUG_ON(irq == -1); |
279 | #ifdef CONFIG_SMP | 279 | #ifdef CONFIG_SMP |
280 | cpumask_copy(irq_to_desc(irq)->affinity, cpumask_of(cpu)); | 280 | cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu)); |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); | 283 | clear_bit(chn, cpu_evtchn_mask(cpu_from_irq(irq))); |
@@ -294,7 +294,7 @@ static void init_evtchn_cpu_bindings(void) | |||
294 | 294 | ||
295 | /* By default all event channels notify CPU#0. */ | 295 | /* By default all event channels notify CPU#0. */ |
296 | for_each_irq_desc(i, desc) { | 296 | for_each_irq_desc(i, desc) { |
297 | cpumask_copy(desc->affinity, cpumask_of(0)); | 297 | cpumask_copy(desc->irq_data.affinity, cpumask_of(0)); |
298 | } | 298 | } |
299 | #endif | 299 | #endif |
300 | 300 | ||
@@ -376,81 +376,69 @@ static void unmask_evtchn(int port) | |||
376 | put_cpu(); | 376 | put_cpu(); |
377 | } | 377 | } |
378 | 378 | ||
379 | static int get_nr_hw_irqs(void) | 379 | static int xen_allocate_irq_dynamic(void) |
380 | { | 380 | { |
381 | int ret = 1; | 381 | int first = 0; |
382 | int irq; | ||
382 | 383 | ||
383 | #ifdef CONFIG_X86_IO_APIC | 384 | #ifdef CONFIG_X86_IO_APIC |
384 | ret = get_nr_irqs_gsi(); | 385 | /* |
386 | * For an HVM guest or domain 0 which sees "real" (emulated or | ||
387 | * actual respectively) GSIs we allocate dynamic IRQs | ||
388 | * e.g. those corresponding to event channels or MSIs | ||
389 | * etc. from the range above those "real" GSIs to avoid | ||
390 | * collisions. | ||
391 | */ | ||
392 | if (xen_initial_domain() || xen_hvm_domain()) | ||
393 | first = get_nr_irqs_gsi(); | ||
385 | #endif | 394 | #endif |
386 | 395 | ||
387 | return ret; | 396 | retry: |
388 | } | 397 | irq = irq_alloc_desc_from(first, -1); |
389 | 398 | ||
390 | static int find_unbound_pirq(int type) | 399 | if (irq == -ENOMEM && first > NR_IRQS_LEGACY) { |
391 | { | 400 | printk(KERN_ERR "Out of dynamic IRQ space and eating into GSI space. You should increase nr_irqs\n"); |
392 | int rc, i; | 401 | first = max(NR_IRQS_LEGACY, first - NR_IRQS_LEGACY); |
393 | struct physdev_get_free_pirq op_get_free_pirq; | 402 | goto retry; |
394 | op_get_free_pirq.type = type; | 403 | } |
395 | 404 | ||
396 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); | 405 | if (irq < 0) |
397 | if (!rc) | 406 | panic("No available IRQ to bind to: increase nr_irqs!\n"); |
398 | return op_get_free_pirq.pirq; | ||
399 | 407 | ||
400 | for (i = 0; i < nr_irqs; i++) { | 408 | return irq; |
401 | if (pirq_to_irq[i] < 0) | ||
402 | return i; | ||
403 | } | ||
404 | return -1; | ||
405 | } | 409 | } |
406 | 410 | ||
407 | static int find_unbound_irq(void) | 411 | static int xen_allocate_irq_gsi(unsigned gsi) |
408 | { | 412 | { |
409 | struct irq_data *data; | 413 | int irq; |
410 | int irq, res; | ||
411 | int bottom = get_nr_hw_irqs(); | ||
412 | int top = nr_irqs-1; | ||
413 | |||
414 | if (bottom == nr_irqs) | ||
415 | goto no_irqs; | ||
416 | 414 | ||
417 | /* This loop starts from the top of IRQ space and goes down. | 415 | /* |
418 | * We need this b/c if we have a PCI device in a Xen PV guest | 416 | * A PV guest has no concept of a GSI (since it has no ACPI |
419 | * we do not have an IO-APIC (though the backend might have them) | 417 | * nor access to/knowledge of the physical APICs). Therefore |
420 | * mapped in. To not have a collision of physical IRQs with the Xen | 418 | * all IRQs are dynamically allocated from the entire IRQ |
421 | * event channels start at the top of the IRQ space for virtual IRQs. | 419 | * space. |
422 | */ | 420 | */ |
423 | for (irq = top; irq > bottom; irq--) { | 421 | if (xen_pv_domain() && !xen_initial_domain()) |
424 | data = irq_get_irq_data(irq); | 422 | return xen_allocate_irq_dynamic(); |
425 | /* only 15->0 have init'd desc; handle irq > 16 */ | ||
426 | if (!data) | ||
427 | break; | ||
428 | if (data->chip == &no_irq_chip) | ||
429 | break; | ||
430 | if (data->chip != &xen_dynamic_chip) | ||
431 | continue; | ||
432 | if (irq_info[irq].type == IRQT_UNBOUND) | ||
433 | return irq; | ||
434 | } | ||
435 | |||
436 | if (irq == bottom) | ||
437 | goto no_irqs; | ||
438 | 423 | ||
439 | res = irq_alloc_desc_at(irq, -1); | 424 | /* Legacy IRQ descriptors are already allocated by the arch. */ |
425 | if (gsi < NR_IRQS_LEGACY) | ||
426 | return gsi; | ||
440 | 427 | ||
441 | if (WARN_ON(res != irq)) | 428 | irq = irq_alloc_desc_at(gsi, -1); |
442 | return -1; | 429 | if (irq < 0) |
430 | panic("Unable to allocate to IRQ%d (%d)\n", gsi, irq); | ||
443 | 431 | ||
444 | return irq; | 432 | return irq; |
445 | |||
446 | no_irqs: | ||
447 | panic("No available IRQ to bind to: increase nr_irqs!\n"); | ||
448 | } | 433 | } |
449 | 434 | ||
450 | static bool identity_mapped_irq(unsigned irq) | 435 | static void xen_free_irq(unsigned irq) |
451 | { | 436 | { |
452 | /* identity map all the hardware irqs */ | 437 | /* Legacy IRQ descriptors are managed by the arch. */ |
453 | return irq < get_nr_hw_irqs(); | 438 | if (irq < NR_IRQS_LEGACY) |
439 | return; | ||
440 | |||
441 | irq_free_desc(irq); | ||
454 | } | 442 | } |
455 | 443 | ||
456 | static void pirq_unmask_notify(int irq) | 444 | static void pirq_unmask_notify(int irq) |
@@ -486,7 +474,7 @@ static bool probing_irq(int irq) | |||
486 | return desc && desc->action == NULL; | 474 | return desc && desc->action == NULL; |
487 | } | 475 | } |
488 | 476 | ||
489 | static unsigned int startup_pirq(unsigned int irq) | 477 | static unsigned int __startup_pirq(unsigned int irq) |
490 | { | 478 | { |
491 | struct evtchn_bind_pirq bind_pirq; | 479 | struct evtchn_bind_pirq bind_pirq; |
492 | struct irq_info *info = info_for_irq(irq); | 480 | struct irq_info *info = info_for_irq(irq); |
@@ -524,9 +512,15 @@ out: | |||
524 | return 0; | 512 | return 0; |
525 | } | 513 | } |
526 | 514 | ||
527 | static void shutdown_pirq(unsigned int irq) | 515 | static unsigned int startup_pirq(struct irq_data *data) |
516 | { | ||
517 | return __startup_pirq(data->irq); | ||
518 | } | ||
519 | |||
520 | static void shutdown_pirq(struct irq_data *data) | ||
528 | { | 521 | { |
529 | struct evtchn_close close; | 522 | struct evtchn_close close; |
523 | unsigned int irq = data->irq; | ||
530 | struct irq_info *info = info_for_irq(irq); | 524 | struct irq_info *info = info_for_irq(irq); |
531 | int evtchn = evtchn_from_irq(irq); | 525 | int evtchn = evtchn_from_irq(irq); |
532 | 526 | ||
@@ -546,20 +540,20 @@ static void shutdown_pirq(unsigned int irq) | |||
546 | info->evtchn = 0; | 540 | info->evtchn = 0; |
547 | } | 541 | } |
548 | 542 | ||
549 | static void enable_pirq(unsigned int irq) | 543 | static void enable_pirq(struct irq_data *data) |
550 | { | 544 | { |
551 | startup_pirq(irq); | 545 | startup_pirq(data); |
552 | } | 546 | } |
553 | 547 | ||
554 | static void disable_pirq(unsigned int irq) | 548 | static void disable_pirq(struct irq_data *data) |
555 | { | 549 | { |
556 | } | 550 | } |
557 | 551 | ||
558 | static void ack_pirq(unsigned int irq) | 552 | static void ack_pirq(struct irq_data *data) |
559 | { | 553 | { |
560 | int evtchn = evtchn_from_irq(irq); | 554 | int evtchn = evtchn_from_irq(data->irq); |
561 | 555 | ||
562 | move_native_irq(irq); | 556 | move_native_irq(data->irq); |
563 | 557 | ||
564 | if (VALID_EVTCHN(evtchn)) { | 558 | if (VALID_EVTCHN(evtchn)) { |
565 | mask_evtchn(evtchn); | 559 | mask_evtchn(evtchn); |
@@ -567,23 +561,6 @@ static void ack_pirq(unsigned int irq) | |||
567 | } | 561 | } |
568 | } | 562 | } |
569 | 563 | ||
570 | static void end_pirq(unsigned int irq) | ||
571 | { | ||
572 | int evtchn = evtchn_from_irq(irq); | ||
573 | struct irq_desc *desc = irq_to_desc(irq); | ||
574 | |||
575 | if (WARN_ON(!desc)) | ||
576 | return; | ||
577 | |||
578 | if ((desc->status & (IRQ_DISABLED|IRQ_PENDING)) == | ||
579 | (IRQ_DISABLED|IRQ_PENDING)) { | ||
580 | shutdown_pirq(irq); | ||
581 | } else if (VALID_EVTCHN(evtchn)) { | ||
582 | unmask_evtchn(evtchn); | ||
583 | pirq_unmask_notify(irq); | ||
584 | } | ||
585 | } | ||
586 | |||
587 | static int find_irq_by_gsi(unsigned gsi) | 564 | static int find_irq_by_gsi(unsigned gsi) |
588 | { | 565 | { |
589 | int irq; | 566 | int irq; |
@@ -638,14 +615,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
638 | goto out; /* XXX need refcount? */ | 615 | goto out; /* XXX need refcount? */ |
639 | } | 616 | } |
640 | 617 | ||
641 | /* If we are a PV guest, we don't have GSIs (no ACPI passed). Therefore | 618 | irq = xen_allocate_irq_gsi(gsi); |
642 | * we are using the !xen_initial_domain() to drop in the function.*/ | ||
643 | if (identity_mapped_irq(gsi) || (!xen_initial_domain() && | ||
644 | xen_pv_domain())) { | ||
645 | irq = gsi; | ||
646 | irq_alloc_desc_at(irq, -1); | ||
647 | } else | ||
648 | irq = find_unbound_irq(); | ||
649 | 619 | ||
650 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, | 620 | set_irq_chip_and_handler_name(irq, &xen_pirq_chip, |
651 | handle_level_irq, name); | 621 | handle_level_irq, name); |
@@ -658,7 +628,7 @@ int xen_map_pirq_gsi(unsigned pirq, unsigned gsi, int shareable, char *name) | |||
658 | * this in the priv domain. */ | 628 | * this in the priv domain. */ |
659 | if (xen_initial_domain() && | 629 | if (xen_initial_domain() && |
660 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { | 630 | HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) { |
661 | irq_free_desc(irq); | 631 | xen_free_irq(irq); |
662 | irq = -ENOSPC; | 632 | irq = -ENOSPC; |
663 | goto out; | 633 | goto out; |
664 | } | 634 | } |
@@ -677,12 +647,29 @@ out: | |||
677 | #include <linux/msi.h> | 647 | #include <linux/msi.h> |
678 | #include "../pci/msi.h" | 648 | #include "../pci/msi.h" |
679 | 649 | ||
650 | static int find_unbound_pirq(int type) | ||
651 | { | ||
652 | int rc, i; | ||
653 | struct physdev_get_free_pirq op_get_free_pirq; | ||
654 | op_get_free_pirq.type = type; | ||
655 | |||
656 | rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq); | ||
657 | if (!rc) | ||
658 | return op_get_free_pirq.pirq; | ||
659 | |||
660 | for (i = 0; i < nr_irqs; i++) { | ||
661 | if (pirq_to_irq[i] < 0) | ||
662 | return i; | ||
663 | } | ||
664 | return -1; | ||
665 | } | ||
666 | |||
680 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) | 667 | void xen_allocate_pirq_msi(char *name, int *irq, int *pirq, int alloc) |
681 | { | 668 | { |
682 | spin_lock(&irq_mapping_update_lock); | 669 | spin_lock(&irq_mapping_update_lock); |
683 | 670 | ||
684 | if (alloc & XEN_ALLOC_IRQ) { | 671 | if (alloc & XEN_ALLOC_IRQ) { |
685 | *irq = find_unbound_irq(); | 672 | *irq = xen_allocate_irq_dynamic(); |
686 | if (*irq == -1) | 673 | if (*irq == -1) |
687 | goto out; | 674 | goto out; |
688 | } | 675 | } |
@@ -732,7 +719,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) | |||
732 | 719 | ||
733 | spin_lock(&irq_mapping_update_lock); | 720 | spin_lock(&irq_mapping_update_lock); |
734 | 721 | ||
735 | irq = find_unbound_irq(); | 722 | irq = xen_allocate_irq_dynamic(); |
736 | 723 | ||
737 | if (irq == -1) | 724 | if (irq == -1) |
738 | goto out; | 725 | goto out; |
@@ -741,7 +728,7 @@ int xen_create_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int type) | |||
741 | if (rc) { | 728 | if (rc) { |
742 | printk(KERN_WARNING "xen map irq failed %d\n", rc); | 729 | printk(KERN_WARNING "xen map irq failed %d\n", rc); |
743 | 730 | ||
744 | irq_free_desc(irq); | 731 | xen_free_irq(irq); |
745 | 732 | ||
746 | irq = -1; | 733 | irq = -1; |
747 | goto out; | 734 | goto out; |
@@ -779,11 +766,12 @@ int xen_destroy_irq(int irq) | |||
779 | printk(KERN_WARNING "unmap irq failed %d\n", rc); | 766 | printk(KERN_WARNING "unmap irq failed %d\n", rc); |
780 | goto out; | 767 | goto out; |
781 | } | 768 | } |
782 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
783 | } | 769 | } |
770 | pirq_to_irq[info->u.pirq.pirq] = -1; | ||
771 | |||
784 | irq_info[irq] = mk_unbound_info(); | 772 | irq_info[irq] = mk_unbound_info(); |
785 | 773 | ||
786 | irq_free_desc(irq); | 774 | xen_free_irq(irq); |
787 | 775 | ||
788 | out: | 776 | out: |
789 | spin_unlock(&irq_mapping_update_lock); | 777 | spin_unlock(&irq_mapping_update_lock); |
@@ -814,7 +802,7 @@ int bind_evtchn_to_irq(unsigned int evtchn) | |||
814 | irq = evtchn_to_irq[evtchn]; | 802 | irq = evtchn_to_irq[evtchn]; |
815 | 803 | ||
816 | if (irq == -1) { | 804 | if (irq == -1) { |
817 | irq = find_unbound_irq(); | 805 | irq = xen_allocate_irq_dynamic(); |
818 | 806 | ||
819 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, | 807 | set_irq_chip_and_handler_name(irq, &xen_dynamic_chip, |
820 | handle_fasteoi_irq, "event"); | 808 | handle_fasteoi_irq, "event"); |
@@ -839,7 +827,7 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu) | |||
839 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; | 827 | irq = per_cpu(ipi_to_irq, cpu)[ipi]; |
840 | 828 | ||
841 | if (irq == -1) { | 829 | if (irq == -1) { |
842 | irq = find_unbound_irq(); | 830 | irq = xen_allocate_irq_dynamic(); |
843 | if (irq < 0) | 831 | if (irq < 0) |
844 | goto out; | 832 | goto out; |
845 | 833 | ||
@@ -875,7 +863,7 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu) | |||
875 | irq = per_cpu(virq_to_irq, cpu)[virq]; | 863 | irq = per_cpu(virq_to_irq, cpu)[virq]; |
876 | 864 | ||
877 | if (irq == -1) { | 865 | if (irq == -1) { |
878 | irq = find_unbound_irq(); | 866 | irq = xen_allocate_irq_dynamic(); |
879 | 867 | ||
880 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, | 868 | set_irq_chip_and_handler_name(irq, &xen_percpu_chip, |
881 | handle_percpu_irq, "virq"); | 869 | handle_percpu_irq, "virq"); |
@@ -934,7 +922,7 @@ static void unbind_from_irq(unsigned int irq) | |||
934 | if (irq_info[irq].type != IRQT_UNBOUND) { | 922 | if (irq_info[irq].type != IRQT_UNBOUND) { |
935 | irq_info[irq] = mk_unbound_info(); | 923 | irq_info[irq] = mk_unbound_info(); |
936 | 924 | ||
937 | irq_free_desc(irq); | 925 | xen_free_irq(irq); |
938 | } | 926 | } |
939 | 927 | ||
940 | spin_unlock(&irq_mapping_update_lock); | 928 | spin_unlock(&irq_mapping_update_lock); |
@@ -990,7 +978,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, | |||
990 | if (irq < 0) | 978 | if (irq < 0) |
991 | return irq; | 979 | return irq; |
992 | 980 | ||
993 | irqflags |= IRQF_NO_SUSPEND; | 981 | irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME; |
994 | retval = request_irq(irq, handler, irqflags, devname, dev_id); | 982 | retval = request_irq(irq, handler, irqflags, devname, dev_id); |
995 | if (retval != 0) { | 983 | if (retval != 0) { |
996 | unbind_from_irq(irq); | 984 | unbind_from_irq(irq); |
@@ -1234,11 +1222,12 @@ static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu) | |||
1234 | return 0; | 1222 | return 0; |
1235 | } | 1223 | } |
1236 | 1224 | ||
1237 | static int set_affinity_irq(unsigned irq, const struct cpumask *dest) | 1225 | static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest, |
1226 | bool force) | ||
1238 | { | 1227 | { |
1239 | unsigned tcpu = cpumask_first(dest); | 1228 | unsigned tcpu = cpumask_first(dest); |
1240 | 1229 | ||
1241 | return rebind_irq_to_cpu(irq, tcpu); | 1230 | return rebind_irq_to_cpu(data->irq, tcpu); |
1242 | } | 1231 | } |
1243 | 1232 | ||
1244 | int resend_irq_on_evtchn(unsigned int irq) | 1233 | int resend_irq_on_evtchn(unsigned int irq) |
@@ -1257,35 +1246,35 @@ int resend_irq_on_evtchn(unsigned int irq) | |||
1257 | return 1; | 1246 | return 1; |
1258 | } | 1247 | } |
1259 | 1248 | ||
1260 | static void enable_dynirq(unsigned int irq) | 1249 | static void enable_dynirq(struct irq_data *data) |
1261 | { | 1250 | { |
1262 | int evtchn = evtchn_from_irq(irq); | 1251 | int evtchn = evtchn_from_irq(data->irq); |
1263 | 1252 | ||
1264 | if (VALID_EVTCHN(evtchn)) | 1253 | if (VALID_EVTCHN(evtchn)) |
1265 | unmask_evtchn(evtchn); | 1254 | unmask_evtchn(evtchn); |
1266 | } | 1255 | } |
1267 | 1256 | ||
1268 | static void disable_dynirq(unsigned int irq) | 1257 | static void disable_dynirq(struct irq_data *data) |
1269 | { | 1258 | { |
1270 | int evtchn = evtchn_from_irq(irq); | 1259 | int evtchn = evtchn_from_irq(data->irq); |
1271 | 1260 | ||
1272 | if (VALID_EVTCHN(evtchn)) | 1261 | if (VALID_EVTCHN(evtchn)) |
1273 | mask_evtchn(evtchn); | 1262 | mask_evtchn(evtchn); |
1274 | } | 1263 | } |
1275 | 1264 | ||
1276 | static void ack_dynirq(unsigned int irq) | 1265 | static void ack_dynirq(struct irq_data *data) |
1277 | { | 1266 | { |
1278 | int evtchn = evtchn_from_irq(irq); | 1267 | int evtchn = evtchn_from_irq(data->irq); |
1279 | 1268 | ||
1280 | move_masked_irq(irq); | 1269 | move_masked_irq(data->irq); |
1281 | 1270 | ||
1282 | if (VALID_EVTCHN(evtchn)) | 1271 | if (VALID_EVTCHN(evtchn)) |
1283 | unmask_evtchn(evtchn); | 1272 | unmask_evtchn(evtchn); |
1284 | } | 1273 | } |
1285 | 1274 | ||
1286 | static int retrigger_dynirq(unsigned int irq) | 1275 | static int retrigger_dynirq(struct irq_data *data) |
1287 | { | 1276 | { |
1288 | int evtchn = evtchn_from_irq(irq); | 1277 | int evtchn = evtchn_from_irq(data->irq); |
1289 | struct shared_info *sh = HYPERVISOR_shared_info; | 1278 | struct shared_info *sh = HYPERVISOR_shared_info; |
1290 | int ret = 0; | 1279 | int ret = 0; |
1291 | 1280 | ||
@@ -1334,7 +1323,7 @@ static void restore_cpu_pirqs(void) | |||
1334 | 1323 | ||
1335 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); | 1324 | printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq); |
1336 | 1325 | ||
1337 | startup_pirq(irq); | 1326 | __startup_pirq(irq); |
1338 | } | 1327 | } |
1339 | } | 1328 | } |
1340 | 1329 | ||
@@ -1445,7 +1434,6 @@ void xen_poll_irq(int irq) | |||
1445 | void xen_irq_resume(void) | 1434 | void xen_irq_resume(void) |
1446 | { | 1435 | { |
1447 | unsigned int cpu, irq, evtchn; | 1436 | unsigned int cpu, irq, evtchn; |
1448 | struct irq_desc *desc; | ||
1449 | 1437 | ||
1450 | init_evtchn_cpu_bindings(); | 1438 | init_evtchn_cpu_bindings(); |
1451 | 1439 | ||
@@ -1465,66 +1453,48 @@ void xen_irq_resume(void) | |||
1465 | restore_cpu_ipis(cpu); | 1453 | restore_cpu_ipis(cpu); |
1466 | } | 1454 | } |
1467 | 1455 | ||
1468 | /* | ||
1469 | * Unmask any IRQF_NO_SUSPEND IRQs which are enabled. These | ||
1470 | * are not handled by the IRQ core. | ||
1471 | */ | ||
1472 | for_each_irq_desc(irq, desc) { | ||
1473 | if (!desc->action || !(desc->action->flags & IRQF_NO_SUSPEND)) | ||
1474 | continue; | ||
1475 | if (desc->status & IRQ_DISABLED) | ||
1476 | continue; | ||
1477 | |||
1478 | evtchn = evtchn_from_irq(irq); | ||
1479 | if (evtchn == -1) | ||
1480 | continue; | ||
1481 | |||
1482 | unmask_evtchn(evtchn); | ||
1483 | } | ||
1484 | |||
1485 | restore_cpu_pirqs(); | 1456 | restore_cpu_pirqs(); |
1486 | } | 1457 | } |
1487 | 1458 | ||
1488 | static struct irq_chip xen_dynamic_chip __read_mostly = { | 1459 | static struct irq_chip xen_dynamic_chip __read_mostly = { |
1489 | .name = "xen-dyn", | 1460 | .name = "xen-dyn", |
1490 | 1461 | ||
1491 | .disable = disable_dynirq, | 1462 | .irq_disable = disable_dynirq, |
1492 | .mask = disable_dynirq, | 1463 | .irq_mask = disable_dynirq, |
1493 | .unmask = enable_dynirq, | 1464 | .irq_unmask = enable_dynirq, |
1494 | 1465 | ||
1495 | .eoi = ack_dynirq, | 1466 | .irq_eoi = ack_dynirq, |
1496 | .set_affinity = set_affinity_irq, | 1467 | .irq_set_affinity = set_affinity_irq, |
1497 | .retrigger = retrigger_dynirq, | 1468 | .irq_retrigger = retrigger_dynirq, |
1498 | }; | 1469 | }; |
1499 | 1470 | ||
1500 | static struct irq_chip xen_pirq_chip __read_mostly = { | 1471 | static struct irq_chip xen_pirq_chip __read_mostly = { |
1501 | .name = "xen-pirq", | 1472 | .name = "xen-pirq", |
1502 | 1473 | ||
1503 | .startup = startup_pirq, | 1474 | .irq_startup = startup_pirq, |
1504 | .shutdown = shutdown_pirq, | 1475 | .irq_shutdown = shutdown_pirq, |
1505 | 1476 | ||
1506 | .enable = enable_pirq, | 1477 | .irq_enable = enable_pirq, |
1507 | .unmask = enable_pirq, | 1478 | .irq_unmask = enable_pirq, |
1508 | 1479 | ||
1509 | .disable = disable_pirq, | 1480 | .irq_disable = disable_pirq, |
1510 | .mask = disable_pirq, | 1481 | .irq_mask = disable_pirq, |
1511 | 1482 | ||
1512 | .ack = ack_pirq, | 1483 | .irq_ack = ack_pirq, |
1513 | .end = end_pirq, | ||
1514 | 1484 | ||
1515 | .set_affinity = set_affinity_irq, | 1485 | .irq_set_affinity = set_affinity_irq, |
1516 | 1486 | ||
1517 | .retrigger = retrigger_dynirq, | 1487 | .irq_retrigger = retrigger_dynirq, |
1518 | }; | 1488 | }; |
1519 | 1489 | ||
1520 | static struct irq_chip xen_percpu_chip __read_mostly = { | 1490 | static struct irq_chip xen_percpu_chip __read_mostly = { |
1521 | .name = "xen-percpu", | 1491 | .name = "xen-percpu", |
1522 | 1492 | ||
1523 | .disable = disable_dynirq, | 1493 | .irq_disable = disable_dynirq, |
1524 | .mask = disable_dynirq, | 1494 | .irq_mask = disable_dynirq, |
1525 | .unmask = enable_dynirq, | 1495 | .irq_unmask = enable_dynirq, |
1526 | 1496 | ||
1527 | .ack = ack_dynirq, | 1497 | .irq_ack = ack_dynirq, |
1528 | }; | 1498 | }; |
1529 | 1499 | ||
1530 | int xen_set_callback_via(uint64_t via) | 1500 | int xen_set_callback_via(uint64_t via) |
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c index db8c4c4ac880..24177272bcb8 100644 --- a/drivers/xen/manage.c +++ b/drivers/xen/manage.c | |||
@@ -37,11 +37,19 @@ static enum shutdown_state shutting_down = SHUTDOWN_INVALID; | |||
37 | #ifdef CONFIG_PM_SLEEP | 37 | #ifdef CONFIG_PM_SLEEP |
38 | static int xen_hvm_suspend(void *data) | 38 | static int xen_hvm_suspend(void *data) |
39 | { | 39 | { |
40 | int err; | ||
40 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; | 41 | struct sched_shutdown r = { .reason = SHUTDOWN_suspend }; |
41 | int *cancelled = data; | 42 | int *cancelled = data; |
42 | 43 | ||
43 | BUG_ON(!irqs_disabled()); | 44 | BUG_ON(!irqs_disabled()); |
44 | 45 | ||
46 | err = sysdev_suspend(PMSG_SUSPEND); | ||
47 | if (err) { | ||
48 | printk(KERN_ERR "xen_hvm_suspend: sysdev_suspend failed: %d\n", | ||
49 | err); | ||
50 | return err; | ||
51 | } | ||
52 | |||
45 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); | 53 | *cancelled = HYPERVISOR_sched_op(SCHEDOP_shutdown, &r); |
46 | 54 | ||
47 | xen_hvm_post_suspend(*cancelled); | 55 | xen_hvm_post_suspend(*cancelled); |
@@ -53,6 +61,8 @@ static int xen_hvm_suspend(void *data) | |||
53 | xen_timer_resume(); | 61 | xen_timer_resume(); |
54 | } | 62 | } |
55 | 63 | ||
64 | sysdev_resume(); | ||
65 | |||
56 | return 0; | 66 | return 0; |
57 | } | 67 | } |
58 | 68 | ||
diff --git a/fs/block_dev.c b/fs/block_dev.c index 333a7bb4cb9c..4fb8a3431531 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -1215,12 +1215,6 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) | |||
1215 | 1215 | ||
1216 | res = __blkdev_get(bdev, mode, 0); | 1216 | res = __blkdev_get(bdev, mode, 0); |
1217 | 1217 | ||
1218 | /* __blkdev_get() may alter read only status, check it afterwards */ | ||
1219 | if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1220 | __blkdev_put(bdev, mode, 0); | ||
1221 | res = -EACCES; | ||
1222 | } | ||
1223 | |||
1224 | if (whole) { | 1218 | if (whole) { |
1225 | /* finish claiming */ | 1219 | /* finish claiming */ |
1226 | mutex_lock(&bdev->bd_mutex); | 1220 | mutex_lock(&bdev->bd_mutex); |
@@ -1298,6 +1292,11 @@ struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, | |||
1298 | if (err) | 1292 | if (err) |
1299 | return ERR_PTR(err); | 1293 | return ERR_PTR(err); |
1300 | 1294 | ||
1295 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1296 | blkdev_put(bdev, mode); | ||
1297 | return ERR_PTR(-EACCES); | ||
1298 | } | ||
1299 | |||
1301 | return bdev; | 1300 | return bdev; |
1302 | } | 1301 | } |
1303 | EXPORT_SYMBOL(blkdev_get_by_path); | 1302 | EXPORT_SYMBOL(blkdev_get_by_path); |
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 0bc68de8edd7..f0aef787a102 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c | |||
@@ -60,6 +60,7 @@ int ceph_init_dentry(struct dentry *dentry) | |||
60 | } | 60 | } |
61 | di->dentry = dentry; | 61 | di->dentry = dentry; |
62 | di->lease_session = NULL; | 62 | di->lease_session = NULL; |
63 | di->parent_inode = igrab(dentry->d_parent->d_inode); | ||
63 | dentry->d_fsdata = di; | 64 | dentry->d_fsdata = di; |
64 | dentry->d_time = jiffies; | 65 | dentry->d_time = jiffies; |
65 | ceph_dentry_lru_add(dentry); | 66 | ceph_dentry_lru_add(dentry); |
@@ -1033,7 +1034,7 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
1033 | u64 snapid = CEPH_NOSNAP; | 1034 | u64 snapid = CEPH_NOSNAP; |
1034 | 1035 | ||
1035 | if (!IS_ROOT(dentry)) { | 1036 | if (!IS_ROOT(dentry)) { |
1036 | parent_inode = dentry->d_parent->d_inode; | 1037 | parent_inode = di->parent_inode; |
1037 | if (parent_inode) | 1038 | if (parent_inode) |
1038 | snapid = ceph_snap(parent_inode); | 1039 | snapid = ceph_snap(parent_inode); |
1039 | } | 1040 | } |
@@ -1058,6 +1059,8 @@ static void ceph_dentry_release(struct dentry *dentry) | |||
1058 | kmem_cache_free(ceph_dentry_cachep, di); | 1059 | kmem_cache_free(ceph_dentry_cachep, di); |
1059 | dentry->d_fsdata = NULL; | 1060 | dentry->d_fsdata = NULL; |
1060 | } | 1061 | } |
1062 | if (parent_inode) | ||
1063 | iput(parent_inode); | ||
1061 | } | 1064 | } |
1062 | 1065 | ||
1063 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, | 1066 | static int ceph_snapdir_d_revalidate(struct dentry *dentry, |
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c index 39c243acd062..f40b9139e437 100644 --- a/fs/ceph/snap.c +++ b/fs/ceph/snap.c | |||
@@ -584,10 +584,14 @@ static void queue_realm_cap_snaps(struct ceph_snap_realm *realm) | |||
584 | if (lastinode) | 584 | if (lastinode) |
585 | iput(lastinode); | 585 | iput(lastinode); |
586 | 586 | ||
587 | dout("queue_realm_cap_snaps %p %llx children\n", realm, realm->ino); | 587 | list_for_each_entry(child, &realm->children, child_item) { |
588 | list_for_each_entry(child, &realm->children, child_item) | 588 | dout("queue_realm_cap_snaps %p %llx queue child %p %llx\n", |
589 | queue_realm_cap_snaps(child); | 589 | realm, realm->ino, child, child->ino); |
590 | list_del_init(&child->dirty_item); | ||
591 | list_add(&child->dirty_item, &realm->dirty_item); | ||
592 | } | ||
590 | 593 | ||
594 | list_del_init(&realm->dirty_item); | ||
591 | dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); | 595 | dout("queue_realm_cap_snaps %p %llx done\n", realm, realm->ino); |
592 | } | 596 | } |
593 | 597 | ||
@@ -683,7 +687,9 @@ more: | |||
683 | * queue cap snaps _after_ we've built the new snap contexts, | 687 | * queue cap snaps _after_ we've built the new snap contexts, |
684 | * so that i_head_snapc can be set appropriately. | 688 | * so that i_head_snapc can be set appropriately. |
685 | */ | 689 | */ |
686 | list_for_each_entry(realm, &dirty_realms, dirty_item) { | 690 | while (!list_empty(&dirty_realms)) { |
691 | realm = list_first_entry(&dirty_realms, struct ceph_snap_realm, | ||
692 | dirty_item); | ||
687 | queue_realm_cap_snaps(realm); | 693 | queue_realm_cap_snaps(realm); |
688 | } | 694 | } |
689 | 695 | ||
diff --git a/fs/ceph/super.h b/fs/ceph/super.h index 20b907d76ae2..88fcaa21b801 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h | |||
@@ -207,6 +207,7 @@ struct ceph_dentry_info { | |||
207 | struct dentry *dentry; | 207 | struct dentry *dentry; |
208 | u64 time; | 208 | u64 time; |
209 | u64 offset; | 209 | u64 offset; |
210 | struct inode *parent_inode; | ||
210 | }; | 211 | }; |
211 | 212 | ||
212 | struct ceph_inode_xattrs_info { | 213 | struct ceph_inode_xattrs_info { |
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h index 4a3330235d55..a9371b6578c0 100644 --- a/fs/cifs/cifsfs.h +++ b/fs/cifs/cifsfs.h | |||
@@ -127,5 +127,5 @@ extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg); | |||
127 | extern const struct export_operations cifs_export_ops; | 127 | extern const struct export_operations cifs_export_ops; |
128 | #endif /* EXPERIMENTAL */ | 128 | #endif /* EXPERIMENTAL */ |
129 | 129 | ||
130 | #define CIFS_VERSION "1.70" | 130 | #define CIFS_VERSION "1.71" |
131 | #endif /* _CIFSFS_H */ | 131 | #endif /* _CIFSFS_H */ |
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c index 8d9189f64477..79f641eeda30 100644 --- a/fs/cifs/netmisc.c +++ b/fs/cifs/netmisc.c | |||
@@ -170,7 +170,7 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) | |||
170 | { | 170 | { |
171 | int rc, alen, slen; | 171 | int rc, alen, slen; |
172 | const char *pct; | 172 | const char *pct; |
173 | char *endp, scope_id[13]; | 173 | char scope_id[13]; |
174 | struct sockaddr_in *s4 = (struct sockaddr_in *) dst; | 174 | struct sockaddr_in *s4 = (struct sockaddr_in *) dst; |
175 | struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; | 175 | struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; |
176 | 176 | ||
@@ -197,9 +197,9 @@ cifs_convert_address(struct sockaddr *dst, const char *src, int len) | |||
197 | memcpy(scope_id, pct + 1, slen); | 197 | memcpy(scope_id, pct + 1, slen); |
198 | scope_id[slen] = '\0'; | 198 | scope_id[slen] = '\0'; |
199 | 199 | ||
200 | s6->sin6_scope_id = (u32) simple_strtoul(pct, &endp, 0); | 200 | rc = strict_strtoul(scope_id, 0, |
201 | if (endp != scope_id + slen) | 201 | (unsigned long *)&s6->sin6_scope_id); |
202 | return 0; | 202 | rc = (rc == 0) ? 1 : 0; |
203 | } | 203 | } |
204 | 204 | ||
205 | return rc; | 205 | return rc; |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c index 1adc9625a344..16765703131b 100644 --- a/fs/cifs/sess.c +++ b/fs/cifs/sess.c | |||
@@ -656,13 +656,13 @@ ssetup_ntlmssp_authenticate: | |||
656 | 656 | ||
657 | if (type == LANMAN) { | 657 | if (type == LANMAN) { |
658 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 658 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
659 | char lnm_session_key[CIFS_SESS_KEY_SIZE]; | 659 | char lnm_session_key[CIFS_AUTH_RESP_SIZE]; |
660 | 660 | ||
661 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; | 661 | pSMB->req.hdr.Flags2 &= ~SMBFLG2_UNICODE; |
662 | 662 | ||
663 | /* no capabilities flags in old lanman negotiation */ | 663 | /* no capabilities flags in old lanman negotiation */ |
664 | 664 | ||
665 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_SESS_KEY_SIZE); | 665 | pSMB->old_req.PasswordLength = cpu_to_le16(CIFS_AUTH_RESP_SIZE); |
666 | 666 | ||
667 | /* Calculate hash with password and copy into bcc_ptr. | 667 | /* Calculate hash with password and copy into bcc_ptr. |
668 | * Encryption Key (stored as in cryptkey) gets used if the | 668 | * Encryption Key (stored as in cryptkey) gets used if the |
@@ -675,8 +675,8 @@ ssetup_ntlmssp_authenticate: | |||
675 | true : false, lnm_session_key); | 675 | true : false, lnm_session_key); |
676 | 676 | ||
677 | ses->flags |= CIFS_SES_LANMAN; | 677 | ses->flags |= CIFS_SES_LANMAN; |
678 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_SESS_KEY_SIZE); | 678 | memcpy(bcc_ptr, (char *)lnm_session_key, CIFS_AUTH_RESP_SIZE); |
679 | bcc_ptr += CIFS_SESS_KEY_SIZE; | 679 | bcc_ptr += CIFS_AUTH_RESP_SIZE; |
680 | 680 | ||
681 | /* can not sign if LANMAN negotiated so no need | 681 | /* can not sign if LANMAN negotiated so no need |
682 | to calculate signing key? but what if server | 682 | to calculate signing key? but what if server |
diff --git a/fs/ecryptfs/dentry.c b/fs/ecryptfs/dentry.c index 6fc4f319b550..534c1d46e69e 100644 --- a/fs/ecryptfs/dentry.c +++ b/fs/ecryptfs/dentry.c | |||
@@ -46,24 +46,28 @@ static int ecryptfs_d_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
46 | { | 46 | { |
47 | struct dentry *lower_dentry; | 47 | struct dentry *lower_dentry; |
48 | struct vfsmount *lower_mnt; | 48 | struct vfsmount *lower_mnt; |
49 | struct dentry *dentry_save; | 49 | struct dentry *dentry_save = NULL; |
50 | struct vfsmount *vfsmount_save; | 50 | struct vfsmount *vfsmount_save = NULL; |
51 | int rc = 1; | 51 | int rc = 1; |
52 | 52 | ||
53 | if (nd->flags & LOOKUP_RCU) | 53 | if (nd && nd->flags & LOOKUP_RCU) |
54 | return -ECHILD; | 54 | return -ECHILD; |
55 | 55 | ||
56 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 56 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
57 | lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); | 57 | lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry); |
58 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) | 58 | if (!lower_dentry->d_op || !lower_dentry->d_op->d_revalidate) |
59 | goto out; | 59 | goto out; |
60 | dentry_save = nd->path.dentry; | 60 | if (nd) { |
61 | vfsmount_save = nd->path.mnt; | 61 | dentry_save = nd->path.dentry; |
62 | nd->path.dentry = lower_dentry; | 62 | vfsmount_save = nd->path.mnt; |
63 | nd->path.mnt = lower_mnt; | 63 | nd->path.dentry = lower_dentry; |
64 | nd->path.mnt = lower_mnt; | ||
65 | } | ||
64 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); | 66 | rc = lower_dentry->d_op->d_revalidate(lower_dentry, nd); |
65 | nd->path.dentry = dentry_save; | 67 | if (nd) { |
66 | nd->path.mnt = vfsmount_save; | 68 | nd->path.dentry = dentry_save; |
69 | nd->path.mnt = vfsmount_save; | ||
70 | } | ||
67 | if (dentry->d_inode) { | 71 | if (dentry->d_inode) { |
68 | struct inode *lower_inode = | 72 | struct inode *lower_inode = |
69 | ecryptfs_inode_to_lower(dentry->d_inode); | 73 | ecryptfs_inode_to_lower(dentry->d_inode); |
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h index dbc84ed96336..e00753496e3e 100644 --- a/fs/ecryptfs/ecryptfs_kernel.h +++ b/fs/ecryptfs/ecryptfs_kernel.h | |||
@@ -632,8 +632,7 @@ int ecryptfs_interpose(struct dentry *hidden_dentry, | |||
632 | u32 flags); | 632 | u32 flags); |
633 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 633 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
634 | struct dentry *lower_dentry, | 634 | struct dentry *lower_dentry, |
635 | struct inode *ecryptfs_dir_inode, | 635 | struct inode *ecryptfs_dir_inode); |
636 | struct nameidata *ecryptfs_nd); | ||
637 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, | 636 | int ecryptfs_decode_and_decrypt_filename(char **decrypted_name, |
638 | size_t *decrypted_name_size, | 637 | size_t *decrypted_name_size, |
639 | struct dentry *ecryptfs_dentry, | 638 | struct dentry *ecryptfs_dentry, |
diff --git a/fs/ecryptfs/file.c b/fs/ecryptfs/file.c index 81e10e6a9443..7d1050e254f9 100644 --- a/fs/ecryptfs/file.c +++ b/fs/ecryptfs/file.c | |||
@@ -317,6 +317,7 @@ ecryptfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
317 | 317 | ||
318 | const struct file_operations ecryptfs_dir_fops = { | 318 | const struct file_operations ecryptfs_dir_fops = { |
319 | .readdir = ecryptfs_readdir, | 319 | .readdir = ecryptfs_readdir, |
320 | .read = generic_read_dir, | ||
320 | .unlocked_ioctl = ecryptfs_unlocked_ioctl, | 321 | .unlocked_ioctl = ecryptfs_unlocked_ioctl, |
321 | #ifdef CONFIG_COMPAT | 322 | #ifdef CONFIG_COMPAT |
322 | .compat_ioctl = ecryptfs_compat_ioctl, | 323 | .compat_ioctl = ecryptfs_compat_ioctl, |
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index bd33f87a1907..b592938a84bc 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -74,16 +74,20 @@ ecryptfs_create_underlying_file(struct inode *lower_dir_inode, | |||
74 | unsigned int flags_save; | 74 | unsigned int flags_save; |
75 | int rc; | 75 | int rc; |
76 | 76 | ||
77 | dentry_save = nd->path.dentry; | 77 | if (nd) { |
78 | vfsmount_save = nd->path.mnt; | 78 | dentry_save = nd->path.dentry; |
79 | flags_save = nd->flags; | 79 | vfsmount_save = nd->path.mnt; |
80 | nd->path.dentry = lower_dentry; | 80 | flags_save = nd->flags; |
81 | nd->path.mnt = lower_mnt; | 81 | nd->path.dentry = lower_dentry; |
82 | nd->flags &= ~LOOKUP_OPEN; | 82 | nd->path.mnt = lower_mnt; |
83 | nd->flags &= ~LOOKUP_OPEN; | ||
84 | } | ||
83 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); | 85 | rc = vfs_create(lower_dir_inode, lower_dentry, mode, nd); |
84 | nd->path.dentry = dentry_save; | 86 | if (nd) { |
85 | nd->path.mnt = vfsmount_save; | 87 | nd->path.dentry = dentry_save; |
86 | nd->flags = flags_save; | 88 | nd->path.mnt = vfsmount_save; |
89 | nd->flags = flags_save; | ||
90 | } | ||
87 | return rc; | 91 | return rc; |
88 | } | 92 | } |
89 | 93 | ||
@@ -241,8 +245,7 @@ out: | |||
241 | */ | 245 | */ |
242 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | 246 | int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, |
243 | struct dentry *lower_dentry, | 247 | struct dentry *lower_dentry, |
244 | struct inode *ecryptfs_dir_inode, | 248 | struct inode *ecryptfs_dir_inode) |
245 | struct nameidata *ecryptfs_nd) | ||
246 | { | 249 | { |
247 | struct dentry *lower_dir_dentry; | 250 | struct dentry *lower_dir_dentry; |
248 | struct vfsmount *lower_mnt; | 251 | struct vfsmount *lower_mnt; |
@@ -290,8 +293,6 @@ int ecryptfs_lookup_and_interpose_lower(struct dentry *ecryptfs_dentry, | |||
290 | goto out; | 293 | goto out; |
291 | if (special_file(lower_inode->i_mode)) | 294 | if (special_file(lower_inode->i_mode)) |
292 | goto out; | 295 | goto out; |
293 | if (!ecryptfs_nd) | ||
294 | goto out; | ||
295 | /* Released in this function */ | 296 | /* Released in this function */ |
296 | page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); | 297 | page_virt = kmem_cache_zalloc(ecryptfs_header_cache_2, GFP_USER); |
297 | if (!page_virt) { | 298 | if (!page_virt) { |
@@ -349,75 +350,6 @@ out: | |||
349 | } | 350 | } |
350 | 351 | ||
351 | /** | 352 | /** |
352 | * ecryptfs_new_lower_dentry | ||
353 | * @name: The name of the new dentry. | ||
354 | * @lower_dir_dentry: Parent directory of the new dentry. | ||
355 | * @nd: nameidata from last lookup. | ||
356 | * | ||
357 | * Create a new dentry or get it from lower parent dir. | ||
358 | */ | ||
359 | static struct dentry * | ||
360 | ecryptfs_new_lower_dentry(struct qstr *name, struct dentry *lower_dir_dentry, | ||
361 | struct nameidata *nd) | ||
362 | { | ||
363 | struct dentry *new_dentry; | ||
364 | struct dentry *tmp; | ||
365 | struct inode *lower_dir_inode; | ||
366 | |||
367 | lower_dir_inode = lower_dir_dentry->d_inode; | ||
368 | |||
369 | tmp = d_alloc(lower_dir_dentry, name); | ||
370 | if (!tmp) | ||
371 | return ERR_PTR(-ENOMEM); | ||
372 | |||
373 | mutex_lock(&lower_dir_inode->i_mutex); | ||
374 | new_dentry = lower_dir_inode->i_op->lookup(lower_dir_inode, tmp, nd); | ||
375 | mutex_unlock(&lower_dir_inode->i_mutex); | ||
376 | |||
377 | if (!new_dentry) | ||
378 | new_dentry = tmp; | ||
379 | else | ||
380 | dput(tmp); | ||
381 | |||
382 | return new_dentry; | ||
383 | } | ||
384 | |||
385 | |||
386 | /** | ||
387 | * ecryptfs_lookup_one_lower | ||
388 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up | ||
389 | * @lower_dir_dentry: lower parent directory | ||
390 | * @name: lower file name | ||
391 | * | ||
392 | * Get the lower dentry from vfs. If lower dentry does not exist yet, | ||
393 | * create it. | ||
394 | */ | ||
395 | static struct dentry * | ||
396 | ecryptfs_lookup_one_lower(struct dentry *ecryptfs_dentry, | ||
397 | struct dentry *lower_dir_dentry, struct qstr *name) | ||
398 | { | ||
399 | struct nameidata nd; | ||
400 | struct vfsmount *lower_mnt; | ||
401 | int err; | ||
402 | |||
403 | lower_mnt = mntget(ecryptfs_dentry_to_lower_mnt( | ||
404 | ecryptfs_dentry->d_parent)); | ||
405 | err = vfs_path_lookup(lower_dir_dentry, lower_mnt, name->name , 0, &nd); | ||
406 | mntput(lower_mnt); | ||
407 | |||
408 | if (!err) { | ||
409 | /* we dont need the mount */ | ||
410 | mntput(nd.path.mnt); | ||
411 | return nd.path.dentry; | ||
412 | } | ||
413 | if (err != -ENOENT) | ||
414 | return ERR_PTR(err); | ||
415 | |||
416 | /* create a new lower dentry */ | ||
417 | return ecryptfs_new_lower_dentry(name, lower_dir_dentry, &nd); | ||
418 | } | ||
419 | |||
420 | /** | ||
421 | * ecryptfs_lookup | 353 | * ecryptfs_lookup |
422 | * @ecryptfs_dir_inode: The eCryptfs directory inode | 354 | * @ecryptfs_dir_inode: The eCryptfs directory inode |
423 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up | 355 | * @ecryptfs_dentry: The eCryptfs dentry that we are looking up |
@@ -434,7 +366,6 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
434 | size_t encrypted_and_encoded_name_size; | 366 | size_t encrypted_and_encoded_name_size; |
435 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; | 367 | struct ecryptfs_mount_crypt_stat *mount_crypt_stat = NULL; |
436 | struct dentry *lower_dir_dentry, *lower_dentry; | 368 | struct dentry *lower_dir_dentry, *lower_dentry; |
437 | struct qstr lower_name; | ||
438 | int rc = 0; | 369 | int rc = 0; |
439 | 370 | ||
440 | if ((ecryptfs_dentry->d_name.len == 1 | 371 | if ((ecryptfs_dentry->d_name.len == 1 |
@@ -444,20 +375,14 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
444 | goto out_d_drop; | 375 | goto out_d_drop; |
445 | } | 376 | } |
446 | lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); | 377 | lower_dir_dentry = ecryptfs_dentry_to_lower(ecryptfs_dentry->d_parent); |
447 | lower_name.name = ecryptfs_dentry->d_name.name; | 378 | mutex_lock(&lower_dir_dentry->d_inode->i_mutex); |
448 | lower_name.len = ecryptfs_dentry->d_name.len; | 379 | lower_dentry = lookup_one_len(ecryptfs_dentry->d_name.name, |
449 | lower_name.hash = ecryptfs_dentry->d_name.hash; | 380 | lower_dir_dentry, |
450 | if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { | 381 | ecryptfs_dentry->d_name.len); |
451 | rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, | 382 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
452 | lower_dir_dentry->d_inode, &lower_name); | ||
453 | if (rc < 0) | ||
454 | goto out_d_drop; | ||
455 | } | ||
456 | lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, | ||
457 | lower_dir_dentry, &lower_name); | ||
458 | if (IS_ERR(lower_dentry)) { | 383 | if (IS_ERR(lower_dentry)) { |
459 | rc = PTR_ERR(lower_dentry); | 384 | rc = PTR_ERR(lower_dentry); |
460 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " | 385 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
461 | "[%d] on lower_dentry = [%s]\n", __func__, rc, | 386 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
462 | encrypted_and_encoded_name); | 387 | encrypted_and_encoded_name); |
463 | goto out_d_drop; | 388 | goto out_d_drop; |
@@ -479,28 +404,21 @@ static struct dentry *ecryptfs_lookup(struct inode *ecryptfs_dir_inode, | |||
479 | "filename; rc = [%d]\n", __func__, rc); | 404 | "filename; rc = [%d]\n", __func__, rc); |
480 | goto out_d_drop; | 405 | goto out_d_drop; |
481 | } | 406 | } |
482 | lower_name.name = encrypted_and_encoded_name; | 407 | mutex_lock(&lower_dir_dentry->d_inode->i_mutex); |
483 | lower_name.len = encrypted_and_encoded_name_size; | 408 | lower_dentry = lookup_one_len(encrypted_and_encoded_name, |
484 | lower_name.hash = full_name_hash(lower_name.name, lower_name.len); | 409 | lower_dir_dentry, |
485 | if (lower_dir_dentry->d_op && lower_dir_dentry->d_op->d_hash) { | 410 | encrypted_and_encoded_name_size); |
486 | rc = lower_dir_dentry->d_op->d_hash(lower_dir_dentry, | 411 | mutex_unlock(&lower_dir_dentry->d_inode->i_mutex); |
487 | lower_dir_dentry->d_inode, &lower_name); | ||
488 | if (rc < 0) | ||
489 | goto out_d_drop; | ||
490 | } | ||
491 | lower_dentry = ecryptfs_lookup_one_lower(ecryptfs_dentry, | ||
492 | lower_dir_dentry, &lower_name); | ||
493 | if (IS_ERR(lower_dentry)) { | 412 | if (IS_ERR(lower_dentry)) { |
494 | rc = PTR_ERR(lower_dentry); | 413 | rc = PTR_ERR(lower_dentry); |
495 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_lower() returned " | 414 | ecryptfs_printk(KERN_DEBUG, "%s: lookup_one_len() returned " |
496 | "[%d] on lower_dentry = [%s]\n", __func__, rc, | 415 | "[%d] on lower_dentry = [%s]\n", __func__, rc, |
497 | encrypted_and_encoded_name); | 416 | encrypted_and_encoded_name); |
498 | goto out_d_drop; | 417 | goto out_d_drop; |
499 | } | 418 | } |
500 | lookup_and_interpose: | 419 | lookup_and_interpose: |
501 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, | 420 | rc = ecryptfs_lookup_and_interpose_lower(ecryptfs_dentry, lower_dentry, |
502 | ecryptfs_dir_inode, | 421 | ecryptfs_dir_inode); |
503 | ecryptfs_nd); | ||
504 | goto out; | 422 | goto out; |
505 | out_d_drop: | 423 | out_d_drop: |
506 | d_drop(ecryptfs_dentry); | 424 | d_drop(ecryptfs_dentry); |
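Both lookup branches above now share one shape: take the parent directory's i_mutex, let lookup_one_len() do the hashing and allocation that the removed helpers open-coded, and drop the mutex. A kernel-style sketch of that shape (the wrapper name is made up; error handling trimmed):

#include <linux/fs.h>
#include <linux/namei.h>

/* Sketch only: lookup_one_len() must be called with the parent's i_mutex
 * held and returns either a dentry reference or an ERR_PTR() value. */
static struct dentry *lookup_child_locked(struct dentry *parent,
					  const char *name, int len)
{
	struct dentry *child;

	mutex_lock(&parent->d_inode->i_mutex);
	child = lookup_one_len(name, parent, len);
	mutex_unlock(&parent->d_inode->i_mutex);
	return child;	/* caller dput()s on success */
}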
@@ -1092,6 +1010,8 @@ int ecryptfs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
1092 | rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), | 1010 | rc = vfs_getattr(ecryptfs_dentry_to_lower_mnt(dentry), |
1093 | ecryptfs_dentry_to_lower(dentry), &lower_stat); | 1011 | ecryptfs_dentry_to_lower(dentry), &lower_stat); |
1094 | if (!rc) { | 1012 | if (!rc) { |
1013 | fsstack_copy_attr_all(dentry->d_inode, | ||
1014 | ecryptfs_inode_to_lower(dentry->d_inode)); | ||
1095 | generic_fillattr(dentry->d_inode, stat); | 1015 | generic_fillattr(dentry->d_inode, stat); |
1096 | stat->blocks = lower_stat.blocks; | 1016 | stat->blocks = lower_stat.blocks; |
1097 | } | 1017 | } |
diff --git a/fs/eventfd.c b/fs/eventfd.c index e0194b3e14d6..d9a591773919 100644 --- a/fs/eventfd.c +++ b/fs/eventfd.c | |||
@@ -99,7 +99,7 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_get); | |||
99 | * @ctx: [in] Pointer to eventfd context. | 99 | * @ctx: [in] Pointer to eventfd context. |
100 | * | 100 | * |
101 | * The eventfd context reference must have been previously acquired either | 101 | * The eventfd context reference must have been previously acquired either |
102 | * with eventfd_ctx_get() or eventfd_ctx_fdget()). | 102 | * with eventfd_ctx_get() or eventfd_ctx_fdget(). |
103 | */ | 103 | */ |
104 | void eventfd_ctx_put(struct eventfd_ctx *ctx) | 104 | void eventfd_ctx_put(struct eventfd_ctx *ctx) |
105 | { | 105 | { |
@@ -146,9 +146,9 @@ static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) | |||
146 | * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. | 146 | * eventfd_ctx_remove_wait_queue - Read the current counter and removes wait queue. |
147 | * @ctx: [in] Pointer to eventfd context. | 147 | * @ctx: [in] Pointer to eventfd context. |
148 | * @wait: [in] Wait queue to be removed. | 148 | * @wait: [in] Wait queue to be removed. |
149 | * @cnt: [out] Pointer to the 64bit conter value. | 149 | * @cnt: [out] Pointer to the 64-bit counter value. |
150 | * | 150 | * |
151 | * Returns zero if successful, or the following error codes: | 151 | * Returns %0 if successful, or the following error codes: |
152 | * | 152 | * |
153 | * -EAGAIN : The operation would have blocked. | 153 | * -EAGAIN : The operation would have blocked. |
154 | * | 154 | * |
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(eventfd_ctx_remove_wait_queue); | |||
175 | * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. | 175 | * eventfd_ctx_read - Reads the eventfd counter or wait if it is zero. |
176 | * @ctx: [in] Pointer to eventfd context. | 176 | * @ctx: [in] Pointer to eventfd context. |
177 | * @no_wait: [in] Different from zero if the operation should not block. | 177 | * @no_wait: [in] Different from zero if the operation should not block. |
178 | * @cnt: [out] Pointer to the 64bit conter value. | 178 | * @cnt: [out] Pointer to the 64-bit counter value. |
179 | * | 179 | * |
180 | * Returns zero if successful, or the following error codes: | 180 | * Returns %0 if successful, or the following error codes: |
181 | * | 181 | * |
182 | * -EAGAIN : The operation would have blocked but @no_wait was nonzero. | 182 | * -EAGAIN : The operation would have blocked but @no_wait was non-zero. |
183 | * -ERESTARTSYS : A signal interrupted the wait operation. | 183 | * -ERESTARTSYS : A signal interrupted the wait operation. |
184 | * | 184 | * |
185 | * If @no_wait is zero, the function might sleep until the eventfd internal | 185 | * If @no_wait is zero, the function might sleep until the eventfd internal |
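The kernel-doc corrected above documents the in-kernel side of eventfd; from user space the object is simply a 64-bit counter behind a file descriptor, written to add and read to fetch-and-clear. A small usage sketch (not part of the patch):

#include <stdint.h>
#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int main(void)
{
	/* Counter starts at 0; EFD_NONBLOCK makes a read of a zero counter
	 * fail with EAGAIN instead of sleeping. */
	int fd = eventfd(0, EFD_NONBLOCK);
	uint64_t val = 3;

	if (fd < 0)
		return 1;
	if (write(fd, &val, sizeof(val)) != sizeof(val))	/* adds 3 */
		return 1;
	val = 0;
	if (read(fd, &val, sizeof(val)) != sizeof(val))		/* reads 3, resets to 0 */
		return 1;
	printf("counter read back: %llu\n", (unsigned long long)val);
	close(fd);
	return 0;
}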
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c index 08a8beb152e6..7cd9a5a68d59 100644 --- a/fs/gfs2/glock.c +++ b/fs/gfs2/glock.c | |||
@@ -1779,11 +1779,11 @@ int __init gfs2_glock_init(void) | |||
1779 | #endif | 1779 | #endif |
1780 | 1780 | ||
1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | | 1781 | glock_workqueue = alloc_workqueue("glock_workqueue", WQ_MEM_RECLAIM | |
1782 | WQ_HIGHPRI | WQ_FREEZEABLE, 0); | 1782 | WQ_HIGHPRI | WQ_FREEZABLE, 0); |
1783 | if (IS_ERR(glock_workqueue)) | 1783 | if (IS_ERR(glock_workqueue)) |
1784 | return PTR_ERR(glock_workqueue); | 1784 | return PTR_ERR(glock_workqueue); |
1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", | 1785 | gfs2_delete_workqueue = alloc_workqueue("delete_workqueue", |
1786 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, | 1786 | WQ_MEM_RECLAIM | WQ_FREEZABLE, |
1787 | 0); | 1787 | 0); |
1788 | if (IS_ERR(gfs2_delete_workqueue)) { | 1788 | if (IS_ERR(gfs2_delete_workqueue)) { |
1789 | destroy_workqueue(glock_workqueue); | 1789 | destroy_workqueue(glock_workqueue); |
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index ebef7ab6e17e..85ba027d1c4d 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -144,7 +144,7 @@ static int __init init_gfs2_fs(void) | |||
144 | 144 | ||
145 | error = -ENOMEM; | 145 | error = -ENOMEM; |
146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", | 146 | gfs_recovery_wq = alloc_workqueue("gfs_recovery", |
147 | WQ_MEM_RECLAIM | WQ_FREEZEABLE, 0); | 147 | WQ_MEM_RECLAIM | WQ_FREEZABLE, 0); |
148 | if (!gfs_recovery_wq) | 148 | if (!gfs_recovery_wq) |
149 | goto fail_wq; | 149 | goto fail_wq; |
150 | 150 | ||
diff --git a/fs/namei.c b/fs/namei.c index 9e701e28a329..0087cf9c2c6b 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -795,7 +795,7 @@ __do_follow_link(const struct path *link, struct nameidata *nd, void **p) | |||
795 | * Without that kind of total limit, nasty chains of consecutive | 795 | * Without that kind of total limit, nasty chains of consecutive |
796 | * symlinks can cause almost arbitrarily long lookups. | 796 | * symlinks can cause almost arbitrarily long lookups. |
797 | */ | 797 | */ |
798 | static inline int do_follow_link(struct path *path, struct nameidata *nd) | 798 | static inline int do_follow_link(struct inode *inode, struct path *path, struct nameidata *nd) |
799 | { | 799 | { |
800 | void *cookie; | 800 | void *cookie; |
801 | int err = -ELOOP; | 801 | int err = -ELOOP; |
@@ -803,6 +803,7 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd) | |||
803 | /* We drop rcu-walk here */ | 803 | /* We drop rcu-walk here */ |
804 | if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) | 804 | if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) |
805 | return -ECHILD; | 805 | return -ECHILD; |
806 | BUG_ON(inode != path->dentry->d_inode); | ||
806 | 807 | ||
807 | if (current->link_count >= MAX_NESTED_LINKS) | 808 | if (current->link_count >= MAX_NESTED_LINKS) |
808 | goto loop; | 809 | goto loop; |
@@ -1413,8 +1414,7 @@ exec_again: | |||
1413 | goto out_dput; | 1414 | goto out_dput; |
1414 | 1415 | ||
1415 | if (inode->i_op->follow_link) { | 1416 | if (inode->i_op->follow_link) { |
1416 | BUG_ON(inode != next.dentry->d_inode); | 1417 | err = do_follow_link(inode, &next, nd); |
1417 | err = do_follow_link(&next, nd); | ||
1418 | if (err) | 1418 | if (err) |
1419 | goto return_err; | 1419 | goto return_err; |
1420 | nd->inode = nd->path.dentry->d_inode; | 1420 | nd->inode = nd->path.dentry->d_inode; |
@@ -1458,8 +1458,7 @@ last_component: | |||
1458 | break; | 1458 | break; |
1459 | if (inode && unlikely(inode->i_op->follow_link) && | 1459 | if (inode && unlikely(inode->i_op->follow_link) && |
1460 | (lookup_flags & LOOKUP_FOLLOW)) { | 1460 | (lookup_flags & LOOKUP_FOLLOW)) { |
1461 | BUG_ON(inode != next.dentry->d_inode); | 1461 | err = do_follow_link(inode, &next, nd); |
1462 | err = do_follow_link(&next, nd); | ||
1463 | if (err) | 1462 | if (err) |
1464 | goto return_err; | 1463 | goto return_err; |
1465 | nd->inode = nd->path.dentry->d_inode; | 1464 | nd->inode = nd->path.dentry->d_inode; |
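Passing the inode into do_follow_link() lets the BUG_ON() live inside the helper instead of being duplicated (or forgotten) at each call site. The same move-the-invariant-into-the-callee idea in miniature (names are generic, not from the patch):

#include <assert.h>

struct obj { int id; };

/* Callers used to assert this themselves before every call; checking it
 * once here means a future call site cannot skip the check. */
static int follow(struct obj *expected, struct obj *found)
{
	assert(expected == found);	/* plays the role of BUG_ON(inode != dentry->d_inode) */
	return 0;
}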
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 956629b9cdc9..1275b8655070 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c | |||
@@ -317,8 +317,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
317 | READ_BUF(dummy32); | 317 | READ_BUF(dummy32); |
318 | len += (XDR_QUADLEN(dummy32) << 2); | 318 | len += (XDR_QUADLEN(dummy32) << 2); |
319 | READMEM(buf, dummy32); | 319 | READMEM(buf, dummy32); |
320 | if ((host_err = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) | 320 | if ((status = nfsd_map_name_to_uid(argp->rqstp, buf, dummy32, &iattr->ia_uid))) |
321 | goto out_nfserr; | 321 | return status; |
322 | iattr->ia_valid |= ATTR_UID; | 322 | iattr->ia_valid |= ATTR_UID; |
323 | } | 323 | } |
324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { | 324 | if (bmval[1] & FATTR4_WORD1_OWNER_GROUP) { |
@@ -328,8 +328,8 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, | |||
328 | READ_BUF(dummy32); | 328 | READ_BUF(dummy32); |
329 | len += (XDR_QUADLEN(dummy32) << 2); | 329 | len += (XDR_QUADLEN(dummy32) << 2); |
330 | READMEM(buf, dummy32); | 330 | READMEM(buf, dummy32); |
331 | if ((host_err = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) | 331 | if ((status = nfsd_map_name_to_gid(argp->rqstp, buf, dummy32, &iattr->ia_gid))) |
332 | goto out_nfserr; | 332 | return status; |
333 | iattr->ia_valid |= ATTR_GID; | 333 | iattr->ia_valid |= ATTR_GID; |
334 | } | 334 | } |
335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { | 335 | if (bmval[1] & FATTR4_WORD1_TIME_ACCESS_SET) { |
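The two hunks above change which error domain is propagated: nfsd_map_name_to_uid()/_gid() appear to return an NFS status code rather than a host errno, so the value is returned as-is instead of being pushed through the errno-to-status conversion at out_nfserr. A toy illustration of keeping the two domains apart (types, values, and helpers here are hypothetical):

typedef int host_err_t;		/* negative errno values */
typedef int nfs_status_t;	/* protocol status codes, already translated */

static nfs_status_t errno_to_status(host_err_t err)
{
	return 10000 - err;	/* stand-in for the real errno-to-status mapping */
}

/* If the mapper already speaks "status", converting it a second time would
 * corrupt the value; only genuine errnos go through the translation. */
static nfs_status_t decode_owner(nfs_status_t mapper_status, host_err_t io_err)
{
	if (mapper_status)
		return mapper_status;
	if (io_err)
		return errno_to_status(io_err);
	return 0;
}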
diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c index 68d6a216ee79..11f688bd76c5 100644 --- a/fs/partitions/mac.c +++ b/fs/partitions/mac.c | |||
@@ -29,10 +29,9 @@ static inline void mac_fix_string(char *stg, int len) | |||
29 | 29 | ||
30 | int mac_partition(struct parsed_partitions *state) | 30 | int mac_partition(struct parsed_partitions *state) |
31 | { | 31 | { |
32 | int slot = 1; | ||
33 | Sector sect; | 32 | Sector sect; |
34 | unsigned char *data; | 33 | unsigned char *data; |
35 | int blk, blocks_in_map; | 34 | int slot, blocks_in_map; |
36 | unsigned secsize; | 35 | unsigned secsize; |
37 | #ifdef CONFIG_PPC_PMAC | 36 | #ifdef CONFIG_PPC_PMAC |
38 | int found_root = 0; | 37 | int found_root = 0; |
@@ -59,10 +58,14 @@ int mac_partition(struct parsed_partitions *state) | |||
59 | put_dev_sector(sect); | 58 | put_dev_sector(sect); |
60 | return 0; /* not a MacOS disk */ | 59 | return 0; /* not a MacOS disk */ |
61 | } | 60 | } |
62 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
63 | blocks_in_map = be32_to_cpu(part->map_count); | 61 | blocks_in_map = be32_to_cpu(part->map_count); |
64 | for (blk = 1; blk <= blocks_in_map; ++blk) { | 62 | if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) { |
65 | int pos = blk * secsize; | 63 | put_dev_sector(sect); |
64 | return 0; | ||
65 | } | ||
66 | strlcat(state->pp_buf, " [mac]", PAGE_SIZE); | ||
67 | for (slot = 1; slot <= blocks_in_map; ++slot) { | ||
68 | int pos = slot * secsize; | ||
66 | put_dev_sector(sect); | 69 | put_dev_sector(sect); |
67 | data = read_part_sector(state, pos/512, §); | 70 | data = read_part_sector(state, pos/512, §); |
68 | if (!data) | 71 | if (!data) |
@@ -113,13 +116,11 @@ int mac_partition(struct parsed_partitions *state) | |||
113 | } | 116 | } |
114 | 117 | ||
115 | if (goodness > found_root_goodness) { | 118 | if (goodness > found_root_goodness) { |
116 | found_root = blk; | 119 | found_root = slot; |
117 | found_root_goodness = goodness; | 120 | found_root_goodness = goodness; |
118 | } | 121 | } |
119 | } | 122 | } |
120 | #endif /* CONFIG_PPC_PMAC */ | 123 | #endif /* CONFIG_PPC_PMAC */ |
121 | |||
122 | ++slot; | ||
123 | } | 124 | } |
124 | #ifdef CONFIG_PPC_PMAC | 125 | #ifdef CONFIG_PPC_PMAC |
125 | if (found_root_goodness) | 126 | if (found_root_goodness) |
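The substantive part of the mac partition hunk is that blocks_in_map comes straight off the disk, so it is range-checked before it can drive the read loop (and the separate slot counter it duplicated is dropped). A userspace sketch of the same validate-the-on-disk-count-first step (the limit's value is assumed for the example):

#include <stdint.h>
#include <stdio.h>

#define DISK_MAX_PARTS 256	/* plays the role of the kernel limit; value assumed */

/* Treat the partition-map count read from disk as untrusted input and
 * reject it before it can drive an unbounded loop. */
static int parse_map(int32_t blocks_in_map)	/* already byte-swapped */
{
	if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS)
		return 0;	/* ignore the map rather than loop on a bogus count */

	for (int slot = 1; slot <= blocks_in_map; ++slot)
		printf("would read map block %d\n", slot);
	return 1;
}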
diff --git a/include/linux/freezer.h b/include/linux/freezer.h index da7e52b099f3..1effc8b56b4e 100644 --- a/include/linux/freezer.h +++ b/include/linux/freezer.h | |||
@@ -109,7 +109,7 @@ static inline void freezer_count(void) | |||
109 | } | 109 | } |
110 | 110 | ||
111 | /* | 111 | /* |
112 | * Check if the task should be counted as freezeable by the freezer | 112 | * Check if the task should be counted as freezable by the freezer |
113 | */ | 113 | */ |
114 | static inline int freezer_should_skip(struct task_struct *p) | 114 | static inline int freezer_should_skip(struct task_struct *p) |
115 | { | 115 | { |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 55e0d4253e49..d746da19c6a2 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -55,7 +55,7 @@ | |||
55 | * Used by threaded interrupts which need to keep the | 55 | * Used by threaded interrupts which need to keep the |
56 | * irq line disabled until the threaded handler has been run. | 56 | * irq line disabled until the threaded handler has been run. |
57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend | 57 | * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend |
58 | * | 58 | * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set |
59 | */ | 59 | */ |
60 | #define IRQF_DISABLED 0x00000020 | 60 | #define IRQF_DISABLED 0x00000020 |
61 | #define IRQF_SAMPLE_RANDOM 0x00000040 | 61 | #define IRQF_SAMPLE_RANDOM 0x00000040 |
@@ -67,6 +67,7 @@ | |||
67 | #define IRQF_IRQPOLL 0x00001000 | 67 | #define IRQF_IRQPOLL 0x00001000 |
68 | #define IRQF_ONESHOT 0x00002000 | 68 | #define IRQF_ONESHOT 0x00002000 |
69 | #define IRQF_NO_SUSPEND 0x00004000 | 69 | #define IRQF_NO_SUSPEND 0x00004000 |
70 | #define IRQF_FORCE_RESUME 0x00008000 | ||
70 | 71 | ||
71 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) | 72 | #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND) |
72 | 73 | ||
diff --git a/include/linux/list.h b/include/linux/list.h index 9a5f8a71810c..3a54266a1e85 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -96,6 +96,11 @@ static inline void __list_del(struct list_head * prev, struct list_head * next) | |||
96 | * in an undefined state. | 96 | * in an undefined state. |
97 | */ | 97 | */ |
98 | #ifndef CONFIG_DEBUG_LIST | 98 | #ifndef CONFIG_DEBUG_LIST |
99 | static inline void __list_del_entry(struct list_head *entry) | ||
100 | { | ||
101 | __list_del(entry->prev, entry->next); | ||
102 | } | ||
103 | |||
99 | static inline void list_del(struct list_head *entry) | 104 | static inline void list_del(struct list_head *entry) |
100 | { | 105 | { |
101 | __list_del(entry->prev, entry->next); | 106 | __list_del(entry->prev, entry->next); |
@@ -103,6 +108,7 @@ static inline void list_del(struct list_head *entry) | |||
103 | entry->prev = LIST_POISON2; | 108 | entry->prev = LIST_POISON2; |
104 | } | 109 | } |
105 | #else | 110 | #else |
111 | extern void __list_del_entry(struct list_head *entry); | ||
106 | extern void list_del(struct list_head *entry); | 112 | extern void list_del(struct list_head *entry); |
107 | #endif | 113 | #endif |
108 | 114 | ||
@@ -135,7 +141,7 @@ static inline void list_replace_init(struct list_head *old, | |||
135 | */ | 141 | */ |
136 | static inline void list_del_init(struct list_head *entry) | 142 | static inline void list_del_init(struct list_head *entry) |
137 | { | 143 | { |
138 | __list_del(entry->prev, entry->next); | 144 | __list_del_entry(entry); |
139 | INIT_LIST_HEAD(entry); | 145 | INIT_LIST_HEAD(entry); |
140 | } | 146 | } |
141 | 147 | ||
@@ -146,7 +152,7 @@ static inline void list_del_init(struct list_head *entry) | |||
146 | */ | 152 | */ |
147 | static inline void list_move(struct list_head *list, struct list_head *head) | 153 | static inline void list_move(struct list_head *list, struct list_head *head) |
148 | { | 154 | { |
149 | __list_del(list->prev, list->next); | 155 | __list_del_entry(list); |
150 | list_add(list, head); | 156 | list_add(list, head); |
151 | } | 157 | } |
152 | 158 | ||
@@ -158,7 +164,7 @@ static inline void list_move(struct list_head *list, struct list_head *head) | |||
158 | static inline void list_move_tail(struct list_head *list, | 164 | static inline void list_move_tail(struct list_head *list, |
159 | struct list_head *head) | 165 | struct list_head *head) |
160 | { | 166 | { |
161 | __list_del(list->prev, list->next); | 167 | __list_del_entry(list); |
162 | list_add_tail(list, head); | 168 | list_add_tail(list, head); |
163 | } | 169 | } |
164 | 170 | ||
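Factoring the raw unlink into __list_del_entry() gives list_del(), list_del_init(), list_move() and list_move_tail() a single choke point, which is what lets the debug build (lib/list_debug.c further down) concentrate its corruption checks in one helper. A freestanding rendering of the non-debug versions:

struct list_head { struct list_head *next, *prev; };

/* Unlink @entry without poisoning it, so helpers that re-link the node
 * (list_move(), list_move_tail(), list_del_init()) can share the primitive. */
static void __list_del_entry(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

static void list_move_tail(struct list_head *entry, struct list_head *head)
{
	__list_del_entry(entry);	/* single place to hang sanity checks */
	entry->next = head;		/* then insert just before @head */
	entry->prev = head->prev;
	head->prev->next = entry;
	head->prev = entry;
}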
diff --git a/include/linux/module.h b/include/linux/module.h index 9bdf27c7615b..5de42043dff0 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -62,7 +62,7 @@ struct module_version_attribute { | |||
62 | struct module_attribute mattr; | 62 | struct module_attribute mattr; |
63 | const char *module_name; | 63 | const char *module_name; |
64 | const char *version; | 64 | const char *version; |
65 | }; | 65 | } __attribute__ ((__aligned__(sizeof(void *)))); |
66 | 66 | ||
67 | struct module_kobject | 67 | struct module_kobject |
68 | { | 68 | { |
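struct module_version_attribute objects are emitted into a dedicated ELF section and later walked as an array, so the stride the compiler uses when placing them has to equal sizeof() of the structure; forcing pointer alignment is what makes that stride predictable. The effect of the attribute in isolation:

#include <stddef.h>
#include <stdio.h>

struct entry {
	const char *module_name;
	const char *version;
} __attribute__((__aligned__(sizeof(void *))));

/* Walking a linker section as an array only works when each object's
 * placement alignment matches the structure's own alignment, so that
 * sizeof(struct entry) really is the distance between entries. */
int main(void)
{
	printf("sizeof=%zu alignof=%zu\n", sizeof(struct entry),
	       (size_t)__alignof__(struct entry));
	return 0;
}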
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index a0b639f8e805..89c3e5182991 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -203,6 +203,18 @@ struct rtc_device | |||
203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ | 203 | struct hrtimer pie_timer; /* sub second exp, so needs hrtimer */ |
204 | int pie_enabled; | 204 | int pie_enabled; |
205 | struct work_struct irqwork; | 205 | struct work_struct irqwork; |
206 | |||
207 | |||
208 | #ifdef CONFIG_RTC_INTF_DEV_UIE_EMUL | ||
209 | struct work_struct uie_task; | ||
210 | struct timer_list uie_timer; | ||
211 | /* Those fields are protected by rtc->irq_lock */ | ||
212 | unsigned int oldsecs; | ||
213 | unsigned int uie_irq_active:1; | ||
214 | unsigned int stop_uie_polling:1; | ||
215 | unsigned int uie_task_active:1; | ||
216 | unsigned int uie_timer_active:1; | ||
217 | #endif | ||
206 | }; | 218 | }; |
207 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) | 219 | #define to_rtc_device(d) container_of(d, struct rtc_device, dev) |
208 | 220 | ||
@@ -235,7 +247,10 @@ extern int rtc_irq_set_freq(struct rtc_device *rtc, | |||
235 | struct rtc_task *task, int freq); | 247 | struct rtc_task *task, int freq); |
236 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 248 | extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
237 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); | 249 | extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled); |
250 | extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc, | ||
251 | unsigned int enabled); | ||
238 | 252 | ||
253 | void rtc_handle_legacy_irq(struct rtc_device *rtc, int num, int mode); | ||
239 | void rtc_aie_update_irq(void *private); | 254 | void rtc_aie_update_irq(void *private); |
240 | void rtc_uie_update_irq(void *private); | 255 | void rtc_uie_update_irq(void *private); |
241 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); | 256 | enum hrtimer_restart rtc_pie_update_irq(struct hrtimer *timer); |
diff --git a/include/linux/sched.h b/include/linux/sched.h index d747f948b34e..777d8a5ed06b 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1744,7 +1744,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1744 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ | 1745 | #define PF_MEMPOLICY 0x10000000 /* Non-default NUMA mempolicy */ |
1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1746 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ |
1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezeable */ | 1747 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ | 1748 | #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */ |
1749 | 1749 | ||
1750 | /* | 1750 | /* |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 1ac11586a2f5..f7998a3bf020 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -250,7 +250,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
250 | enum { | 250 | enum { |
251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 251 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 252 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
253 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | 253 | WQ_FREEZABLE = 1 << 2, /* freeze during suspend */ |
254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ | 254 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
255 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 255 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ | 256 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu instensive workqueue */ |
@@ -318,7 +318,7 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | |||
318 | /** | 318 | /** |
319 | * alloc_ordered_workqueue - allocate an ordered workqueue | 319 | * alloc_ordered_workqueue - allocate an ordered workqueue |
320 | * @name: name of the workqueue | 320 | * @name: name of the workqueue |
321 | * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) | 321 | * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful) |
322 | * | 322 | * |
323 | * Allocate an ordered workqueue. An ordered workqueue executes at | 323 | * Allocate an ordered workqueue. An ordered workqueue executes at |
324 | * most one work item at any given time in the queued order. They are | 324 | * most one work item at any given time in the queued order. They are |
@@ -335,8 +335,8 @@ alloc_ordered_workqueue(const char *name, unsigned int flags) | |||
335 | 335 | ||
336 | #define create_workqueue(name) \ | 336 | #define create_workqueue(name) \ |
337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) | 337 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) |
338 | #define create_freezeable_workqueue(name) \ | 338 | #define create_freezable_workqueue(name) \ |
339 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 339 | alloc_workqueue((name), WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
340 | #define create_singlethread_workqueue(name) \ | 340 | #define create_singlethread_workqueue(name) \ |
341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) | 341 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
342 | 342 | ||
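Besides the spelling change, the convenience macro becomes create_freezable_workqueue(); callers that want suspend-aware queues pass WQ_FREEZABLE to alloc_workqueue() directly. An out-of-tree-module-style sketch using the renamed flag (illustrative only, not from this series):

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	/* WQ_FREEZABLE (new spelling) parks the queue's work items across
	 * suspend; WQ_MEM_RECLAIM keeps a rescuer thread available. */
	example_wq = alloc_workqueue("example", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0);
	return example_wq ? 0 : -ENOMEM;
}

static void __exit example_exit(void)
{
	destroy_workqueue(example_wq);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");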
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 07fdfb6b9a9a..0828b6c8610a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <scsi/scsi_cmnd.h> | 8 | #include <scsi/scsi_cmnd.h> |
9 | #include <net/sock.h> | 9 | #include <net/sock.h> |
10 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
11 | #include "target_core_mib.h" | ||
12 | 11 | ||
13 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" | 12 | #define TARGET_CORE_MOD_VERSION "v4.0.0-rc6" |
14 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) | 13 | #define SHUTDOWN_SIGS (sigmask(SIGKILL)|sigmask(SIGINT)|sigmask(SIGABRT)) |
@@ -195,6 +194,21 @@ typedef enum { | |||
195 | SAM_TASK_ATTR_EMULATED | 194 | SAM_TASK_ATTR_EMULATED |
196 | } t10_task_attr_index_t; | 195 | } t10_task_attr_index_t; |
197 | 196 | ||
197 | /* | ||
198 | * Used for target SCSI statistics | ||
199 | */ | ||
200 | typedef enum { | ||
201 | SCSI_INST_INDEX, | ||
202 | SCSI_DEVICE_INDEX, | ||
203 | SCSI_AUTH_INTR_INDEX, | ||
204 | SCSI_INDEX_TYPE_MAX | ||
205 | } scsi_index_t; | ||
206 | |||
207 | struct scsi_index_table { | ||
208 | spinlock_t lock; | ||
209 | u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; | ||
210 | } ____cacheline_aligned; | ||
211 | |||
198 | struct se_cmd; | 212 | struct se_cmd; |
199 | 213 | ||
200 | struct t10_alua { | 214 | struct t10_alua { |
@@ -578,8 +592,6 @@ struct se_node_acl { | |||
578 | spinlock_t stats_lock; | 592 | spinlock_t stats_lock; |
579 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ | 593 | /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */ |
580 | atomic_t acl_pr_ref_count; | 594 | atomic_t acl_pr_ref_count; |
581 | /* Used for MIB access */ | ||
582 | atomic_t mib_ref_count; | ||
583 | struct se_dev_entry *device_list; | 595 | struct se_dev_entry *device_list; |
584 | struct se_session *nacl_sess; | 596 | struct se_session *nacl_sess; |
585 | struct se_portal_group *se_tpg; | 597 | struct se_portal_group *se_tpg; |
@@ -595,8 +607,6 @@ struct se_node_acl { | |||
595 | } ____cacheline_aligned; | 607 | } ____cacheline_aligned; |
596 | 608 | ||
597 | struct se_session { | 609 | struct se_session { |
598 | /* Used for MIB access */ | ||
599 | atomic_t mib_ref_count; | ||
600 | u64 sess_bin_isid; | 610 | u64 sess_bin_isid; |
601 | struct se_node_acl *se_node_acl; | 611 | struct se_node_acl *se_node_acl; |
602 | struct se_portal_group *se_tpg; | 612 | struct se_portal_group *se_tpg; |
@@ -806,7 +816,6 @@ struct se_hba { | |||
806 | /* Virtual iSCSI devices attached. */ | 816 | /* Virtual iSCSI devices attached. */ |
807 | u32 dev_count; | 817 | u32 dev_count; |
808 | u32 hba_index; | 818 | u32 hba_index; |
809 | atomic_t dev_mib_access_count; | ||
810 | atomic_t load_balance_queue; | 819 | atomic_t load_balance_queue; |
811 | atomic_t left_queue_depth; | 820 | atomic_t left_queue_depth; |
812 | /* Maximum queue depth the HBA can handle. */ | 821 | /* Maximum queue depth the HBA can handle. */ |
@@ -845,6 +854,12 @@ struct se_lun { | |||
845 | 854 | ||
846 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) | 855 | #define SE_LUN(c) ((struct se_lun *)(c)->se_lun) |
847 | 856 | ||
857 | struct scsi_port_stats { | ||
858 | u64 cmd_pdus; | ||
859 | u64 tx_data_octets; | ||
860 | u64 rx_data_octets; | ||
861 | } ____cacheline_aligned; | ||
862 | |||
848 | struct se_port { | 863 | struct se_port { |
849 | /* RELATIVE TARGET PORT IDENTIFER */ | 864 | /* RELATIVE TARGET PORT IDENTIFER */ |
850 | u16 sep_rtpi; | 865 | u16 sep_rtpi; |
@@ -867,6 +882,7 @@ struct se_port { | |||
867 | } ____cacheline_aligned; | 882 | } ____cacheline_aligned; |
868 | 883 | ||
869 | struct se_tpg_np { | 884 | struct se_tpg_np { |
885 | struct se_portal_group *tpg_np_parent; | ||
870 | struct config_group tpg_np_group; | 886 | struct config_group tpg_np_group; |
871 | } ____cacheline_aligned; | 887 | } ____cacheline_aligned; |
872 | 888 | ||
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h index 66f44e56eb80..246940511579 100644 --- a/include/target/target_core_transport.h +++ b/include/target/target_core_transport.h | |||
@@ -111,6 +111,8 @@ struct se_subsystem_api; | |||
111 | 111 | ||
112 | extern int init_se_global(void); | 112 | extern int init_se_global(void); |
113 | extern void release_se_global(void); | 113 | extern void release_se_global(void); |
114 | extern void init_scsi_index_table(void); | ||
115 | extern u32 scsi_get_new_index(scsi_index_t); | ||
114 | extern void transport_init_queue_obj(struct se_queue_obj *); | 116 | extern void transport_init_queue_obj(struct se_queue_obj *); |
115 | extern int transport_subsystem_check_init(void); | 117 | extern int transport_subsystem_check_init(void); |
116 | extern int transport_subsystem_register(struct se_subsystem_api *); | 118 | extern int transport_subsystem_register(struct se_subsystem_api *); |
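The new scsi_index_table pairs a lock with one counter per scsi_index_t, and scsi_get_new_index() presumably hands out the next value for the requested type. A userspace guess at that allocator (the locking primitive and exact behaviour are assumptions, not taken from the patch):

#include <pthread.h>
#include <stdint.h>

enum { INST_INDEX, DEVICE_INDEX, AUTH_INTR_INDEX, INDEX_TYPE_MAX };

static struct {
	pthread_mutex_t lock;
	uint32_t counter[INDEX_TYPE_MAX];
} index_table = { PTHREAD_MUTEX_INITIALIZER, { 0 } };

/* Hand out monotonically increasing per-type indices under the table lock,
 * which is what the spinlock-protected kernel structure suggests. */
static uint32_t get_new_index(int type)
{
	uint32_t idx;

	pthread_mutex_lock(&index_table.lock);
	idx = ++index_table.counter[type];
	pthread_mutex_unlock(&index_table.lock);
	return idx;
}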
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 0caa59f747dd..b4198ee8cfdf 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c | |||
@@ -282,8 +282,17 @@ EXPORT_SYMBOL(disable_irq); | |||
282 | 282 | ||
283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | 283 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) |
284 | { | 284 | { |
285 | if (resume) | 285 | if (resume) { |
286 | if (!(desc->status & IRQ_SUSPENDED)) { | ||
287 | if (!desc->action) | ||
288 | return; | ||
289 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | ||
290 | return; | ||
291 | /* Pretend that it got disabled ! */ | ||
292 | desc->depth++; | ||
293 | } | ||
286 | desc->status &= ~IRQ_SUSPENDED; | 294 | desc->status &= ~IRQ_SUSPENDED; |
295 | } | ||
287 | 296 | ||
288 | switch (desc->depth) { | 297 | switch (desc->depth) { |
289 | case 0: | 298 | case 0: |
diff --git a/kernel/irq/pm.c b/kernel/irq/pm.c index 0d4005d85b03..d6bfb89cce91 100644 --- a/kernel/irq/pm.c +++ b/kernel/irq/pm.c | |||
@@ -53,9 +53,6 @@ void resume_device_irqs(void) | |||
53 | for_each_irq_desc(irq, desc) { | 53 | for_each_irq_desc(irq, desc) { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | 55 | ||
56 | if (!(desc->status & IRQ_SUSPENDED)) | ||
57 | continue; | ||
58 | |||
59 | raw_spin_lock_irqsave(&desc->lock, flags); | 56 | raw_spin_lock_irqsave(&desc->lock, flags); |
60 | __enable_irq(desc, irq, true); | 57 | __enable_irq(desc, irq, true); |
61 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 58 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
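Taken together, the IRQ hunks mean resume_device_irqs() now visits every descriptor and lets __enable_irq() decide: lines that were suspended are resumed as before, while a line kept alive with IRQF_NO_SUSPEND is only re-enabled when its action also set IRQF_FORCE_RESUME, the depth bump standing in for the disable that never happened. A simplified mirror of that decision (the status bit value is illustrative):

#include <stdbool.h>

#define IRQ_SUSPENDED		0x1		/* illustrative value */
#define IRQF_NO_SUSPEND		0x00004000
#define IRQF_FORCE_RESUME	0x00008000

/* An IRQ that was never suspended is only touched on resume when its
 * action explicitly asked for a forced resume. */
static bool should_reenable_on_resume(unsigned int status,
				      unsigned long action_flags,
				      bool has_action)
{
	if (status & IRQ_SUSPENDED)
		return true;			/* normal case: it was suspended */
	if (!has_action)
		return false;			/* nothing registered, nothing to do */
	return (action_flags & IRQF_FORCE_RESUME) != 0;
}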
diff --git a/kernel/power/main.c b/kernel/power/main.c index 7b5db6a8561e..701853042c28 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c | |||
@@ -326,7 +326,7 @@ EXPORT_SYMBOL_GPL(pm_wq); | |||
326 | 326 | ||
327 | static int __init pm_start_workqueue(void) | 327 | static int __init pm_start_workqueue(void) |
328 | { | 328 | { |
329 | pm_wq = alloc_workqueue("pm", WQ_FREEZEABLE, 0); | 329 | pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0); |
330 | 330 | ||
331 | return pm_wq ? 0 : -ENOMEM; | 331 | return pm_wq ? 0 : -ENOMEM; |
332 | } | 332 | } |
diff --git a/kernel/power/process.c b/kernel/power/process.c index d6d2a10320e0..0cf3a27a6c9d 100644 --- a/kernel/power/process.c +++ b/kernel/power/process.c | |||
@@ -22,7 +22,7 @@ | |||
22 | */ | 22 | */ |
23 | #define TIMEOUT (20 * HZ) | 23 | #define TIMEOUT (20 * HZ) |
24 | 24 | ||
25 | static inline int freezeable(struct task_struct * p) | 25 | static inline int freezable(struct task_struct * p) |
26 | { | 26 | { |
27 | if ((p == current) || | 27 | if ((p == current) || |
28 | (p->flags & PF_NOFREEZE) || | 28 | (p->flags & PF_NOFREEZE) || |
@@ -53,7 +53,7 @@ static int try_to_freeze_tasks(bool sig_only) | |||
53 | todo = 0; | 53 | todo = 0; |
54 | read_lock(&tasklist_lock); | 54 | read_lock(&tasklist_lock); |
55 | do_each_thread(g, p) { | 55 | do_each_thread(g, p) { |
56 | if (frozen(p) || !freezeable(p)) | 56 | if (frozen(p) || !freezable(p)) |
57 | continue; | 57 | continue; |
58 | 58 | ||
59 | if (!freeze_task(p, sig_only)) | 59 | if (!freeze_task(p, sig_only)) |
@@ -167,7 +167,7 @@ static void thaw_tasks(bool nosig_only) | |||
167 | 167 | ||
168 | read_lock(&tasklist_lock); | 168 | read_lock(&tasklist_lock); |
169 | do_each_thread(g, p) { | 169 | do_each_thread(g, p) { |
170 | if (!freezeable(p)) | 170 | if (!freezable(p)) |
171 | continue; | 171 | continue; |
172 | 172 | ||
173 | if (nosig_only && should_send_signal(p)) | 173 | if (nosig_only && should_send_signal(p)) |
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 0dac75ea4456..64db648ff911 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -1519,11 +1519,8 @@ static int | |||
1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | 1519 | swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, |
1520 | unsigned int nr_pages, unsigned int nr_highmem) | 1520 | unsigned int nr_pages, unsigned int nr_highmem) |
1521 | { | 1521 | { |
1522 | int error = 0; | ||
1523 | |||
1524 | if (nr_highmem > 0) { | 1522 | if (nr_highmem > 0) { |
1525 | error = get_highmem_buffer(PG_ANY); | 1523 | if (get_highmem_buffer(PG_ANY)) |
1526 | if (error) | ||
1527 | goto err_out; | 1524 | goto err_out; |
1528 | if (nr_highmem > alloc_highmem) { | 1525 | if (nr_highmem > alloc_highmem) { |
1529 | nr_highmem -= alloc_highmem; | 1526 | nr_highmem -= alloc_highmem; |
@@ -1546,7 +1543,7 @@ swsusp_alloc(struct memory_bitmap *orig_bm, struct memory_bitmap *copy_bm, | |||
1546 | 1543 | ||
1547 | err_out: | 1544 | err_out: |
1548 | swsusp_free(); | 1545 | swsusp_free(); |
1549 | return error; | 1546 | return -ENOMEM; |
1550 | } | 1547 | } |
1551 | 1548 | ||
1552 | asmlinkage int swsusp_save(void) | 1549 | asmlinkage int swsusp_save(void) |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 11869faa6819..ee6578b578ad 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -79,7 +79,9 @@ enum { | |||
79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ | 79 | MAX_IDLE_WORKERS_RATIO = 4, /* 1/4 of busy can be idle */ |
80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ | 80 | IDLE_WORKER_TIMEOUT = 300 * HZ, /* keep idle ones for 5 mins */ |
81 | 81 | ||
82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100, /* call for help after 10ms */ | 82 | MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2, |
83 | /* call for help after 10ms | ||
84 | (min two ticks) */ | ||
83 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ | 85 | MAYDAY_INTERVAL = HZ / 10, /* and then every 100ms */ |
84 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ | 86 | CREATE_COOLDOWN = HZ, /* time to breath after fail */ |
85 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ | 87 | TRUSTEE_COOLDOWN = HZ / 10, /* for trustee draining */ |
@@ -2047,6 +2049,15 @@ repeat: | |||
2047 | move_linked_works(work, scheduled, &n); | 2049 | move_linked_works(work, scheduled, &n); |
2048 | 2050 | ||
2049 | process_scheduled_works(rescuer); | 2051 | process_scheduled_works(rescuer); |
2052 | |||
2053 | /* | ||
2054 | * Leave this gcwq. If keep_working() is %true, notify a | ||
2055 | * regular worker; otherwise, we end up with 0 concurrency | ||
2056 | * and stalling the execution. | ||
2057 | */ | ||
2058 | if (keep_working(gcwq)) | ||
2059 | wake_up_worker(gcwq); | ||
2060 | |||
2050 | spin_unlock_irq(&gcwq->lock); | 2061 | spin_unlock_irq(&gcwq->lock); |
2051 | } | 2062 | } |
2052 | 2063 | ||
@@ -2956,7 +2967,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name, | |||
2956 | */ | 2967 | */ |
2957 | spin_lock(&workqueue_lock); | 2968 | spin_lock(&workqueue_lock); |
2958 | 2969 | ||
2959 | if (workqueue_freezing && wq->flags & WQ_FREEZEABLE) | 2970 | if (workqueue_freezing && wq->flags & WQ_FREEZABLE) |
2960 | for_each_cwq_cpu(cpu, wq) | 2971 | for_each_cwq_cpu(cpu, wq) |
2961 | get_cwq(cpu, wq)->max_active = 0; | 2972 | get_cwq(cpu, wq)->max_active = 0; |
2962 | 2973 | ||
@@ -3068,7 +3079,7 @@ void workqueue_set_max_active(struct workqueue_struct *wq, int max_active) | |||
3068 | 3079 | ||
3069 | spin_lock_irq(&gcwq->lock); | 3080 | spin_lock_irq(&gcwq->lock); |
3070 | 3081 | ||
3071 | if (!(wq->flags & WQ_FREEZEABLE) || | 3082 | if (!(wq->flags & WQ_FREEZABLE) || |
3072 | !(gcwq->flags & GCWQ_FREEZING)) | 3083 | !(gcwq->flags & GCWQ_FREEZING)) |
3073 | get_cwq(gcwq->cpu, wq)->max_active = max_active; | 3084 | get_cwq(gcwq->cpu, wq)->max_active = max_active; |
3074 | 3085 | ||
@@ -3318,7 +3329,7 @@ static int __cpuinit trustee_thread(void *__gcwq) | |||
3318 | * want to get it over with ASAP - spam rescuers, wake up as | 3329 | * want to get it over with ASAP - spam rescuers, wake up as |
3319 | * many idlers as necessary and create new ones till the | 3330 | * many idlers as necessary and create new ones till the |
3320 | * worklist is empty. Note that if the gcwq is frozen, there | 3331 | * worklist is empty. Note that if the gcwq is frozen, there |
3321 | * may be frozen works in freezeable cwqs. Don't declare | 3332 | * may be frozen works in freezable cwqs. Don't declare |
3322 | * completion while frozen. | 3333 | * completion while frozen. |
3323 | */ | 3334 | */ |
3324 | while (gcwq->nr_workers != gcwq->nr_idle || | 3335 | while (gcwq->nr_workers != gcwq->nr_idle || |
@@ -3576,9 +3587,9 @@ EXPORT_SYMBOL_GPL(work_on_cpu); | |||
3576 | /** | 3587 | /** |
3577 | * freeze_workqueues_begin - begin freezing workqueues | 3588 | * freeze_workqueues_begin - begin freezing workqueues |
3578 | * | 3589 | * |
3579 | * Start freezing workqueues. After this function returns, all | 3590 | * Start freezing workqueues. After this function returns, all freezable |
3580 | * freezeable workqueues will queue new works to their frozen_works | 3591 | * workqueues will queue new works to their frozen_works list instead of |
3581 | * list instead of gcwq->worklist. | 3592 | * gcwq->worklist. |
3582 | * | 3593 | * |
3583 | * CONTEXT: | 3594 | * CONTEXT: |
3584 | * Grabs and releases workqueue_lock and gcwq->lock's. | 3595 | * Grabs and releases workqueue_lock and gcwq->lock's. |
@@ -3604,7 +3615,7 @@ void freeze_workqueues_begin(void) | |||
3604 | list_for_each_entry(wq, &workqueues, list) { | 3615 | list_for_each_entry(wq, &workqueues, list) { |
3605 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3616 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3606 | 3617 | ||
3607 | if (cwq && wq->flags & WQ_FREEZEABLE) | 3618 | if (cwq && wq->flags & WQ_FREEZABLE) |
3608 | cwq->max_active = 0; | 3619 | cwq->max_active = 0; |
3609 | } | 3620 | } |
3610 | 3621 | ||
@@ -3615,7 +3626,7 @@ void freeze_workqueues_begin(void) | |||
3615 | } | 3626 | } |
3616 | 3627 | ||
3617 | /** | 3628 | /** |
3618 | * freeze_workqueues_busy - are freezeable workqueues still busy? | 3629 | * freeze_workqueues_busy - are freezable workqueues still busy? |
3619 | * | 3630 | * |
3620 | * Check whether freezing is complete. This function must be called | 3631 | * Check whether freezing is complete. This function must be called |
3621 | * between freeze_workqueues_begin() and thaw_workqueues(). | 3632 | * between freeze_workqueues_begin() and thaw_workqueues(). |
@@ -3624,8 +3635,8 @@ void freeze_workqueues_begin(void) | |||
3624 | * Grabs and releases workqueue_lock. | 3635 | * Grabs and releases workqueue_lock. |
3625 | * | 3636 | * |
3626 | * RETURNS: | 3637 | * RETURNS: |
3627 | * %true if some freezeable workqueues are still busy. %false if | 3638 | * %true if some freezable workqueues are still busy. %false if freezing |
3628 | * freezing is complete. | 3639 | * is complete. |
3629 | */ | 3640 | */ |
3630 | bool freeze_workqueues_busy(void) | 3641 | bool freeze_workqueues_busy(void) |
3631 | { | 3642 | { |
@@ -3645,7 +3656,7 @@ bool freeze_workqueues_busy(void) | |||
3645 | list_for_each_entry(wq, &workqueues, list) { | 3656 | list_for_each_entry(wq, &workqueues, list) { |
3646 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3657 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3647 | 3658 | ||
3648 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3659 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3649 | continue; | 3660 | continue; |
3650 | 3661 | ||
3651 | BUG_ON(cwq->nr_active < 0); | 3662 | BUG_ON(cwq->nr_active < 0); |
@@ -3690,7 +3701,7 @@ void thaw_workqueues(void) | |||
3690 | list_for_each_entry(wq, &workqueues, list) { | 3701 | list_for_each_entry(wq, &workqueues, list) { |
3691 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); | 3702 | struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq); |
3692 | 3703 | ||
3693 | if (!cwq || !(wq->flags & WQ_FREEZEABLE)) | 3704 | if (!cwq || !(wq->flags & WQ_FREEZABLE)) |
3694 | continue; | 3705 | continue; |
3695 | 3706 | ||
3696 | /* restore max_active and repopulate worklist */ | 3707 | /* restore max_active and repopulate worklist */ |
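Two behavioural tweaks hide among the workqueue renames: a rescuer that leaves a gcwq with work still pending now wakes a regular worker so concurrency cannot drop to zero, and MAYDAY_INITIAL_TIMEOUT is clamped so it can never be shorter than two timer ticks. The clamp, evaluated for a 100 Hz build (the HZ value is assumed for the example):

#include <stdio.h>

#define HZ 100	/* assume a 100 Hz tick for the example */

/* HZ/100 rounds down to just one tick on a 100 Hz build, which would make
 * the wait-before-calling-for-help window shorter than one timer tick, so
 * the constant is clamped to a floor of two ticks. */
enum { MAYDAY_INITIAL_TIMEOUT = HZ / 100 >= 2 ? HZ / 100 : 2 };

int main(void)
{
	printf("mayday timeout: %d ticks\n", MAYDAY_INITIAL_TIMEOUT);
	return 0;
}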
diff --git a/lib/list_debug.c b/lib/list_debug.c index 344c710d16ca..b8029a5583ff 100644 --- a/lib/list_debug.c +++ b/lib/list_debug.c | |||
@@ -35,6 +35,31 @@ void __list_add(struct list_head *new, | |||
35 | } | 35 | } |
36 | EXPORT_SYMBOL(__list_add); | 36 | EXPORT_SYMBOL(__list_add); |
37 | 37 | ||
38 | void __list_del_entry(struct list_head *entry) | ||
39 | { | ||
40 | struct list_head *prev, *next; | ||
41 | |||
42 | prev = entry->prev; | ||
43 | next = entry->next; | ||
44 | |||
45 | if (WARN(next == LIST_POISON1, | ||
46 | "list_del corruption, %p->next is LIST_POISON1 (%p)\n", | ||
47 | entry, LIST_POISON1) || | ||
48 | WARN(prev == LIST_POISON2, | ||
49 | "list_del corruption, %p->prev is LIST_POISON2 (%p)\n", | ||
50 | entry, LIST_POISON2) || | ||
51 | WARN(prev->next != entry, | ||
52 | "list_del corruption. prev->next should be %p, " | ||
53 | "but was %p\n", entry, prev->next) || | ||
54 | WARN(next->prev != entry, | ||
55 | "list_del corruption. next->prev should be %p, " | ||
56 | "but was %p\n", entry, next->prev)) | ||
57 | return; | ||
58 | |||
59 | __list_del(prev, next); | ||
60 | } | ||
61 | EXPORT_SYMBOL(__list_del_entry); | ||
62 | |||
38 | /** | 63 | /** |
39 | * list_del - deletes entry from list. | 64 | * list_del - deletes entry from list. |
40 | * @entry: the element to delete from the list. | 65 | * @entry: the element to delete from the list. |
@@ -43,19 +68,7 @@ EXPORT_SYMBOL(__list_add); | |||
43 | */ | 68 | */ |
44 | void list_del(struct list_head *entry) | 69 | void list_del(struct list_head *entry) |
45 | { | 70 | { |
46 | WARN(entry->next == LIST_POISON1, | 71 | __list_del_entry(entry); |
47 | "list_del corruption, next is LIST_POISON1 (%p)\n", | ||
48 | LIST_POISON1); | ||
49 | WARN(entry->next != LIST_POISON1 && entry->prev == LIST_POISON2, | ||
50 | "list_del corruption, prev is LIST_POISON2 (%p)\n", | ||
51 | LIST_POISON2); | ||
52 | WARN(entry->prev->next != entry, | ||
53 | "list_del corruption. prev->next should be %p, " | ||
54 | "but was %p\n", entry, entry->prev->next); | ||
55 | WARN(entry->next->prev != entry, | ||
56 | "list_del corruption. next->prev should be %p, " | ||
57 | "but was %p\n", entry, entry->next->prev); | ||
58 | __list_del(entry->prev, entry->next); | ||
59 | entry->next = LIST_POISON1; | 72 | entry->next = LIST_POISON1; |
60 | entry->prev = LIST_POISON2; | 73 | entry->prev = LIST_POISON2; |
61 | } | 74 | } |
diff --git a/net/bluetooth/l2cap.c b/net/bluetooth/l2cap.c index 7550abb0c96a..675614e38e14 100644 --- a/net/bluetooth/l2cap.c +++ b/net/bluetooth/l2cap.c | |||
@@ -859,6 +859,7 @@ static void __l2cap_sock_close(struct sock *sk, int reason) | |||
859 | result = L2CAP_CR_SEC_BLOCK; | 859 | result = L2CAP_CR_SEC_BLOCK; |
860 | else | 860 | else |
861 | result = L2CAP_CR_BAD_PSM; | 861 | result = L2CAP_CR_BAD_PSM; |
862 | sk->sk_state = BT_DISCONN; | ||
862 | 863 | ||
863 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); | 864 | rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid); |
864 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); | 865 | rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid); |
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 6f6d8e1b776f..88e4aa9cb1f9 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c | |||
@@ -80,7 +80,7 @@ int br_handle_frame_finish(struct sk_buff *skb) | |||
80 | if (is_multicast_ether_addr(dest)) { | 80 | if (is_multicast_ether_addr(dest)) { |
81 | mdst = br_mdb_get(br, skb); | 81 | mdst = br_mdb_get(br, skb); |
82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { | 82 | if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) { |
83 | if ((mdst && !hlist_unhashed(&mdst->mglist)) || | 83 | if ((mdst && mdst->mglist) || |
84 | br_multicast_is_router(br)) | 84 | br_multicast_is_router(br)) |
85 | skb2 = skb; | 85 | skb2 = skb; |
86 | br_multicast_forward(mdst, skb, skb2); | 86 | br_multicast_forward(mdst, skb, skb2); |
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index f701a21acb34..09d5c0987925 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c | |||
@@ -232,8 +232,7 @@ static void br_multicast_group_expired(unsigned long data) | |||
232 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) | 232 | if (!netif_running(br->dev) || timer_pending(&mp->timer)) |
233 | goto out; | 233 | goto out; |
234 | 234 | ||
235 | if (!hlist_unhashed(&mp->mglist)) | 235 | mp->mglist = false; |
236 | hlist_del_init(&mp->mglist); | ||
237 | 236 | ||
238 | if (mp->ports) | 237 | if (mp->ports) |
239 | goto out; | 238 | goto out; |
@@ -276,7 +275,7 @@ static void br_multicast_del_pg(struct net_bridge *br, | |||
276 | del_timer(&p->query_timer); | 275 | del_timer(&p->query_timer); |
277 | call_rcu_bh(&p->rcu, br_multicast_free_pg); | 276 | call_rcu_bh(&p->rcu, br_multicast_free_pg); |
278 | 277 | ||
279 | if (!mp->ports && hlist_unhashed(&mp->mglist) && | 278 | if (!mp->ports && !mp->mglist && |
280 | netif_running(br->dev)) | 279 | netif_running(br->dev)) |
281 | mod_timer(&mp->timer, jiffies); | 280 | mod_timer(&mp->timer, jiffies); |
282 | 281 | ||
@@ -528,7 +527,7 @@ static void br_multicast_group_query_expired(unsigned long data) | |||
528 | struct net_bridge *br = mp->br; | 527 | struct net_bridge *br = mp->br; |
529 | 528 | ||
530 | spin_lock(&br->multicast_lock); | 529 | spin_lock(&br->multicast_lock); |
531 | if (!netif_running(br->dev) || hlist_unhashed(&mp->mglist) || | 530 | if (!netif_running(br->dev) || !mp->mglist || |
532 | mp->queries_sent >= br->multicast_last_member_count) | 531 | mp->queries_sent >= br->multicast_last_member_count) |
533 | goto out; | 532 | goto out; |
534 | 533 | ||
@@ -719,7 +718,7 @@ static int br_multicast_add_group(struct net_bridge *br, | |||
719 | goto err; | 718 | goto err; |
720 | 719 | ||
721 | if (!port) { | 720 | if (!port) { |
722 | hlist_add_head(&mp->mglist, &br->mglist); | 721 | mp->mglist = true; |
723 | mod_timer(&mp->timer, now + br->multicast_membership_interval); | 722 | mod_timer(&mp->timer, now + br->multicast_membership_interval); |
724 | goto out; | 723 | goto out; |
725 | } | 724 | } |
@@ -1165,7 +1164,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1165 | 1164 | ||
1166 | max_delay *= br->multicast_last_member_count; | 1165 | max_delay *= br->multicast_last_member_count; |
1167 | 1166 | ||
1168 | if (!hlist_unhashed(&mp->mglist) && | 1167 | if (mp->mglist && |
1169 | (timer_pending(&mp->timer) ? | 1168 | (timer_pending(&mp->timer) ? |
1170 | time_after(mp->timer.expires, now + max_delay) : | 1169 | time_after(mp->timer.expires, now + max_delay) : |
1171 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1170 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1177,7 +1176,7 @@ static int br_ip4_multicast_query(struct net_bridge *br, | |||
1177 | if (timer_pending(&p->timer) ? | 1176 | if (timer_pending(&p->timer) ? |
1178 | time_after(p->timer.expires, now + max_delay) : | 1177 | time_after(p->timer.expires, now + max_delay) : |
1179 | try_to_del_timer_sync(&p->timer) >= 0) | 1178 | try_to_del_timer_sync(&p->timer) >= 0) |
1180 | mod_timer(&mp->timer, now + max_delay); | 1179 | mod_timer(&p->timer, now + max_delay); |
1181 | } | 1180 | } |
1182 | 1181 | ||
1183 | out: | 1182 | out: |
@@ -1236,7 +1235,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1236 | goto out; | 1235 | goto out; |
1237 | 1236 | ||
1238 | max_delay *= br->multicast_last_member_count; | 1237 | max_delay *= br->multicast_last_member_count; |
1239 | if (!hlist_unhashed(&mp->mglist) && | 1238 | if (mp->mglist && |
1240 | (timer_pending(&mp->timer) ? | 1239 | (timer_pending(&mp->timer) ? |
1241 | time_after(mp->timer.expires, now + max_delay) : | 1240 | time_after(mp->timer.expires, now + max_delay) : |
1242 | try_to_del_timer_sync(&mp->timer) >= 0)) | 1241 | try_to_del_timer_sync(&mp->timer) >= 0)) |
@@ -1248,7 +1247,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, | |||
1248 | if (timer_pending(&p->timer) ? | 1247 | if (timer_pending(&p->timer) ? |
1249 | time_after(p->timer.expires, now + max_delay) : | 1248 | time_after(p->timer.expires, now + max_delay) : |
1250 | try_to_del_timer_sync(&p->timer) >= 0) | 1249 | try_to_del_timer_sync(&p->timer) >= 0) |
1251 | mod_timer(&mp->timer, now + max_delay); | 1250 | mod_timer(&p->timer, now + max_delay); |
1252 | } | 1251 | } |
1253 | 1252 | ||
1254 | out: | 1253 | out: |
@@ -1283,7 +1282,7 @@ static void br_multicast_leave_group(struct net_bridge *br, | |||
1283 | br->multicast_last_member_interval; | 1282 | br->multicast_last_member_interval; |
1284 | 1283 | ||
1285 | if (!port) { | 1284 | if (!port) { |
1286 | if (!hlist_unhashed(&mp->mglist) && | 1285 | if (mp->mglist && |
1287 | (timer_pending(&mp->timer) ? | 1286 | (timer_pending(&mp->timer) ? |
1288 | time_after(mp->timer.expires, time) : | 1287 | time_after(mp->timer.expires, time) : |
1289 | try_to_del_timer_sync(&mp->timer) >= 0)) { | 1288 | try_to_del_timer_sync(&mp->timer) >= 0)) { |
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 84aac7734bfc..4e1b620b6be6 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h | |||
@@ -84,13 +84,13 @@ struct net_bridge_port_group { | |||
84 | struct net_bridge_mdb_entry | 84 | struct net_bridge_mdb_entry |
85 | { | 85 | { |
86 | struct hlist_node hlist[2]; | 86 | struct hlist_node hlist[2]; |
87 | struct hlist_node mglist; | ||
88 | struct net_bridge *br; | 87 | struct net_bridge *br; |
89 | struct net_bridge_port_group __rcu *ports; | 88 | struct net_bridge_port_group __rcu *ports; |
90 | struct rcu_head rcu; | 89 | struct rcu_head rcu; |
91 | struct timer_list timer; | 90 | struct timer_list timer; |
92 | struct timer_list query_timer; | 91 | struct timer_list query_timer; |
93 | struct br_ip addr; | 92 | struct br_ip addr; |
93 | bool mglist; | ||
94 | u32 queries_sent; | 94 | u32 queries_sent; |
95 | }; | 95 | }; |
96 | 96 | ||
@@ -238,7 +238,6 @@ struct net_bridge | |||
238 | spinlock_t multicast_lock; | 238 | spinlock_t multicast_lock; |
239 | struct net_bridge_mdb_htable __rcu *mdb; | 239 | struct net_bridge_mdb_htable __rcu *mdb; |
240 | struct hlist_head router_list; | 240 | struct hlist_head router_list; |
241 | struct hlist_head mglist; | ||
242 | 241 | ||
243 | struct timer_list multicast_router_timer; | 242 | struct timer_list multicast_router_timer; |
244 | struct timer_list multicast_querier_timer; | 243 | struct timer_list multicast_querier_timer; |
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index dff633d62e5b..35b36b86d762 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c | |||
@@ -252,8 +252,12 @@ static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len) | |||
252 | { | 252 | { |
253 | struct kvec iov = {buf, len}; | 253 | struct kvec iov = {buf, len}; |
254 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; | 254 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; |
255 | int r; | ||
255 | 256 | ||
256 | return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); | 257 | r = kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags); |
258 | if (r == -EAGAIN) | ||
259 | r = 0; | ||
260 | return r; | ||
257 | } | 261 | } |
258 | 262 | ||
259 | /* | 263 | /* |
@@ -264,13 +268,17 @@ static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov, | |||
264 | size_t kvlen, size_t len, int more) | 268 | size_t kvlen, size_t len, int more) |
265 | { | 269 | { |
266 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; | 270 | struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL }; |
271 | int r; | ||
267 | 272 | ||
268 | if (more) | 273 | if (more) |
269 | msg.msg_flags |= MSG_MORE; | 274 | msg.msg_flags |= MSG_MORE; |
270 | else | 275 | else |
271 | msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ | 276 | msg.msg_flags |= MSG_EOR; /* superfluous, but what the hell */ |
272 | 277 | ||
273 | return kernel_sendmsg(sock, &msg, iov, kvlen, len); | 278 | r = kernel_sendmsg(sock, &msg, iov, kvlen, len); |
279 | if (r == -EAGAIN) | ||
280 | r = 0; | ||
281 | return r; | ||
274 | } | 282 | } |
275 | 283 | ||
276 | 284 | ||
@@ -847,6 +855,8 @@ static int write_partial_msg_pages(struct ceph_connection *con) | |||
847 | (msg->pages || msg->pagelist || msg->bio || in_trail)) | 855 | (msg->pages || msg->pagelist || msg->bio || in_trail)) |
848 | kunmap(page); | 856 | kunmap(page); |
849 | 857 | ||
858 | if (ret == -EAGAIN) | ||
859 | ret = 0; | ||
850 | if (ret <= 0) | 860 | if (ret <= 0) |
851 | goto out; | 861 | goto out; |
852 | 862 | ||
@@ -1737,16 +1747,12 @@ more_kvec: | |||
1737 | if (con->out_skip) { | 1747 | if (con->out_skip) { |
1738 | ret = write_partial_skip(con); | 1748 | ret = write_partial_skip(con); |
1739 | if (ret <= 0) | 1749 | if (ret <= 0) |
1740 | goto done; | 1750 | goto out; |
1741 | if (ret < 0) { | ||
1742 | dout("try_write write_partial_skip err %d\n", ret); | ||
1743 | goto done; | ||
1744 | } | ||
1745 | } | 1751 | } |
1746 | if (con->out_kvec_left) { | 1752 | if (con->out_kvec_left) { |
1747 | ret = write_partial_kvec(con); | 1753 | ret = write_partial_kvec(con); |
1748 | if (ret <= 0) | 1754 | if (ret <= 0) |
1749 | goto done; | 1755 | goto out; |
1750 | } | 1756 | } |
1751 | 1757 | ||
1752 | /* msg pages? */ | 1758 | /* msg pages? */ |
@@ -1761,11 +1767,11 @@ more_kvec: | |||
1761 | if (ret == 1) | 1767 | if (ret == 1) |
1762 | goto more_kvec; /* we need to send the footer, too! */ | 1768 | goto more_kvec; /* we need to send the footer, too! */ |
1763 | if (ret == 0) | 1769 | if (ret == 0) |
1764 | goto done; | 1770 | goto out; |
1765 | if (ret < 0) { | 1771 | if (ret < 0) { |
1766 | dout("try_write write_partial_msg_pages err %d\n", | 1772 | dout("try_write write_partial_msg_pages err %d\n", |
1767 | ret); | 1773 | ret); |
1768 | goto done; | 1774 | goto out; |
1769 | } | 1775 | } |
1770 | } | 1776 | } |
1771 | 1777 | ||
@@ -1789,10 +1795,9 @@ do_next: | |||
1789 | /* Nothing to do! */ | 1795 | /* Nothing to do! */ |
1790 | clear_bit(WRITE_PENDING, &con->state); | 1796 | clear_bit(WRITE_PENDING, &con->state); |
1791 | dout("try_write nothing else to write.\n"); | 1797 | dout("try_write nothing else to write.\n"); |
1792 | done: | ||
1793 | ret = 0; | 1798 | ret = 0; |
1794 | out: | 1799 | out: |
1795 | dout("try_write done on %p\n", con); | 1800 | dout("try_write done on %p ret %d\n", con, ret); |
1796 | return ret; | 1801 | return ret; |
1797 | } | 1802 | } |
1798 | 1803 | ||
@@ -1821,19 +1826,17 @@ more: | |||
1821 | dout("try_read connecting\n"); | 1826 | dout("try_read connecting\n"); |
1822 | ret = read_partial_banner(con); | 1827 | ret = read_partial_banner(con); |
1823 | if (ret <= 0) | 1828 | if (ret <= 0) |
1824 | goto done; | ||
1825 | if (process_banner(con) < 0) { | ||
1826 | ret = -1; | ||
1827 | goto out; | 1829 | goto out; |
1828 | } | 1830 | ret = process_banner(con); |
1831 | if (ret < 0) | ||
1832 | goto out; | ||
1829 | } | 1833 | } |
1830 | ret = read_partial_connect(con); | 1834 | ret = read_partial_connect(con); |
1831 | if (ret <= 0) | 1835 | if (ret <= 0) |
1832 | goto done; | ||
1833 | if (process_connect(con) < 0) { | ||
1834 | ret = -1; | ||
1835 | goto out; | 1836 | goto out; |
1836 | } | 1837 | ret = process_connect(con); |
1838 | if (ret < 0) | ||
1839 | goto out; | ||
1837 | goto more; | 1840 | goto more; |
1838 | } | 1841 | } |
1839 | 1842 | ||
@@ -1848,7 +1851,7 @@ more: | |||
1848 | dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); | 1851 | dout("skipping %d / %d bytes\n", skip, -con->in_base_pos); |
1849 | ret = ceph_tcp_recvmsg(con->sock, buf, skip); | 1852 | ret = ceph_tcp_recvmsg(con->sock, buf, skip); |
1850 | if (ret <= 0) | 1853 | if (ret <= 0) |
1851 | goto done; | 1854 | goto out; |
1852 | con->in_base_pos += ret; | 1855 | con->in_base_pos += ret; |
1853 | if (con->in_base_pos) | 1856 | if (con->in_base_pos) |
1854 | goto more; | 1857 | goto more; |
@@ -1859,7 +1862,7 @@ more: | |||
1859 | */ | 1862 | */ |
1860 | ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); | 1863 | ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1); |
1861 | if (ret <= 0) | 1864 | if (ret <= 0) |
1862 | goto done; | 1865 | goto out; |
1863 | dout("try_read got tag %d\n", (int)con->in_tag); | 1866 | dout("try_read got tag %d\n", (int)con->in_tag); |
1864 | switch (con->in_tag) { | 1867 | switch (con->in_tag) { |
1865 | case CEPH_MSGR_TAG_MSG: | 1868 | case CEPH_MSGR_TAG_MSG: |
@@ -1870,7 +1873,7 @@ more: | |||
1870 | break; | 1873 | break; |
1871 | case CEPH_MSGR_TAG_CLOSE: | 1874 | case CEPH_MSGR_TAG_CLOSE: |
1872 | set_bit(CLOSED, &con->state); /* fixme */ | 1875 | set_bit(CLOSED, &con->state); /* fixme */ |
1873 | goto done; | 1876 | goto out; |
1874 | default: | 1877 | default: |
1875 | goto bad_tag; | 1878 | goto bad_tag; |
1876 | } | 1879 | } |
@@ -1882,13 +1885,12 @@ more: | |||
1882 | case -EBADMSG: | 1885 | case -EBADMSG: |
1883 | con->error_msg = "bad crc"; | 1886 | con->error_msg = "bad crc"; |
1884 | ret = -EIO; | 1887 | ret = -EIO; |
1885 | goto out; | 1888 | break; |
1886 | case -EIO: | 1889 | case -EIO: |
1887 | con->error_msg = "io error"; | 1890 | con->error_msg = "io error"; |
1888 | goto out; | 1891 | break; |
1889 | default: | ||
1890 | goto done; | ||
1891 | } | 1892 | } |
1893 | goto out; | ||
1892 | } | 1894 | } |
1893 | if (con->in_tag == CEPH_MSGR_TAG_READY) | 1895 | if (con->in_tag == CEPH_MSGR_TAG_READY) |
1894 | goto more; | 1896 | goto more; |
@@ -1898,15 +1900,13 @@ more: | |||
1898 | if (con->in_tag == CEPH_MSGR_TAG_ACK) { | 1900 | if (con->in_tag == CEPH_MSGR_TAG_ACK) { |
1899 | ret = read_partial_ack(con); | 1901 | ret = read_partial_ack(con); |
1900 | if (ret <= 0) | 1902 | if (ret <= 0) |
1901 | goto done; | 1903 | goto out; |
1902 | process_ack(con); | 1904 | process_ack(con); |
1903 | goto more; | 1905 | goto more; |
1904 | } | 1906 | } |
1905 | 1907 | ||
1906 | done: | ||
1907 | ret = 0; | ||
1908 | out: | 1908 | out: |
1909 | dout("try_read done on %p\n", con); | 1909 | dout("try_read done on %p ret %d\n", con, ret); |
1910 | return ret; | 1910 | return ret; |
1911 | 1911 | ||
1912 | bad_tag: | 1912 | bad_tag: |
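The messenger hunks above fold the separate done:/out: exits together and make the low-level send/receive helpers return 0 instead of -EAGAIN, so callers see "no progress right now" and "made some progress" through a single non-negative path. Below is a rough userspace equivalent of that EAGAIN normalisation, assuming a non-blocking socket fd; the function name is made up for the example, not part of the kernel code.

    #include <errno.h>
    #include <sys/socket.h>
    #include <sys/types.h>

    /* Sketch only: read from a non-blocking socket and report "would block"
     * as 0 bytes, the way the messenger now treats -EAGAIN from
     * kernel_recvmsg().  A real caller still has to detect EOF, which
     * recv() also reports as 0. */
    static ssize_t recv_nonblock(int fd, void *buf, size_t len)
    {
            ssize_t r = recv(fd, buf, len, MSG_DONTWAIT);

            if (r < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
                    return 0;               /* no data yet; try again later */
            return r;                       /* bytes read, 0 on EOF, or -1 */
    }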
diff --git a/net/core/dev.c b/net/core/dev.c index 8e726cb47ed7..8ae6631abcc2 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -1280,10 +1280,13 @@ static int __dev_close_many(struct list_head *head) | |||
1280 | 1280 | ||
1281 | static int __dev_close(struct net_device *dev) | 1281 | static int __dev_close(struct net_device *dev) |
1282 | { | 1282 | { |
1283 | int retval; | ||
1283 | LIST_HEAD(single); | 1284 | LIST_HEAD(single); |
1284 | 1285 | ||
1285 | list_add(&dev->unreg_list, &single); | 1286 | list_add(&dev->unreg_list, &single); |
1286 | return __dev_close_many(&single); | 1287 | retval = __dev_close_many(&single); |
1288 | list_del(&single); | ||
1289 | return retval; | ||
1287 | } | 1290 | } |
1288 | 1291 | ||
1289 | int dev_close_many(struct list_head *head) | 1292 | int dev_close_many(struct list_head *head) |
@@ -1325,7 +1328,7 @@ int dev_close(struct net_device *dev) | |||
1325 | 1328 | ||
1326 | list_add(&dev->unreg_list, &single); | 1329 | list_add(&dev->unreg_list, &single); |
1327 | dev_close_many(&single); | 1330 | dev_close_many(&single); |
1328 | 1331 | list_del(&single); | |
1329 | return 0; | 1332 | return 0; |
1330 | } | 1333 | } |
1331 | EXPORT_SYMBOL(dev_close); | 1334 | EXPORT_SYMBOL(dev_close); |
@@ -5063,6 +5066,7 @@ static void rollback_registered(struct net_device *dev) | |||
5063 | 5066 | ||
5064 | list_add(&dev->unreg_list, &single); | 5067 | list_add(&dev->unreg_list, &single); |
5065 | rollback_registered_many(&single); | 5068 | rollback_registered_many(&single); |
5069 | list_del(&single); | ||
5066 | } | 5070 | } |
5067 | 5071 | ||
5068 | unsigned long netdev_fix_features(unsigned long features, const char *name) | 5072 | unsigned long netdev_fix_features(unsigned long features, const char *name) |
@@ -6216,6 +6220,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list) | |||
6216 | } | 6220 | } |
6217 | } | 6221 | } |
6218 | unregister_netdevice_many(&dev_kill_list); | 6222 | unregister_netdevice_many(&dev_kill_list); |
6223 | list_del(&dev_kill_list); | ||
6219 | rtnl_unlock(); | 6224 | rtnl_unlock(); |
6220 | } | 6225 | } |
6221 | 6226 | ||
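The three list_del() additions above share one idea: dev->unreg_list is spliced onto a list head that lives on the caller's stack, so the head has to be unlinked again before the function returns, otherwise the embedded node keeps pointing at stack memory that is about to be reused. A minimal userspace sketch of that pattern, with a hand-rolled list standing in for <linux/list.h> and invented helper names:

    #include <stdio.h>

    /* Stand-ins for <linux/list.h>: a circular doubly linked list. */
    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_add(struct list_head *n, struct list_head *h)
    {
            n->next = h->next;
            n->prev = h;
            h->next->prev = n;
            h->next = n;
    }

    static void list_del(struct list_head *n)
    {
            n->prev->next = n->next;
            n->next->prev = n->prev;
    }

    struct device { struct list_head unreg_list; };

    static void close_many(struct list_head *head)
    {
            struct list_head *p;

            for (p = head->next; p != head; p = p->next)
                    printf("closing one device\n");
            /* note: the entries are still chained to *head on return */
    }

    static void close_one(struct device *dev)
    {
            struct list_head single;        /* lives on the stack */

            list_init(&single);
            list_add(&dev->unreg_list, &single);
            close_many(&single);
            list_del(&single);              /* unlink the dying stack head, so
                                               unreg_list is not left pointing
                                               at reused stack memory */
    }

    int main(void)
    {
            struct device d;

            list_init(&d.unreg_list);
            close_one(&d);
            close_one(&d);  /* reusing unreg_list is safe only because of the del */
            return 0;
    }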
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 6b03f561caec..d5074a567289 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c | |||
@@ -626,6 +626,9 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlattr **tb, | |||
626 | dcb->cmd = DCB_CMD_GAPP; | 626 | dcb->cmd = DCB_CMD_GAPP; |
627 | 627 | ||
628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); | 628 | app_nest = nla_nest_start(dcbnl_skb, DCB_ATTR_APP); |
629 | if (!app_nest) | ||
630 | goto out_cancel; | ||
631 | |||
629 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); | 632 | ret = nla_put_u8(dcbnl_skb, DCB_APP_ATTR_IDTYPE, idtype); |
630 | if (ret) | 633 | if (ret) |
631 | goto out_cancel; | 634 | goto out_cancel; |
@@ -1613,6 +1616,10 @@ EXPORT_SYMBOL(dcb_getapp); | |||
1613 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | 1616 | u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) |
1614 | { | 1617 | { |
1615 | struct dcb_app_type *itr; | 1618 | struct dcb_app_type *itr; |
1619 | struct dcb_app_type event; | ||
1620 | |||
1621 | memcpy(&event.name, dev->name, sizeof(event.name)); | ||
1622 | memcpy(&event.app, new, sizeof(event.app)); | ||
1616 | 1623 | ||
1617 | spin_lock(&dcb_lock); | 1624 | spin_lock(&dcb_lock); |
1618 | /* Search for existing match and replace */ | 1625 | /* Search for existing match and replace */ |
@@ -1644,7 +1651,7 @@ u8 dcb_setapp(struct net_device *dev, struct dcb_app *new) | |||
1644 | } | 1651 | } |
1645 | out: | 1652 | out: |
1646 | spin_unlock(&dcb_lock); | 1653 | spin_unlock(&dcb_lock); |
1647 | call_dcbevent_notifiers(DCB_APP_EVENT, new); | 1654 | call_dcbevent_notifiers(DCB_APP_EVENT, &event); |
1648 | return 0; | 1655 | return 0; |
1649 | } | 1656 | } |
1650 | EXPORT_SYMBOL(dcb_setapp); | 1657 | EXPORT_SYMBOL(dcb_setapp); |
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index 748cb5b337bd..df4616fce929 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c | |||
@@ -1030,6 +1030,21 @@ static inline bool inetdev_valid_mtu(unsigned mtu) | |||
1030 | return mtu >= 68; | 1030 | return mtu >= 68; |
1031 | } | 1031 | } |
1032 | 1032 | ||
1033 | static void inetdev_send_gratuitous_arp(struct net_device *dev, | ||
1034 | struct in_device *in_dev) | ||
1035 | |||
1036 | { | ||
1037 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1038 | |||
1039 | if (!ifa) | ||
1040 | return; | ||
1041 | |||
1042 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1043 | ifa->ifa_address, dev, | ||
1044 | ifa->ifa_address, NULL, | ||
1045 | dev->dev_addr, NULL); | ||
1046 | } | ||
1047 | |||
1033 | /* Called only under RTNL semaphore */ | 1048 | /* Called only under RTNL semaphore */ |
1034 | 1049 | ||
1035 | static int inetdev_event(struct notifier_block *this, unsigned long event, | 1050 | static int inetdev_event(struct notifier_block *this, unsigned long event, |
@@ -1082,18 +1097,13 @@ static int inetdev_event(struct notifier_block *this, unsigned long event, | |||
1082 | } | 1097 | } |
1083 | ip_mc_up(in_dev); | 1098 | ip_mc_up(in_dev); |
1084 | /* fall through */ | 1099 | /* fall through */ |
1085 | case NETDEV_NOTIFY_PEERS: | ||
1086 | case NETDEV_CHANGEADDR: | 1100 | case NETDEV_CHANGEADDR: |
1101 | if (!IN_DEV_ARP_NOTIFY(in_dev)) | ||
1102 | break; | ||
1103 | /* fall through */ | ||
1104 | case NETDEV_NOTIFY_PEERS: | ||
1087 | /* Send gratuitous ARP to notify of link change */ | 1105 | /* Send gratuitous ARP to notify of link change */ |
1088 | if (IN_DEV_ARP_NOTIFY(in_dev)) { | 1106 | inetdev_send_gratuitous_arp(dev, in_dev); |
1089 | struct in_ifaddr *ifa = in_dev->ifa_list; | ||
1090 | |||
1091 | if (ifa) | ||
1092 | arp_send(ARPOP_REQUEST, ETH_P_ARP, | ||
1093 | ifa->ifa_address, dev, | ||
1094 | ifa->ifa_address, NULL, | ||
1095 | dev->dev_addr, NULL); | ||
1096 | } | ||
1097 | break; | 1107 | break; |
1098 | case NETDEV_DOWN: | 1108 | case NETDEV_DOWN: |
1099 | ip_mc_down(in_dev); | 1109 | ip_mc_down(in_dev); |
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index eb68a0e34e49..6613edfac28c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c | |||
@@ -775,6 +775,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev | |||
775 | .fl4_dst = dst, | 775 | .fl4_dst = dst, |
776 | .fl4_src = tiph->saddr, | 776 | .fl4_src = tiph->saddr, |
777 | .fl4_tos = RT_TOS(tos), | 777 | .fl4_tos = RT_TOS(tos), |
778 | .proto = IPPROTO_GRE, | ||
778 | .fl_gre_key = tunnel->parms.o_key | 779 | .fl_gre_key = tunnel->parms.o_key |
779 | }; | 780 | }; |
780 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { | 781 | if (ip_route_output_key(dev_net(dev), &rt, &fl)) { |
diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 788a3e74834e..6ed6603c2f6d 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c | |||
@@ -2722,6 +2722,7 @@ static struct dst_ops ipv4_dst_blackhole_ops = { | |||
2722 | .destroy = ipv4_dst_destroy, | 2722 | .destroy = ipv4_dst_destroy, |
2723 | .check = ipv4_blackhole_dst_check, | 2723 | .check = ipv4_blackhole_dst_check, |
2724 | .default_mtu = ipv4_blackhole_default_mtu, | 2724 | .default_mtu = ipv4_blackhole_default_mtu, |
2725 | .default_advmss = ipv4_default_advmss, | ||
2725 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, | 2726 | .update_pmtu = ipv4_rt_blackhole_update_pmtu, |
2726 | }; | 2727 | }; |
2727 | 2728 | ||
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 1c29f95695de..a998db6e7895 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c | |||
@@ -128,6 +128,7 @@ static struct dst_ops ip6_dst_blackhole_ops = { | |||
128 | .destroy = ip6_dst_destroy, | 128 | .destroy = ip6_dst_destroy, |
129 | .check = ip6_dst_check, | 129 | .check = ip6_dst_check, |
130 | .default_mtu = ip6_blackhole_default_mtu, | 130 | .default_mtu = ip6_blackhole_default_mtu, |
131 | .default_advmss = ip6_default_advmss, | ||
131 | .update_pmtu = ip6_rt_blackhole_update_pmtu, | 132 | .update_pmtu = ip6_rt_blackhole_update_pmtu, |
132 | }; | 133 | }; |
133 | 134 | ||
diff --git a/net/mac80211/util.c b/net/mac80211/util.c index cf68700abffa..d036597aabbe 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c | |||
@@ -1210,7 +1210,9 @@ int ieee80211_reconfig(struct ieee80211_local *local) | |||
1210 | switch (sdata->vif.type) { | 1210 | switch (sdata->vif.type) { |
1211 | case NL80211_IFTYPE_STATION: | 1211 | case NL80211_IFTYPE_STATION: |
1212 | changed |= BSS_CHANGED_ASSOC; | 1212 | changed |= BSS_CHANGED_ASSOC; |
1213 | mutex_lock(&sdata->u.mgd.mtx); | ||
1213 | ieee80211_bss_info_change_notify(sdata, changed); | 1214 | ieee80211_bss_info_change_notify(sdata, changed); |
1215 | mutex_unlock(&sdata->u.mgd.mtx); | ||
1214 | break; | 1216 | break; |
1215 | case NL80211_IFTYPE_ADHOC: | 1217 | case NL80211_IFTYPE_ADHOC: |
1216 | changed |= BSS_CHANGED_IBSS; | 1218 | changed |= BSS_CHANGED_IBSS; |
diff --git a/net/netfilter/core.c b/net/netfilter/core.c index 32fcbe290c04..4aa614b8a96a 100644 --- a/net/netfilter/core.c +++ b/net/netfilter/core.c | |||
@@ -133,6 +133,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
133 | 133 | ||
134 | /* Optimization: we don't need to hold module | 134 | /* Optimization: we don't need to hold module |
135 | reference here, since function can't sleep. --RR */ | 135 | reference here, since function can't sleep. --RR */ |
136 | repeat: | ||
136 | verdict = elem->hook(hook, skb, indev, outdev, okfn); | 137 | verdict = elem->hook(hook, skb, indev, outdev, okfn); |
137 | if (verdict != NF_ACCEPT) { | 138 | if (verdict != NF_ACCEPT) { |
138 | #ifdef CONFIG_NETFILTER_DEBUG | 139 | #ifdef CONFIG_NETFILTER_DEBUG |
@@ -145,7 +146,7 @@ unsigned int nf_iterate(struct list_head *head, | |||
145 | #endif | 146 | #endif |
146 | if (verdict != NF_REPEAT) | 147 | if (verdict != NF_REPEAT) |
147 | return verdict; | 148 | return verdict; |
148 | *i = (*i)->prev; | 149 | goto repeat; |
149 | } | 150 | } |
150 | } | 151 | } |
151 | return NF_ACCEPT; | 152 | return NF_ACCEPT; |
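In the nf_iterate() hunk above, an NF_REPEAT verdict used to be handled by rewinding the list cursor and letting the outer loop step forward onto the same element again; the goto simply re-invokes the current hook directly. A compact userspace sketch of that control flow, with invented verdict values and hook signature:

    #include <stdio.h>

    enum verdict { NF_ACCEPT, NF_DROP, NF_REPEAT };

    struct hook { enum verdict (*fn)(void *pkt); };

    /* Run each hook in order; NF_REPEAT re-runs the current hook instead of
     * touching the iterator, anything else but NF_ACCEPT stops the walk. */
    static enum verdict run_hooks(struct hook *hooks, int n, void *pkt)
    {
            int i;

            for (i = 0; i < n; i++) {
                    enum verdict v;
    repeat:
                    v = hooks[i].fn(pkt);
                    if (v == NF_ACCEPT)
                            continue;
                    if (v == NF_REPEAT)
                            goto repeat;
                    return v;       /* NF_DROP or another terminal verdict */
            }
            return NF_ACCEPT;
    }

    static enum verdict accept_once(void *pkt)
    {
            static int calls;
            (void)pkt;
            return calls++ ? NF_ACCEPT : NF_REPEAT;  /* repeat on the first pass */
    }

    int main(void)
    {
            struct hook hooks[] = { { accept_once } };

            printf("verdict %d\n", run_hooks(hooks, 1, NULL));
            return 0;
    }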
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 8b3ef404c794..6459588befc3 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c | |||
@@ -1340,10 +1340,13 @@ static inline struct xfrm_dst *xfrm_alloc_dst(struct net *net, int family) | |||
1340 | default: | 1340 | default: |
1341 | BUG(); | 1341 | BUG(); |
1342 | } | 1342 | } |
1343 | xdst = dst_alloc(dst_ops) ?: ERR_PTR(-ENOBUFS); | 1343 | xdst = dst_alloc(dst_ops); |
1344 | xfrm_policy_put_afinfo(afinfo); | 1344 | xfrm_policy_put_afinfo(afinfo); |
1345 | 1345 | ||
1346 | xdst->flo.ops = &xfrm_bundle_fc_ops; | 1346 | if (likely(xdst)) |
1347 | xdst->flo.ops = &xfrm_bundle_fc_ops; | ||
1348 | else | ||
1349 | xdst = ERR_PTR(-ENOBUFS); | ||
1347 | 1350 | ||
1348 | return xdst; | 1351 | return xdst; |
1349 | } | 1352 | } |
diff --git a/scripts/basic/fixdep.c b/scripts/basic/fixdep.c index c9a16abacab4..6c94c6ce2925 100644 --- a/scripts/basic/fixdep.c +++ b/scripts/basic/fixdep.c | |||
@@ -315,6 +315,7 @@ static void parse_dep_file(void *map, size_t len) | |||
315 | char *end = m + len; | 315 | char *end = m + len; |
316 | char *p; | 316 | char *p; |
317 | char s[PATH_MAX]; | 317 | char s[PATH_MAX]; |
318 | int first; | ||
318 | 319 | ||
319 | p = strchr(m, ':'); | 320 | p = strchr(m, ':'); |
320 | if (!p) { | 321 | if (!p) { |
@@ -327,6 +328,7 @@ static void parse_dep_file(void *map, size_t len) | |||
327 | 328 | ||
328 | clear_config(); | 329 | clear_config(); |
329 | 330 | ||
331 | first = 1; | ||
330 | while (m < end) { | 332 | while (m < end) { |
331 | while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) | 333 | while (m < end && (*m == ' ' || *m == '\\' || *m == '\n')) |
332 | m++; | 334 | m++; |
@@ -340,9 +342,17 @@ static void parse_dep_file(void *map, size_t len) | |||
340 | if (strrcmp(s, "include/generated/autoconf.h") && | 342 | if (strrcmp(s, "include/generated/autoconf.h") && |
341 | strrcmp(s, "arch/um/include/uml-config.h") && | 343 | strrcmp(s, "arch/um/include/uml-config.h") && |
342 | strrcmp(s, ".ver")) { | 344 | strrcmp(s, ".ver")) { |
343 | printf(" %s \\\n", s); | 345 | /* |
346 | * Do not output the first dependency (the | ||
347 | * source file), so that kbuild is not confused | ||
348 | * if a .c file is rewritten into .S or vice | ||
349 | * versa. | ||
350 | */ | ||
351 | if (!first) | ||
352 | printf(" %s \\\n", s); | ||
344 | do_config_file(s); | 353 | do_config_file(s); |
345 | } | 354 | } |
355 | first = 0; | ||
346 | m = p + 1; | 356 | m = p + 1; |
347 | } | 357 | } |
348 | printf("\n%s: $(deps_%s)\n\n", target, target); | 358 | printf("\n%s: $(deps_%s)\n\n", target, target); |
diff --git a/sound/pci/au88x0/au88x0_core.c b/sound/pci/au88x0/au88x0_core.c index 23f49f356e0f..16c0bdfbb164 100644 --- a/sound/pci/au88x0/au88x0_core.c +++ b/sound/pci/au88x0/au88x0_core.c | |||
@@ -1252,11 +1252,19 @@ static void vortex_adbdma_resetup(vortex_t *vortex, int adbdma) { | |||
1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) | 1252 | static int inline vortex_adbdma_getlinearpos(vortex_t * vortex, int adbdma) |
1253 | { | 1253 | { |
1254 | stream_t *dma = &vortex->dma_adb[adbdma]; | 1254 | stream_t *dma = &vortex->dma_adb[adbdma]; |
1255 | int temp; | 1255 | int temp, page, delta; |
1256 | 1256 | ||
1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); | 1257 | temp = hwread(vortex->mmio, VORTEX_ADBDMA_STAT + (adbdma << 2)); |
1258 | temp = (dma->period_virt * dma->period_bytes) + (temp & (dma->period_bytes - 1)); | 1258 | page = (temp & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT; |
1259 | return temp; | 1259 | if (dma->nr_periods >= 4) |
1260 | delta = (page - dma->period_real) & 3; | ||
1261 | else { | ||
1262 | delta = (page - dma->period_real); | ||
1263 | if (delta < 0) | ||
1264 | delta += dma->nr_periods; | ||
1265 | } | ||
1266 | return (dma->period_virt + delta) * dma->period_bytes | ||
1267 | + (temp & (dma->period_bytes - 1)); | ||
1260 | } | 1268 | } |
1261 | 1269 | ||
1262 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) | 1270 | static void vortex_adbdma_startfifo(vortex_t * vortex, int adbdma) |
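The getlinearpos change above stops trusting period_virt alone and instead reads the hardware's current subbuffer ("page") out of the status register, adding the wrapped distance from the last page the driver synced to before converting to a byte position. A standalone sketch of that arithmetic; the mask, shift and the values in main() are illustrative stand-ins, not the real au88x0 register layout:

    #include <stdio.h>

    /* Illustrative values only; the real mask/shift come from the au88x0 headers. */
    #define ADB_SUBBUF_SHIFT  12
    #define ADB_SUBBUF_MASK   (0x3 << ADB_SUBBUF_SHIFT)

    struct dma_state {
            int nr_periods;     /* periods in the ring buffer (hw uses 4 pages) */
            int period_bytes;
            int period_virt;    /* period index as seen by the ALSA side */
            int period_real;    /* hardware page the driver last synced to */
    };

    /* Linear position = (virt period + how far the hw page ran ahead) * period
     * size + byte offset inside the current period. */
    static int linear_pos(const struct dma_state *dma, unsigned int stat_reg)
    {
            int page = (stat_reg & ADB_SUBBUF_MASK) >> ADB_SUBBUF_SHIFT;
            int delta;

            if (dma->nr_periods >= 4) {
                    delta = (page - dma->period_real) & 3;  /* 4 hw pages: wrap mod 4 */
            } else {
                    delta = page - dma->period_real;
                    if (delta < 0)
                            delta += dma->nr_periods;       /* wrap within fewer periods */
            }
            return (dma->period_virt + delta) * dma->period_bytes
                   + (stat_reg & (dma->period_bytes - 1));
    }

    int main(void)
    {
            struct dma_state dma = { 4, 4096, 2, 1 };
            /* pretend the hardware is in page 3, 100 bytes into it */
            unsigned int stat = (3u << ADB_SUBBUF_SHIFT) | 100;

            printf("%d\n", linear_pos(&dma, stat));   /* (2 + 2) * 4096 + 100 */
            return 0;
    }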
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 0baffcdee8f9..fcedad9a5fef 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -2308,6 +2308,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = { | |||
2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), | 2308 | SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB), |
2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), | 2309 | SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB), |
2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), | 2310 | SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB), |
2311 | SND_PCI_QUIRK(0x1043, 0x8410, "ASUS", POS_FIX_LPIB), | ||
2311 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), | 2312 | SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB), |
2312 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), | 2313 | SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB), |
2313 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), | 2314 | SND_PCI_QUIRK(0x1179, 0xff10, "Toshiba A100-259", POS_FIX_LPIB), |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index fbe97d32140d..dd7c5c12225d 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3410,7 +3410,7 @@ static void cx_auto_parse_output(struct hda_codec *codec) | |||
3410 | } | 3410 | } |
3411 | } | 3411 | } |
3412 | spec->multiout.dac_nids = spec->private_dac_nids; | 3412 | spec->multiout.dac_nids = spec->private_dac_nids; |
3413 | spec->multiout.max_channels = nums * 2; | 3413 | spec->multiout.max_channels = spec->multiout.num_dacs * 2; |
3414 | 3414 | ||
3415 | if (cfg->hp_outs > 0) | 3415 | if (cfg->hp_outs > 0) |
3416 | spec->auto_mute = 1; | 3416 | spec->auto_mute = 1; |
@@ -3729,9 +3729,9 @@ static int cx_auto_init(struct hda_codec *codec) | |||
3729 | return 0; | 3729 | return 0; |
3730 | } | 3730 | } |
3731 | 3731 | ||
3732 | static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | 3732 | static int cx_auto_add_volume_idx(struct hda_codec *codec, const char *basename, |
3733 | const char *dir, int cidx, | 3733 | const char *dir, int cidx, |
3734 | hda_nid_t nid, int hda_dir) | 3734 | hda_nid_t nid, int hda_dir, int amp_idx) |
3735 | { | 3735 | { |
3736 | static char name[32]; | 3736 | static char name[32]; |
3737 | static struct snd_kcontrol_new knew[] = { | 3737 | static struct snd_kcontrol_new knew[] = { |
@@ -3743,7 +3743,8 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3743 | 3743 | ||
3744 | for (i = 0; i < 2; i++) { | 3744 | for (i = 0; i < 2; i++) { |
3745 | struct snd_kcontrol *kctl; | 3745 | struct snd_kcontrol *kctl; |
3746 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, 0, hda_dir); | 3746 | knew[i].private_value = HDA_COMPOSE_AMP_VAL(nid, 3, amp_idx, |
3747 | hda_dir); | ||
3747 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; | 3748 | knew[i].subdevice = HDA_SUBDEV_AMP_FLAG; |
3748 | knew[i].index = cidx; | 3749 | knew[i].index = cidx; |
3749 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); | 3750 | snprintf(name, sizeof(name), "%s%s %s", basename, dir, sfx[i]); |
@@ -3759,6 +3760,9 @@ static int cx_auto_add_volume(struct hda_codec *codec, const char *basename, | |||
3759 | return 0; | 3760 | return 0; |
3760 | } | 3761 | } |
3761 | 3762 | ||
3763 | #define cx_auto_add_volume(codec, str, dir, cidx, nid, hda_dir) \ | ||
3764 | cx_auto_add_volume_idx(codec, str, dir, cidx, nid, hda_dir, 0) | ||
3765 | |||
3762 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ | 3766 | #define cx_auto_add_pb_volume(codec, nid, str, idx) \ |
3763 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) | 3767 | cx_auto_add_volume(codec, str, " Playback", idx, nid, HDA_OUTPUT) |
3764 | 3768 | ||
@@ -3808,29 +3812,60 @@ static int cx_auto_build_input_controls(struct hda_codec *codec) | |||
3808 | struct conexant_spec *spec = codec->spec; | 3812 | struct conexant_spec *spec = codec->spec; |
3809 | struct auto_pin_cfg *cfg = &spec->autocfg; | 3813 | struct auto_pin_cfg *cfg = &spec->autocfg; |
3810 | static const char *prev_label; | 3814 | static const char *prev_label; |
3811 | int i, err, cidx; | 3815 | int i, err, cidx, conn_len; |
3816 | hda_nid_t conn[HDA_MAX_CONNECTIONS]; | ||
3817 | |||
3818 | int multi_adc_volume = 0; /* If the ADC nid has several input volumes */ | ||
3819 | int adc_nid = spec->adc_nids[0]; | ||
3820 | |||
3821 | conn_len = snd_hda_get_connections(codec, adc_nid, conn, | ||
3822 | HDA_MAX_CONNECTIONS); | ||
3823 | if (conn_len < 0) | ||
3824 | return conn_len; | ||
3825 | |||
3826 | multi_adc_volume = cfg->num_inputs > 1 && conn_len > 1; | ||
3827 | if (!multi_adc_volume) { | ||
3828 | err = cx_auto_add_volume(codec, "Capture", "", 0, adc_nid, | ||
3829 | HDA_INPUT); | ||
3830 | if (err < 0) | ||
3831 | return err; | ||
3832 | } | ||
3812 | 3833 | ||
3813 | err = cx_auto_add_volume(codec, "Capture", "", 0, spec->adc_nids[0], | ||
3814 | HDA_INPUT); | ||
3815 | if (err < 0) | ||
3816 | return err; | ||
3817 | prev_label = NULL; | 3834 | prev_label = NULL; |
3818 | cidx = 0; | 3835 | cidx = 0; |
3819 | for (i = 0; i < cfg->num_inputs; i++) { | 3836 | for (i = 0; i < cfg->num_inputs; i++) { |
3820 | hda_nid_t nid = cfg->inputs[i].pin; | 3837 | hda_nid_t nid = cfg->inputs[i].pin; |
3821 | const char *label; | 3838 | const char *label; |
3822 | if (!(get_wcaps(codec, nid) & AC_WCAP_IN_AMP)) | 3839 | int j; |
3840 | int pin_amp = get_wcaps(codec, nid) & AC_WCAP_IN_AMP; | ||
3841 | if (!pin_amp && !multi_adc_volume) | ||
3823 | continue; | 3842 | continue; |
3843 | |||
3824 | label = hda_get_autocfg_input_label(codec, cfg, i); | 3844 | label = hda_get_autocfg_input_label(codec, cfg, i); |
3825 | if (label == prev_label) | 3845 | if (label == prev_label) |
3826 | cidx++; | 3846 | cidx++; |
3827 | else | 3847 | else |
3828 | cidx = 0; | 3848 | cidx = 0; |
3829 | prev_label = label; | 3849 | prev_label = label; |
3830 | err = cx_auto_add_volume(codec, label, " Capture", cidx, | 3850 | |
3831 | nid, HDA_INPUT); | 3851 | if (pin_amp) { |
3832 | if (err < 0) | 3852 | err = cx_auto_add_volume(codec, label, " Boost", cidx, |
3833 | return err; | 3853 | nid, HDA_INPUT); |
3854 | if (err < 0) | ||
3855 | return err; | ||
3856 | } | ||
3857 | |||
3858 | if (!multi_adc_volume) | ||
3859 | continue; | ||
3860 | for (j = 0; j < conn_len; j++) { | ||
3861 | if (conn[j] == nid) { | ||
3862 | err = cx_auto_add_volume_idx(codec, label, | ||
3863 | " Capture", cidx, adc_nid, HDA_INPUT, j); | ||
3864 | if (err < 0) | ||
3865 | return err; | ||
3866 | break; | ||
3867 | } | ||
3868 | } | ||
3834 | } | 3869 | } |
3835 | return 0; | 3870 | return 0; |
3836 | } | 3871 | } |
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 68b97477577b..66eabafb1c24 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c | |||
@@ -785,7 +785,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *dev) | |||
785 | } | 785 | } |
786 | 786 | ||
787 | dev->pcm->private_data = dev; | 787 | dev->pcm->private_data = dev; |
788 | strcpy(dev->pcm->name, dev->product_name); | 788 | strlcpy(dev->pcm->name, dev->product_name, sizeof(dev->pcm->name)); |
789 | 789 | ||
790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); | 790 | memset(dev->sub_playback, 0, sizeof(dev->sub_playback)); |
791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); | 791 | memset(dev->sub_capture, 0, sizeof(dev->sub_capture)); |
diff --git a/sound/usb/caiaq/midi.c b/sound/usb/caiaq/midi.c index 2f218c77fff2..a1a47088fd0c 100644 --- a/sound/usb/caiaq/midi.c +++ b/sound/usb/caiaq/midi.c | |||
@@ -136,7 +136,7 @@ int snd_usb_caiaq_midi_init(struct snd_usb_caiaqdev *device) | |||
136 | if (ret < 0) | 136 | if (ret < 0) |
137 | return ret; | 137 | return ret; |
138 | 138 | ||
139 | strcpy(rmidi->name, device->product_name); | 139 | strlcpy(rmidi->name, device->product_name, sizeof(rmidi->name)); |
140 | 140 | ||
141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; | 141 | rmidi->info_flags = SNDRV_RAWMIDI_INFO_DUPLEX; |
142 | rmidi->private_data = device; | 142 | rmidi->private_data = device; |