 119 files changed, 3291 insertions(+), 2062 deletions(-)
diff --git a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
index 36381129d141..f455182b1086 100644
--- a/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
+++ b/Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt
@@ -2,14 +2,40 @@ Marvell Armada 370 and Armada XP Timers
 ---------------------------------------
 
 Required properties:
-- compatible: Should be "marvell,armada-370-xp-timer"
+- compatible: Should be either "marvell,armada-370-timer" or
+  "marvell,armada-xp-timer" as appropriate.
 - interrupts: Should contain the list of Global Timer interrupts and
   then local timer interrupts
 - reg: Should contain location and length for timers register. First
   pair for the Global Timer registers, second pair for the
   local/private timers.
-- clocks: clock driving the timer hardware
 
-Optional properties:
-- marvell,timer-25Mhz: Tells whether the Global timer supports the 25
-  Mhz fixed mode (available on Armada XP and not on Armada 370)
+Clocks required for compatible = "marvell,armada-370-timer":
+- clocks : Must contain a single entry describing the clock input
+
+Clocks required for compatible = "marvell,armada-xp-timer":
+- clocks : Must contain an entry for each entry in clock-names.
+- clock-names : Must include the following entries:
+  "nbclk" (L2/coherency fabric clock),
+  "fixed" (Reference 25 MHz fixed-clock).
+
+Examples:
+
+- Armada 370:
+
+	timer {
+		compatible = "marvell,armada-370-timer";
+		reg = <0x20300 0x30>, <0x21040 0x30>;
+		interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+		clocks = <&coreclk 2>;
+	};
+
+- Armada XP:
+
+	timer {
+		compatible = "marvell,armada-xp-timer";
+		reg = <0x20300 0x30>, <0x21040 0x30>;
+		interrupts = <37>, <38>, <39>, <40>, <5>, <6>;
+		clocks = <&coreclk 2>, <&refclk>;
+		clock-names = "nbclk", "fixed";
+	};
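Background: the Armada XP example above consumes a <&refclk> handle that the snippet does not define. The 25 MHz reference would typically be provided by a fixed-clock node along these lines (an illustrative sketch, not part of this binding; the node name and label are assumptions):

	refclk: ref25m {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <25000000>;
	};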
diff --git a/Documentation/filesystems/cifs/cifs.txt b/Documentation/filesystems/cifs/cifs.txt
index 49cc923a93e3..2fac91ac96cf 100644
--- a/Documentation/filesystems/cifs/cifs.txt
+++ b/Documentation/filesystems/cifs/cifs.txt
@@ -1,18 +1,14 @@
 This is the client VFS module for the Common Internet File System
 (CIFS) protocol which is the successor to the Server Message Block
 (SMB) protocol, the native file sharing mechanism for most early
-PC operating systems. CIFS is fully supported by current network
-file servers such as Windows 2000, Windows 2003 (including
-Windows XP) as well by Samba (which provides excellent CIFS
+PC operating systems. New and improved versions of CIFS are now
+called SMB2 and SMB3. These dialects are also supported by the
+CIFS VFS module. CIFS is fully supported by network
+file servers such as Windows 2000, 2003, 2008 and 2012
+as well as by Samba (which provides excellent CIFS
 server support for Linux and many other operating systems), so
 this network filesystem client can mount to a wide variety of
-servers. The smbfs module should be used instead of this cifs module
-for mounting to older SMB servers such as OS/2. The smbfs and cifs
-modules can coexist and do not conflict. The CIFS VFS filesystem
-module is designed to work well with servers that implement the
-newer versions (dialects) of the SMB/CIFS protocol such as Samba,
-the program written by Andrew Tridgell that turns any Unix host
-into a SMB/CIFS file server.
+servers.
 
 The intent of this module is to provide the most advanced network
 file system function for CIFS compliant servers, including better
@@ -24,28 +20,12 @@
 alternative to NFSv4 for fileserving in some Linux to Linux environments,
 not just in Linux to Windows environments.
 
-This filesystem has an optional mount utility (mount.cifs) that can
-be obtained from the project page and installed in the path in the same
-directory with the other mount helpers (such as mount.smbfs).
-Mounting using the cifs filesystem without installing the mount helper
-requires specifying the server's ip address.
+This filesystem has a mount utility (mount.cifs) that can be obtained from
 
-For Linux 2.4:
-  mount //anything/here /mnt_target -o
-   user=username,pass=password,unc=//ip_address_of_server/sharename
+  https://ftp.samba.org/pub/linux-cifs/cifs-utils/
 
-For Linux 2.5:
-  mount //ip_address_of_server/sharename /mnt_target -o user=username, pass=password
+It must be installed in the directory with the other mount helpers.
 
+For more information on the module see the project wiki page at
 
-For more information on the module see the project page at
-
-http://us1.samba.org/samba/Linux_CIFS_client.html
-
-For more information on CIFS see:
-
-http://www.snia.org/tech_activities/CIFS
-
-or the Samba site:
-
-http://www.samba.org
+  https://wiki.samba.org/index.php/LinuxCIFS_utils
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index cc92ca8c8963..6edaa65b0818 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,13 @@
+Release Date    : Sat. Aug 31, 2013 17:00:00 PST 2013 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+Current Version : 06.700.06.00-rc1
+Old Version     : 06.600.18.00-rc1
+    1. Add High Availability clustering support using shared Logical Disks.
+    2. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Wed. May 15, 2013 17:00:00 PST 2013 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/Makefile b/Makefile
@@ -1,8 +1,8 @@
 VERSION = 3
-PATCHLEVEL = 11
+PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION =
-NAME = Suicidal Squirrel
+EXTRAVERSION = -rc1
+NAME = One Giant Leap for Frogkind
 
 # *DOCUMENTATION*
 # To see a list of typical targets execute "make help"
diff --git a/arch/arm/common/timer-sp.c b/arch/arm/common/timer-sp.c
index 023ee63827a2..e901d0f3e0bb 100644
--- a/arch/arm/common/timer-sp.c
+++ b/arch/arm/common/timer-sp.c
@@ -166,7 +166,8 @@ static int sp804_set_next_event(unsigned long next,
 }
 
 static struct clock_event_device sp804_clockevent = {
-	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+			  CLOCK_EVT_FEAT_DYNIRQ,
 	.set_mode	= sp804_set_mode,
 	.set_next_event	= sp804_set_next_event,
 	.rating		= 300,
diff --git a/arch/arm/mach-mvebu/armada-370-xp.c b/arch/arm/mach-mvebu/armada-370-xp.c
index 829b57306328..e2acff98e750 100644
--- a/arch/arm/mach-mvebu/armada-370-xp.c
+++ b/arch/arm/mach-mvebu/armada-370-xp.c
@@ -18,7 +18,7 @@
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
 #include <linux/io.h>
-#include <linux/time-armada-370-xp.h>
+#include <linux/clocksource.h>
 #include <linux/dma-mapping.h>
 #include <linux/mbus.h>
 #include <asm/hardware/cache-l2x0.h>
@@ -37,7 +37,7 @@ static void __init armada_370_xp_map_io(void)
 static void __init armada_370_xp_timer_and_clk_init(void)
 {
 	of_clk_init(NULL);
-	armada_370_xp_timer_init();
+	clocksource_of_init();
 	coherency_init();
 	BUG_ON(mvebu_mbus_dt_init());
 #ifdef CONFIG_CACHE_L2X0
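Background: the switch to the generic clocksource_of_init() works because the timer driver registers its own init hook keyed on a compatible string, and the generic code then scans the device tree and invokes the matching hook for each node. A minimal sketch of that registration pattern (names here are illustrative, not the actual mvebu driver code):

	#include <linux/clocksource.h>
	#include <linux/of.h>

	/* Called by clocksource_of_init() for each DT node matching the
	 * compatible string below; the node is passed in so the driver
	 * can map its registers and parse clocks/interrupts from it. */
	static void __init my_timer_init(struct device_node *np)
	{
		/* ioremap registers, request the interrupt, register the
		 * clocksource and clock_event_device here */
	}

	CLOCKSOURCE_OF_DECLARE(my_timer, "vendor,my-timer", my_timer_init);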
diff --git a/arch/mips/dec/ioasic-irq.c b/arch/mips/dec/ioasic-irq.c
index 824e08c73798..4b3e3a4375a6 100644
--- a/arch/mips/dec/ioasic-irq.c
+++ b/arch/mips/dec/ioasic-irq.c
@@ -51,6 +51,14 @@ static struct irq_chip ioasic_irq_type = {
 	.irq_unmask = unmask_ioasic_irq,
 };
 
+void clear_ioasic_dma_irq(unsigned int irq)
+{
+	u32 sir;
+
+	sir = ~(1 << (irq - ioasic_irq_base));
+	ioasic_write(IO_REG_SIR, sir);
+}
+
 static struct irq_chip ioasic_dma_irq_type = {
 	.name = "IO-ASIC-DMA",
 	.irq_ack = ack_ioasic_irq,
diff --git a/arch/mips/dec/time.c b/arch/mips/dec/time.c
index 56ebc7f2bede..1914e56f0d96 100644
--- a/arch/mips/dec/time.c
+++ b/arch/mips/dec/time.c
@@ -125,12 +125,16 @@ int rtc_mips_set_mmss(unsigned long nowtime)
 
 void __init plat_time_init(void)
 {
+	int ioasic_clock = 0;
 	u32 start, end;
 	int i = HZ / 8;
 
 	/* Set up the rate of periodic DS1287 interrupts. */
 	ds1287_set_base_clock(HZ);
 
+	/* On some I/O ASIC systems we have the I/O ASIC's counter. */
+	if (IOASIC)
+		ioasic_clock = dec_ioasic_clocksource_init() == 0;
 	if (cpu_has_counter) {
 		ds1287_timer_state();
 		while (!ds1287_timer_state())
@@ -147,9 +151,21 @@ void __init plat_time_init(void)
 		mips_hpt_frequency = (end - start) * 8;
 		printk(KERN_INFO "MIPS counter frequency %dHz\n",
 			mips_hpt_frequency);
-	} else if (IOASIC)
-		/* For pre-R4k systems we use the I/O ASIC's counter. */
-		dec_ioasic_clocksource_init();
+
+		/*
+		 * All R4k DECstations suffer from the CP0 Count erratum,
+		 * so we can't use the timer as a clock source, and a clock
+		 * event both at a time. An accurate wall clock is more
+		 * important than a high-precision interval timer so only
+		 * use the timer as a clock source, and not a clock event
+		 * if there's no I/O ASIC counter available to serve as a
+		 * clock source.
+		 */
+		if (!ioasic_clock) {
+			init_r4k_clocksource();
+			mips_hpt_frequency = 0;
+		}
+	}
 
 	ds1287_clockevent_init(dec_interrupt[DEC_IRQ_RTC]);
 }
diff --git a/arch/mips/include/asm/dec/ioasic.h b/arch/mips/include/asm/dec/ioasic.h
index 98badd6bf22d..a6e505a0e44b 100644
--- a/arch/mips/include/asm/dec/ioasic.h
+++ b/arch/mips/include/asm/dec/ioasic.h
@@ -31,8 +31,10 @@ static inline u32 ioasic_read(unsigned int reg)
 	return ioasic_base[reg / 4];
 }
 
+extern void clear_ioasic_dma_irq(unsigned int irq);
+
 extern void init_ioasic_irqs(int base);
 
-extern void dec_ioasic_clocksource_init(void);
+extern int dec_ioasic_clocksource_init(void);
 
 #endif /* __ASM_DEC_IOASIC_H */
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 87e88feb4a25..6cbbf6e106b9 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -37,7 +37,7 @@ static struct clocksource clocksource_dec = {
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
-void __init dec_ioasic_clocksource_init(void)
+int __init dec_ioasic_clocksource_init(void)
 {
 	unsigned int freq;
 	u32 start, end;
@@ -56,8 +56,14 @@ void __init dec_ioasic_clocksource_init(void)
 	end = dec_ioasic_hpt_read(&clocksource_dec);
 
 	freq = (end - start) * 8;
+
+	/* An early revision of the I/O ASIC didn't have the counter. */
+	if (!freq)
+		return -ENXIO;
+
 	printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
 
 	clocksource_dec.rating = 200 + freq / 10000000;
 	clocksource_register_hz(&clocksource_dec, freq);
+	return 0;
 }
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index c2e5d74739b4..5969f1e9b62a 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -99,7 +99,9 @@ static void cmp_init_secondary(void)
 
 	c->core = (read_c0_ebase() >> 1) & 0x1ff;
 #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
-	c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
+	if (cpu_has_mipsmt)
+		c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+			    TCBIND_CURVPE;
 #endif
 #ifdef CONFIG_MIPS_MT_SMTC
 	c->tc_id  = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
@@ -177,9 +179,16 @@ void __init cmp_smp_setup(void)
 	}
 
 	if (cpu_has_mipsmt) {
-		unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
+		unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+		unsigned int mvpconf0 = read_c0_mvpconf0();
+
+		nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#elif defined(CONFIG_MIPS_MT_SMTC)
+		unsigned int mvpconf0 = read_c0_mvpconf0();
 
 		nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+#endif
 		smp_num_siblings = nvpe;
 	}
 	pr_info("Detected %i available secondary CPU(s)\n", ncpu);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index faf84c5f2629..59b2b3cd7885 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1368,7 +1368,7 @@ out_einval:
 }
 static DEVICE_ATTR_RW(ntcs);
 
-static struct attribute vpe_attrs[] = {
+static struct attribute *vpe_attrs[] = {
 	&dev_attr_kill.attr,
 	&dev_attr_ntcs.attr,
 	NULL,
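Background: the one-character fix above matters because sysfs expects a NULL-terminated array of pointers to struct attribute, usually wrapped in an attribute_group; an array of struct attribute by value does not match the &dev_attr_*.attr initializers. A minimal self-contained sketch of the intended shape (the attribute name and value are illustrative):

	#include <linux/device.h>

	static ssize_t ntcs_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%d\n", 0);	/* placeholder value */
	}
	static DEVICE_ATTR_RO(ntcs);

	/* A NULL-terminated array of *pointers*, as sysfs requires. */
	static struct attribute *example_attrs[] = {
		&dev_attr_ntcs.attr,
		NULL,
	};

	static const struct attribute_group example_attr_group = {
		.attrs = example_attrs,
	};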
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 136224b74d4f..81250ece3062 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -55,10 +55,10 @@ ifneq ($(CONFIG_LD_NO_RELAX),)
 LDFLAGS := --no-relax
 endif
 
-ifeq ($(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EB__
 endif
-ifeq ($(shell echo -e __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
+ifeq ($(shell echo __XTENSA_EL__ | $(CC) -E - | grep -v "\#"),1)
 CHECKFLAGS += -D__XTENSA_EL__
 endif
 
diff --git a/arch/xtensa/boot/Makefile b/arch/xtensa/boot/Makefile
index 64ffc4b53df6..ca20a892021b 100644
--- a/arch/xtensa/boot/Makefile
+++ b/arch/xtensa/boot/Makefile
@@ -12,7 +12,7 @@
 KBUILD_CFLAGS	+= -fno-builtin -Iarch/$(ARCH)/boot/include
 HOSTFLAGS	+= -Iarch/$(ARCH)/boot/include
 
-BIG_ENDIAN	:= $(shell echo -e __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
+BIG_ENDIAN	:= $(shell echo __XTENSA_EB__ | $(CC) -E - | grep -v "\#")
 
 export ccflags-y
 export BIG_ENDIAN
diff --git a/arch/xtensa/include/asm/regs.h b/arch/xtensa/include/asm/regs.h
index b24de6717020..4ba9f516b0e2 100644
--- a/arch/xtensa/include/asm/regs.h
+++ b/arch/xtensa/include/asm/regs.h
@@ -82,6 +82,7 @@
 #define PS_CALLINC_SHIFT	16
 #define PS_CALLINC_MASK		0x00030000
 #define PS_OWB_SHIFT		8
+#define PS_OWB_WIDTH		4
 #define PS_OWB_MASK		0x00000F00
 #define PS_RING_SHIFT		6
 #define PS_RING_MASK		0x000000C0
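Background: PS_OWB_WIDTH exists so the rewritten fast_alloca code later in this patch can extract the old-window-base field with "extui a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH" rather than masking and shifting. In C terms the extraction is equivalent to the following sketch (the helper name and the standalone form are illustrative, not kernel code):

	#include <stdint.h>

	#define PS_OWB_SHIFT	8
	#define PS_OWB_WIDTH	4

	/* Extract the 4-bit old-window-base (OWB) field from a PS value;
	 * the shift+width form mirrors the extui instruction and selects
	 * the same bits as the pre-existing PS_OWB_MASK (0x00000F00). */
	static inline uint32_t ps_owb(uint32_t ps)
	{
		return (ps >> PS_OWB_SHIFT) & ((1u << PS_OWB_WIDTH) - 1);
	}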
diff --git a/arch/xtensa/include/asm/timex.h b/arch/xtensa/include/asm/timex.h
index 69f901713fb6..27fa3c170662 100644
--- a/arch/xtensa/include/asm/timex.h
+++ b/arch/xtensa/include/asm/timex.h
@@ -35,13 +35,7 @@
 # error "Bad timer number for Linux configurations!"
 #endif
 
-#ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT
 extern unsigned long ccount_freq;
-#define CCOUNT_PER_JIFFY (ccount_freq / HZ)
-#else
-#define CCOUNT_PER_JIFFY (CONFIG_XTENSA_CPU_CLOCK*(1000000UL/HZ))
-#endif
-
 
 typedef unsigned long long cycles_t;
 
diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S
index aa2e87b8566a..d4cef6039a5c 100644
--- a/arch/xtensa/kernel/align.S
+++ b/arch/xtensa/kernel/align.S
@@ -146,9 +146,9 @@
  *   a0:	trashed, original value saved on stack (PT_AREG0)
  *   a1:	a1
  *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
  *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -171,7 +171,6 @@ ENTRY(fast_unaligned)
 	s32i	a8, a2, PT_AREG8
 
 	rsr	a0, depc
-	xsr	a3, excsave1
 	s32i	a0, a2, PT_AREG2
 	s32i	a3, a2, PT_AREG3
 
diff --git a/arch/xtensa/kernel/coprocessor.S b/arch/xtensa/kernel/coprocessor.S
index 647657484866..a482df5df2b2 100644
--- a/arch/xtensa/kernel/coprocessor.S
+++ b/arch/xtensa/kernel/coprocessor.S
@@ -32,9 +32,9 @@
  *   a0:	trashed, original value saved on stack (PT_AREG0)
  *   a1:	a1
  *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
  *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -225,9 +225,9 @@ ENDPROC(coprocessor_restore)
  *   a0:	trashed, original value saved on stack (PT_AREG0)
  *   a1:	a1
  *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
  *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
  *
  *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
  *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -245,7 +245,6 @@ ENTRY(fast_coprocessor)
 
 	/* Save remaining registers a1-a3 and SAR */
 
-	xsr	a3, excsave1
 	s32i	a3, a2, PT_AREG3
 	rsr	a3, sar
 	s32i	a1, a2, PT_AREG1
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 9298742f0fd0..de1dfa18d0a1 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -31,8 +31,6 @@
 /* Unimplemented features. */
 
 #undef KERNEL_STACK_OVERFLOW_CHECK
-#undef PREEMPTIBLE_KERNEL
-#undef ALLOCA_EXCEPTION_IN_IRAM
 
 /* Not well tested.
  *
@@ -92,9 +90,9 @@
  *   a0:	trashed, original value saved on stack (PT_AREG0)
  *   a1:	a1
  *   a2:	new stack pointer, original value in depc
- *   a3:	dispatch table
+ *   a3:	a3
  *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave1:	a3
+ *   excsave1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -110,9 +108,8 @@
 
 ENTRY(user_exception)
 
-	/* Save a2, a3, and depc, restore excsave_1 and set SP. */
+	/* Save a1, a2, a3, and set SP. */
 
-	xsr	a3, excsave1
 	rsr	a0, depc
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_AREG2
@@ -238,9 +235,9 @@ ENDPROC(user_exception)
  *   a0:	trashed, original value saved on stack (PT_AREG0)
  *   a1:	a1
  *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
  *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -256,9 +253,8 @@ ENDPROC(user_exception)
 
 ENTRY(kernel_exception)
 
-	/* Save a0, a2, a3, DEPC and set SP. */
+	/* Save a1, a2, a3, and set SP. */
 
-	xsr	a3, excsave1		# restore a3, excsave_1
 	rsr	a0, depc		# get a2
 	s32i	a1, a2, PT_AREG1
 	s32i	a0, a2, PT_AREG2
@@ -409,7 +405,7 @@ common_exception:
 	 * exception handler and call the exception handler.
 	 */
 
-	movi	a4, exc_table
+	rsr	a4, excsave1
 	mov	a6, a1			# pass stack frame
 	mov	a7, a0			# pass EXCCAUSE
 	addx4	a4, a0, a4
@@ -423,28 +419,15 @@ common_exception:
 	.global common_exception_return
 common_exception_return:
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-	l32i	a4, a1, PT_DEPC
-	/* Double exception means we came here with an exception
-	 * while PS.EXCM was set, i.e. interrupts disabled.
-	 */
-	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
-	l32i	a4, a1, PT_EXCCAUSE
-	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
-	/* We came here with an interrupt means interrupts were enabled
-	 * and we'll reenable them on return.
-	 */
-	movi	a4, trace_hardirqs_on
-	callx4	a4
 1:
-#endif
+	rsil	a2, LOCKLEVEL
 
 	/* Jump if we are returning from kernel exceptions. */
 
-1:	l32i	a3, a1, PT_PS
-	_bbci.l	a3, PS_UM_BIT, 4f
-
-	rsil	a2, 0
+	l32i	a3, a1, PT_PS
+	GET_THREAD_INFO(a2, a1)
+	l32i	a4, a2, TI_FLAGS
+	_bbci.l	a3, PS_UM_BIT, 6f
 
 	/* Specific to a user exception exit:
 	 * We need to check some flags for signal handling and rescheduling,
@@ -453,9 +436,6 @@ common_exception_return:
 	 * Note that we don't disable interrupts here.
 	 */
 
-	GET_THREAD_INFO(a2,a1)
-	l32i	a4, a2, TI_FLAGS
-
 	_bbsi.l	a4, TIF_NEED_RESCHED, 3f
 	_bbsi.l	a4, TIF_NOTIFY_RESUME, 2f
 	_bbci.l	a4, TIF_SIGPENDING, 5f
@@ -465,6 +445,7 @@ common_exception_return:
 
 	/* Call do_signal() */
 
+	rsil	a2, 0
 	movi	a4, do_notify_resume	# int do_notify_resume(struct pt_regs*)
 	mov	a6, a1
 	callx4	a4
@@ -472,10 +453,24 @@ common_exception_return:
 
 3:	/* Reschedule */
 
+	rsil	a2, 0
 	movi	a4, schedule	# void schedule (void)
 	callx4	a4
 	j	1b
 
+#ifdef CONFIG_PREEMPT
+6:
+	_bbci.l	a4, TIF_NEED_RESCHED, 4f
+
+	/* Check current_thread_info->preempt_count */
+
+	l32i	a4, a2, TI_PRE_COUNT
+	bnez	a4, 4f
+	movi	a4, preempt_schedule_irq
+	callx4	a4
+	j	1b
+#endif
+
 5:
 #ifdef CONFIG_DEBUG_TLB_SANITY
 	l32i	a4, a1, PT_DEPC
@@ -483,7 +478,24 @@ common_exception_return:
 	movi	a4, check_tlb_sanity
 	callx4	a4
 #endif
-4:	/* Restore optional registers. */
+6:
+4:
+#ifdef CONFIG_TRACE_IRQFLAGS
+	l32i	a4, a1, PT_DEPC
+	/* Double exception means we came here with an exception
+	 * while PS.EXCM was set, i.e. interrupts disabled.
+	 */
+	bgeui	a4, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
+	l32i	a4, a1, PT_EXCCAUSE
+	bnei	a4, EXCCAUSE_LEVEL1_INTERRUPT, 1f
+	/* We came here with an interrupt means interrupts were enabled
+	 * and we'll reenable them on return.
+	 */
+	movi	a4, trace_hardirqs_on
+	callx4	a4
+1:
+#endif
+	/* Restore optional registers. */
 
 	load_xtregs_opt a1 a2 a4 a5 a6 a7 PT_XTREGS_OPT
 
@@ -570,29 +582,6 @@ user_exception_exit:
 
 kernel_exception_exit:
 
-#ifdef PREEMPTIBLE_KERNEL
-
-#ifdef CONFIG_PREEMPT
-
-	/*
-	 * Note: We've just returned from a call4, so we have
-	 * at least 4 addt'l regs.
-	 */
-
-	/* Check current_thread_info->preempt_count */
-
-	GET_THREAD_INFO(a2)
-	l32i	a3, a2, TI_PREEMPT
-	bnez	a3, 1f
-
-	l32i	a2, a2, TI_FLAGS
-
-1:
-
-#endif
-
-#endif
-
 	/* Check if we have to do a movsp.
 	 *
 	 * We only have to do a movsp if the previous window-frame has
@@ -829,176 +818,63 @@ ENDPROC(unrecoverable_exception)
  *
  * The ALLOCA handler is entered when user code executes the MOVSP
  * instruction and the caller's frame is not in the register file.
- * In this case, the caller frame's a0..a3 are on the stack just
- * below sp (a1), and this handler moves them.
  *
- * For "MOVSP <ar>,<as>" without destination register a1, this routine
- * simply moves the value from <as> to <ar> without moving the save area.
+ * This algorithm was taken from Ross Morley's RTOS Porting Layer:
+ *
+ *    /home/ross/rtos/porting/XtensaRTOS-PortingLayer-20090507/xtensa_vectors.S
+ *
+ * It leverages the existing window spill/fill routines and their support for
+ * double exceptions. The 'movsp' instruction will only cause an exception if
+ * the next window needs to be loaded. In fact this ALLOCA exception may be
+ * replaced at some point by changing the hardware to do an underflow exception
+ * of the proper size instead.
+ *
+ * This algorithm simply backs out the register changes started by the user
+ * exception handler, makes it appear that we have started a window underflow
+ * by rotating the window back and then setting the old window base (OWB) in
+ * the 'ps' register with the rolled back window base. The 'movsp' instruction
+ * will be re-executed and this time since the next window frames are in the
+ * active AR registers it won't cause an exception.
+ *
+ * If the WindowUnderflow code gets a TLB miss the page will get mapped
+ * and the partial WindowUnderflow will be handled in the double exception
+ * handler.
 *
 * Entry condition:
 *
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
 */
 
-#if XCHAL_HAVE_BE
-#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 4, 4
-#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 0, 4
-#else
-#define _EXTUI_MOVSP_SRC(ar)	extui ar, ar, 0, 4
-#define _EXTUI_MOVSP_DST(ar)	extui ar, ar, 4, 4
-#endif
-
 ENTRY(fast_alloca)
+	rsr	a0, windowbase
+	rotw	-1
+	rsr	a2, ps
+	extui	a3, a2, PS_OWB_SHIFT, PS_OWB_WIDTH
+	xor	a3, a3, a4
+	l32i	a4, a6, PT_AREG0
+	l32i	a1, a6, PT_DEPC
+	rsr	a6, depc
+	wsr	a1, depc
+	slli	a3, a3, PS_OWB_SHIFT
+	xor	a2, a2, a3
+	wsr	a2, ps
+	rsync
 
-	/* We shouldn't be in a double exception. */
-
-	l32i	a0, a2, PT_DEPC
-	_bgeui	a0, VALID_DOUBLE_EXCEPTION_ADDRESS, .Lunhandled_double
-
-	rsr	a0, depc		# get a2
-	s32i	a4, a2, PT_AREG4	# save a4 and
-	s32i	a0, a2, PT_AREG2	# a2 to stack
-
-	/* Exit critical section. */
-
-	movi	a0, 0
-	s32i	a0, a3, EXC_TABLE_FIXUP
-
-	/* Restore a3, excsave_1 */
-
-	xsr	a3, excsave1		# make sure excsave_1 is valid for dbl.
-	rsr	a4, epc1		# get exception address
-	s32i	a3, a2, PT_AREG3	# save a3 to stack
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error	iram not supported
-#else
-	/* Note: l8ui not allowed in IRAM/IROM!! */
-	l8ui	a0, a4, 1		# read as(src) from MOVSP instruction
-#endif
-	movi	a3, .Lmovsp_src
-	_EXTUI_MOVSP_SRC(a0)		# extract source register number
-	addx8	a3, a0, a3
-	jx	a3
-
-.Lunhandled_double:
-	wsr	a0, excsave1
-	movi	a0, unrecoverable_exception
-	callx0	a0
-
-	.align 8
-.Lmovsp_src:
-	l32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
-	mov	a3, a1;			_j 1f;	.align 8
-	l32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
-	l32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
-	l32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
-	mov	a3, a5;			_j 1f;	.align 8
-	mov	a3, a6;			_j 1f;	.align 8
-	mov	a3, a7;			_j 1f;	.align 8
-	mov	a3, a8;			_j 1f;	.align 8
-	mov	a3, a9;			_j 1f;	.align 8
-	mov	a3, a10;		_j 1f;	.align 8
-	mov	a3, a11;		_j 1f;	.align 8
-	mov	a3, a12;		_j 1f;	.align 8
-	mov	a3, a13;		_j 1f;	.align 8
-	mov	a3, a14;		_j 1f;	.align 8
-	mov	a3, a15;		_j 1f;	.align 8
-
-1:
-
-#ifdef ALLOCA_EXCEPTION_IN_IRAM
-#error	iram not supported
-#else
-	l8ui	a0, a4, 0		# read ar(dst) from MOVSP instruction
-#endif
-	addi	a4, a4, 3		# step over movsp
-	_EXTUI_MOVSP_DST(a0)		# extract destination register
-	wsr	a4, epc1		# save new epc_1
-
-	_bnei	a0, 1, 1f		# no 'movsp a1, ax': jump
-
-	/* Move the save area. This implies the use of the L32E
-	 * and S32E instructions, because this move must be done with
-	 * the user's PS.RING privilege levels, not with ring 0
-	 * (kernel's) privileges currently active with PS.EXCM
-	 * set. Note that we have stil registered a fixup routine with the
-	 * double exception vector in case a double exception occurs.
-	 */
-
-	/* a0,a4:avail a1:old user stack a2:exc. stack a3:new user stack. */
-
-	l32e	a0, a1, -16
-	l32e	a4, a1, -12
-	s32e	a0, a3, -16
-	s32e	a4, a3, -12
-	l32e	a0, a1, -8
-	l32e	a4, a1, -4
-	s32e	a0, a3, -8
-	s32e	a4, a3, -4
-
-	/* Restore stack-pointer and all the other saved registers. */
-
-	mov	a1, a3
-
-	l32i	a4, a2, PT_AREG4
-	l32i	a3, a2, PT_AREG3
-	l32i	a0, a2, PT_AREG0
-	l32i	a2, a2, PT_AREG2
-	rfe
-
-	/* MOVSP <at>,<as> was invoked with <at> != a1.
-	 * Because the stack pointer is not being modified,
-	 * we should be able to just modify the pointer
-	 * without moving any save area.
-	 * The processor only traps these occurrences if the
-	 * caller window isn't live, so unfortunately we can't
-	 * use this as an alternate trap mechanism.
-	 * So we just do the move. This requires that we
-	 * resolve the destination register, not just the source,
-	 * so there's some extra work.
-	 * (PERHAPS NOT REALLY NEEDED, BUT CLEANER...)
-	 */
-
-	/* a0 dst-reg, a1 user-stack, a2 stack, a3 value of src reg. */
-
-1:	movi	a4, .Lmovsp_dst
-	addx8	a4, a0, a4
-	jx	a4
-
-	.align 8
-.Lmovsp_dst:
-	s32i	a3, a2, PT_AREG0;	_j 1f;	.align 8
-	mov	a1, a3;			_j 1f;	.align 8
-	s32i	a3, a2, PT_AREG2;	_j 1f;	.align 8
-	s32i	a3, a2, PT_AREG3;	_j 1f;	.align 8
-	s32i	a3, a2, PT_AREG4;	_j 1f;	.align 8
-	mov	a5, a3;			_j 1f;	.align 8
-	mov	a6, a3;			_j 1f;	.align 8
-	mov	a7, a3;			_j 1f;	.align 8
-	mov	a8, a3;			_j 1f;	.align 8
-	mov	a9, a3;			_j 1f;	.align 8
-	mov	a10, a3;		_j 1f;	.align 8
-	mov	a11, a3;		_j 1f;	.align 8
-	mov	a12, a3;		_j 1f;	.align 8
-	mov	a13, a3;		_j 1f;	.align 8
-	mov	a14, a3;		_j 1f;	.align 8
-	mov	a15, a3;		_j 1f;	.align 8
-
-1:	l32i	a4, a2, PT_AREG4
-	l32i	a3, a2, PT_AREG3
-	l32i	a0, a2, PT_AREG0
-	l32i	a2, a2, PT_AREG2
-	rfe
-
+	_bbci.l	a4, 31, 4f
+	rotw	-1
+	_bbci.l	a8, 30, 8f
+	rotw	-1
+	j	_WindowUnderflow12
+8:	j	_WindowUnderflow8
+4:	j	_WindowUnderflow4
 ENDPROC(fast_alloca)
 
 /*
@@ -1015,9 +891,9 @@ ENDPROC(fast_alloca)
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 */
 
 ENTRY(fast_syscall_kernel)
@@ -1064,7 +940,6 @@ ENTRY(fast_syscall_unrecoverable)
 
 	l32i	a0, a2, PT_AREG0	# restore a0
 	xsr	a2, depc		# restore a2, depc
-	rsr	a3, excsave1
 
 	wsr	a0, excsave1
 	movi	a0, unrecoverable_exception
@@ -1086,10 +961,10 @@ ENDPROC(fast_syscall_unrecoverable)
 *   a0:	a2 (syscall-nr), original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in a0 and DEPC
- *   a3:	dispatch table, original in excsave_1
+ *   a3:	a3
 *   a4..a15:	unchanged
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1122,8 +997,6 @@ ENDPROC(fast_syscall_unrecoverable)
 
 ENTRY(fast_syscall_xtensa)
 
-	xsr	a3, excsave1		# restore a3, excsave1
-
 	s32i	a7, a2, PT_AREG7	# we need an additional register
 	movi	a7, 4			# sizeof(unsigned int)
 	access_ok a3, a7, a0, a2, .Leac	# a0: scratch reg, a2: sp
@@ -1186,9 +1059,9 @@ ENDPROC(fast_syscall_xtensa)
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler.
 */
@@ -1197,15 +1070,16 @@ ENTRY(fast_syscall_spill_registers)
 
 	/* Register a FIXUP handler (pass current wb as a parameter) */
 
+	xsr	a3, excsave1
 	movi	a0, fast_syscall_spill_registers_fixup
 	s32i	a0, a3, EXC_TABLE_FIXUP
 	rsr	a0, windowbase
 	s32i	a0, a3, EXC_TABLE_PARAM
+	xsr	a3, excsave1		# restore a3 and excsave_1
 
-	/* Save a3 and SAR on stack. */
+	/* Save a3, a4 and SAR on stack. */
 
 	rsr	a0, sar
-	xsr	a3, excsave1		# restore a3 and excsave_1
 	s32i	a3, a2, PT_AREG3
 	s32i	a4, a2, PT_AREG4
 	s32i	a0, a2, PT_AREG5	# store SAR to PT_AREG5
@@ -1259,14 +1133,14 @@ fast_syscall_spill_registers_fixup:
 	 * in WS, so that the exception handlers save them to the task stack.
 	 */
 
-	rsr	a3, excsave1	# get spill-mask
+	xsr	a3, excsave1	# get spill-mask
 	slli	a2, a3, 1	# shift left by one
 
 	slli	a3, a2, 32-WSBITS
 	src	a2, a2, a3	# a1 = xxwww1yyxxxwww1yy......
 	wsr	a2, windowstart	# set corrected windowstart
 
-	movi	a3, exc_table
+	rsr	a3, excsave1
 	l32i	a2, a3, EXC_TABLE_DOUBLE_SAVE	# restore a2
 	l32i	a3, a3, EXC_TABLE_PARAM	# original WB (in user task)
 
@@ -1303,7 +1177,7 @@ fast_syscall_spill_registers_fixup:
 
 	/* Jump to the exception handler. */
 
-	movi	a3, exc_table
+	rsr	a3, excsave1
 	rsr	a0, exccause
 	addx4	a0, a0, a3		# find entry in table
 	l32i	a0, a0, EXC_TABLE_FAST_USER	# load handler
@@ -1320,6 +1194,7 @@ fast_syscall_spill_registers_fixup_return:
 	xsr	a3, excsave1
 	movi	a2, fast_syscall_spill_registers_fixup
 	s32i	a2, a3, EXC_TABLE_FIXUP
+	s32i	a0, a3, EXC_TABLE_DOUBLE_SAVE
 	rsr	a2, windowbase
 	s32i	a2, a3, EXC_TABLE_PARAM
 	l32i	a2, a3, EXC_TABLE_KSTK
@@ -1331,11 +1206,6 @@ fast_syscall_spill_registers_fixup_return:
 	wsr	a3, windowbase
 	rsync
 
-	/* Restore a3 and return. */
-
-	movi	a3, exc_table
-	xsr	a3, excsave1
-
 	rfde
 
 
@@ -1522,9 +1392,8 @@ ENTRY(_spill_registers)
 
 	movi	a0, 0
 
-	movi	a3, exc_table
+	rsr	a3, excsave1
 	l32i	a1, a3, EXC_TABLE_KSTK
-	wsr	a3, excsave1
 
 	movi	a4, (1 << PS_WOE_BIT) | LOCKLEVEL
 	wsr	a4, ps
@@ -1568,9 +1437,9 @@ ENDPROC(fast_second_level_miss_double_kernel)
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1578,9 +1447,10 @@ ENDPROC(fast_second_level_miss_double_kernel)
 
 ENTRY(fast_second_level_miss)
 
-	/* Save a1. Note: we don't expect a double exception. */
+	/* Save a1 and a3. Note: we don't expect a double exception. */
 
 	s32i	a1, a2, PT_AREG1
+	s32i	a3, a2, PT_AREG3
 
 	/* We need to map the page of PTEs for the user task.  Find
 	 * the pointer to that page.  Also, it's possible for tsk->mm
@@ -1602,9 +1472,6 @@ ENTRY(fast_second_level_miss)
 	l32i	a0, a1, TASK_MM		# tsk->mm
 	beqz	a0, 9f
 
-
-	/* We deliberately destroy a3 that holds the exception table. */
-
 8:	rsr	a3, excvaddr		# fault address
 	_PGD_OFFSET(a0, a3, a1)
 	l32i	a0, a0, 0		# read pmdval
@@ -1655,7 +1522,7 @@ ENTRY(fast_second_level_miss)
 
 	/* Exit critical section. */
 
-4:	movi	a3, exc_table	# restore a3
+4:	rsr	a3, excsave1
 	movi	a0, 0
 	s32i	a0, a3, EXC_TABLE_FIXUP
 
@@ -1663,8 +1530,8 @@ ENTRY(fast_second_level_miss)
 
 	l32i	a0, a2, PT_AREG0
 	l32i	a1, a2, PT_AREG1
+	l32i	a3, a2, PT_AREG3
 	l32i	a2, a2, PT_DEPC
-	xsr	a3, excsave1
 
 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
@@ -1751,11 +1618,8 @@ ENTRY(fast_second_level_miss)
 
 2:	/* Invalid PGD, default exception handling */
 
-	movi	a3, exc_table
 	rsr	a1, depc
-	xsr	a3, excsave1
 	s32i	a1, a2, PT_AREG2
-	s32i	a3, a2, PT_AREG3
 	mov	a1, a2
 
 	rsr	a2, ps
@@ -1775,9 +1639,9 @@ ENDPROC(fast_second_level_miss)
 *   a0:	trashed, original value saved on stack (PT_AREG0)
 *   a1:	a1
 *   a2:	new stack pointer, original in DEPC
- *   a3:	dispatch table
+ *   a3:	a3
 *   depc:	a2, original value saved on stack (PT_DEPC)
- *   excsave_1:	a3
+ *   excsave_1:	dispatch table
 *
 *   PT_DEPC >= VALID_DOUBLE_EXCEPTION_ADDRESS: double exception, DEPC
 *	       < VALID_DOUBLE_EXCEPTION_ADDRESS: regular exception
@@ -1785,17 +1649,17 @@ ENDPROC(fast_second_level_miss)
 
 ENTRY(fast_store_prohibited)
 
-	/* Save a1 and a4. */
+	/* Save a1 and a3. */
 
 	s32i	a1, a2, PT_AREG1
-	s32i	a4, a2, PT_AREG4
+	s32i	a3, a2, PT_AREG3
 
 	GET_CURRENT(a1,a2)
 	l32i	a0, a1, TASK_MM		# tsk->mm
 	beqz	a0, 9f
 
8:	rsr	a1, excvaddr		# fault address
-	_PGD_OFFSET(a0, a1, a4)
+	_PGD_OFFSET(a0, a1, a3)
 	l32i	a0, a0, 0
 	beqz	a0, 2f
 
@@ -1804,39 +1668,37 @@ ENTRY(fast_store_prohibited)
 	 * and is not PAGE_NONE. See pgtable.h for possible PTE layouts.
 	 */
 
-	_PTE_OFFSET(a0, a1, a4)
-	l32i	a4, a0, 0	# read pteval
+	_PTE_OFFSET(a0, a1, a3)
+	l32i	a3, a0, 0	# read pteval
 	movi	a1, _PAGE_CA_INVALID
-	ball	a4, a1, 2f
-	bbci.l	a4, _PAGE_WRITABLE_BIT, 2f
+	ball	a3, a1, 2f
+	bbci.l	a3, _PAGE_WRITABLE_BIT, 2f
 
 	movi	a1, _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE
-	or	a4, a4, a1
+	or	a3, a3, a1
 	rsr	a1, excvaddr
-	s32i	a4, a0, 0
+	s32i	a3, a0, 0
 
 	/* We need to flush the cache if we have page coloring. */
 #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
 	dhwb	a0, 0
 #endif
 	pdtlb	a0, a1
-	wdtlb	a4, a0
+	wdtlb	a3, a0
 
 	/* Exit critical section. */
 
 	movi	a0, 0
+	rsr	a3, excsave1
 	s32i	a0, a3, EXC_TABLE_FIXUP
 
 	/* Restore the working registers, and return. */
 
-	l32i	a4, a2, PT_AREG4
+	l32i	a3, a2, PT_AREG3
 	l32i	a1, a2, PT_AREG1
 	l32i	a0, a2, PT_AREG0
 	l32i	a2, a2, PT_DEPC
 
-	/* Restore excsave1 and a3. */
-
-	xsr	a3, excsave1
 	bgeui	a2, VALID_DOUBLE_EXCEPTION_ADDRESS, 1f
 
 	rsr	a2, depc
@@ -1853,11 +1715,8 @@ ENTRY(fast_store_prohibited) | |||
1853 | 1715 | ||
1854 | 2: /* If there was a problem, handle fault in C */ | 1716 | 2: /* If there was a problem, handle fault in C */ |
1855 | 1717 | ||
1856 | rsr a4, depc # still holds a2 | 1718 | rsr a3, depc # still holds a2 |
1857 | xsr a3, excsave1 | 1719 | s32i a3, a2, PT_AREG2 |
1858 | s32i a4, a2, PT_AREG2 | ||
1859 | s32i a3, a2, PT_AREG3 | ||
1860 | l32i a4, a2, PT_AREG4 | ||
1861 | mov a1, a2 | 1720 | mov a1, a2 |
1862 | 1721 | ||
1863 | rsr a2, ps | 1722 | rsr a2, ps |
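In C terms, the fast_store_prohibited path above does roughly the following
(a minimal sketch reusing the xtensa PTE flag names from the hunk; the real
handler runs entirely in assembly inside the fixup critical section and also
writes the updated PTE into the DTLB via pdtlb/wdtlb; ptep stands for the
pointer produced by _PGD_OFFSET/_PTE_OFFSET):

	unsigned long pteval = pte_val(*ptep);

	if ((pteval & _PAGE_CA_INVALID) == _PAGE_CA_INVALID ||
	    !(pteval & (1 << _PAGE_WRITABLE_BIT)))
		goto handle_fault_in_c;		/* the "2:" branch above */

	pteval |= _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_HW_WRITE;
	set_pte(ptep, __pte(pteval));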
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c index 101012bc1ff6..946fb8d06c8b 100644 --- a/arch/xtensa/kernel/setup.c +++ b/arch/xtensa/kernel/setup.c | |||
@@ -584,8 +584,8 @@ c_show(struct seq_file *f, void *slot) | |||
584 | "bogomips\t: %lu.%02lu\n", | 584 | "bogomips\t: %lu.%02lu\n", |
585 | XCHAL_BUILD_UNIQUE_ID, | 585 | XCHAL_BUILD_UNIQUE_ID, |
586 | XCHAL_HAVE_BE ? "big" : "little", | 586 | XCHAL_HAVE_BE ? "big" : "little", |
587 | CCOUNT_PER_JIFFY/(1000000/HZ), | 587 | ccount_freq/1000000, |
588 | (CCOUNT_PER_JIFFY/(10000/HZ)) % 100, | 588 | (ccount_freq/10000) % 100, |
589 | loops_per_jiffy/(500000/HZ), | 589 | loops_per_jiffy/(500000/HZ), |
590 | (loops_per_jiffy/(5000/HZ)) % 100); | 590 | (loops_per_jiffy/(5000/HZ)) % 100); |
591 | 591 | ||
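The two /proc/cpuinfo expressions are arithmetically identical, since
CCOUNT_PER_JIFFY was defined as ccount_freq / HZ in the calibrated case; the
rewrite only drops the intermediate constant. A small user-space check
(hypothetical 50 MHz clock, HZ = 100):

	#include <stdio.h>
	#define HZ 100

	int main(void)
	{
		unsigned long ccount_freq = 50000000UL;
		unsigned long per_jiffy = ccount_freq / HZ;	/* old CCOUNT_PER_JIFFY */

		/* old expression */
		printf("%lu.%02lu\n", per_jiffy / (1000000 / HZ),
		       (per_jiffy / (10000 / HZ)) % 100);
		/* new expression */
		printf("%lu.%02lu\n", ccount_freq / 1000000,
		       (ccount_freq / 10000) % 100);
		return 0;	/* both lines print 50.00 */
	}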
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c index 24bb0c1776ba..9af3dd88ad7e 100644 --- a/arch/xtensa/kernel/time.c +++ b/arch/xtensa/kernel/time.c | |||
@@ -29,9 +29,7 @@ | |||
29 | #include <asm/timex.h> | 29 | #include <asm/timex.h> |
30 | #include <asm/platform.h> | 30 | #include <asm/platform.h> |
31 | 31 | ||
32 | #ifdef CONFIG_XTENSA_CALIBRATE_CCOUNT | ||
33 | unsigned long ccount_freq; /* ccount Hz */ | 32 | unsigned long ccount_freq; /* ccount Hz */ |
34 | #endif | ||
35 | 33 | ||
36 | static cycle_t ccount_read(struct clocksource *cs) | 34 | static cycle_t ccount_read(struct clocksource *cs) |
37 | { | 35 | { |
@@ -129,8 +127,10 @@ void __init time_init(void) | |||
129 | platform_calibrate_ccount(); | 127 | platform_calibrate_ccount(); |
130 | printk("%d.%02d MHz\n", (int)ccount_freq/1000000, | 128 | printk("%d.%02d MHz\n", (int)ccount_freq/1000000, |
131 | (int)(ccount_freq/10000)%100); | 129 | (int)(ccount_freq/10000)%100); |
130 | #else | ||
131 | ccount_freq = CONFIG_XTENSA_CPU_CLOCK*1000000UL; | ||
132 | #endif | 132 | #endif |
133 | clocksource_register_hz(&ccount_clocksource, CCOUNT_PER_JIFFY * HZ); | 133 | clocksource_register_hz(&ccount_clocksource, ccount_freq); |
134 | 134 | ||
135 | ccount_timer.evt.cpumask = cpumask_of(0); | 135 | ccount_timer.evt.cpumask = cpumask_of(0); |
136 | ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT); | 136 | ccount_timer.evt.irq = irq_create_mapping(NULL, LINUX_TIMER_INT); |
@@ -164,7 +164,7 @@ irqreturn_t timer_interrupt (int irq, void *dev_id) | |||
164 | #ifndef CONFIG_GENERIC_CALIBRATE_DELAY | 164 | #ifndef CONFIG_GENERIC_CALIBRATE_DELAY |
165 | void calibrate_delay(void) | 165 | void calibrate_delay(void) |
166 | { | 166 | { |
167 | loops_per_jiffy = CCOUNT_PER_JIFFY; | 167 | loops_per_jiffy = ccount_freq / HZ; |
168 | printk("Calibrating delay loop (skipped)... " | 168 | printk("Calibrating delay loop (skipped)... " |
169 | "%lu.%02lu BogoMIPS preset\n", | 169 | "%lu.%02lu BogoMIPS preset\n", |
170 | loops_per_jiffy/(1000000/HZ), | 170 | loops_per_jiffy/(1000000/HZ), |
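With CCOUNT_PER_JIFFY gone, the preset loops_per_jiffy becomes ccount_freq / HZ
directly; at a hypothetical 50 MHz with HZ = 100 that is 500000, which the
unchanged printk then reports as "50.00 BogoMIPS preset" (500000 / (1000000/100) = 50).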
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index f9e175382aa9..cb8fd44caabc 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S | |||
@@ -78,6 +78,7 @@ ENTRY(_UserExceptionVector) | |||
78 | s32i a0, a2, PT_DEPC # mark it as a regular exception | 78 | s32i a0, a2, PT_DEPC # mark it as a regular exception |
79 | addx4 a0, a0, a3 # find entry in table | 79 | addx4 a0, a0, a3 # find entry in table |
80 | l32i a0, a0, EXC_TABLE_FAST_USER # load handler | 80 | l32i a0, a0, EXC_TABLE_FAST_USER # load handler |
81 | xsr a3, excsave1 # restore a3 and dispatch table | ||
81 | jx a0 | 82 | jx a0 |
82 | 83 | ||
83 | ENDPROC(_UserExceptionVector) | 84 | ENDPROC(_UserExceptionVector) |
@@ -104,6 +105,7 @@ ENTRY(_KernelExceptionVector) | |||
104 | s32i a0, a2, PT_DEPC # mark it as a regular exception | 105 | s32i a0, a2, PT_DEPC # mark it as a regular exception |
105 | addx4 a0, a0, a3 # find entry in table | 106 | addx4 a0, a0, a3 # find entry in table |
106 | l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address | 107 | l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler address |
108 | xsr a3, excsave1 # restore a3 and dispatch table | ||
107 | jx a0 | 109 | jx a0 |
108 | 110 | ||
109 | ENDPROC(_KernelExceptionVector) | 111 | ENDPROC(_KernelExceptionVector) |
@@ -168,7 +170,7 @@ ENDPROC(_KernelExceptionVector) | |||
168 | * | 170 | * |
169 | * a0: DEPC | 171 | * a0: DEPC |
170 | * a1: a1 | 172 | * a1: a1 |
171 | * a2: trashed, original value in EXC_TABLE_DOUBLE_A2 | 173 | * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE |
172 | * a3: exctable | 174 | * a3: exctable |
173 | * depc: a0 | 175 | * depc: a0 |
174 | * excsave_1: a3 | 176 | * excsave_1: a3 |
@@ -204,47 +206,46 @@ ENDPROC(_KernelExceptionVector) | |||
204 | 206 | ||
205 | .section .DoubleExceptionVector.text, "ax" | 207 | .section .DoubleExceptionVector.text, "ax" |
206 | .begin literal_prefix .DoubleExceptionVector | 208 | .begin literal_prefix .DoubleExceptionVector |
209 | .globl _DoubleExceptionVector_WindowUnderflow | ||
210 | .globl _DoubleExceptionVector_WindowOverflow | ||
207 | 211 | ||
208 | ENTRY(_DoubleExceptionVector) | 212 | ENTRY(_DoubleExceptionVector) |
209 | 213 | ||
210 | /* Deliberately destroy excsave (don't assume it's value was valid). */ | 214 | xsr a3, excsave1 |
211 | 215 | s32i a2, a3, EXC_TABLE_DOUBLE_SAVE | |
212 | wsr a3, excsave1 # save a3 | ||
213 | 216 | ||
214 | /* Check for kernel double exception (usually fatal). */ | 217 | /* Check for kernel double exception (usually fatal). */ |
215 | 218 | ||
216 | rsr a3, ps | 219 | rsr a2, ps |
217 | _bbci.l a3, PS_UM_BIT, .Lksp | 220 | _bbci.l a2, PS_UM_BIT, .Lksp |
218 | 221 | ||
219 | /* Check if we are currently handling a window exception. */ | 222 | /* Check if we are currently handling a window exception. */ |
220 | /* Note: We don't need to indicate that we enter a critical section. */ | 223 | /* Note: We don't need to indicate that we enter a critical section. */ |
221 | 224 | ||
222 | xsr a0, depc # get DEPC, save a0 | 225 | xsr a0, depc # get DEPC, save a0 |
223 | 226 | ||
224 | movi a3, WINDOW_VECTORS_VADDR | 227 | movi a2, WINDOW_VECTORS_VADDR |
225 | _bltu a0, a3, .Lfixup | 228 | _bltu a0, a2, .Lfixup |
226 | addi a3, a3, WINDOW_VECTORS_SIZE | 229 | addi a2, a2, WINDOW_VECTORS_SIZE |
227 | _bgeu a0, a3, .Lfixup | 230 | _bgeu a0, a2, .Lfixup |
228 | 231 | ||
229 | /* Window overflow/underflow exception. Get stack pointer. */ | 232 | /* Window overflow/underflow exception. Get stack pointer. */ |
230 | 233 | ||
231 | mov a3, a2 | 234 | l32i a2, a3, EXC_TABLE_KSTK |
232 | /* This explicit literal and the following references to it are made | ||
233 | * in order to fit DoubleExceptionVector.literals into the available | ||
234 | * 16-byte gap before DoubleExceptionVector.text in the absence of | ||
235 | * link time relaxation. See kernel/vmlinux.lds.S | ||
236 | */ | ||
237 | .literal .Lexc_table, exc_table | ||
238 | l32r a2, .Lexc_table | ||
239 | l32i a2, a2, EXC_TABLE_KSTK | ||
240 | 235 | ||
241 | /* Check for overflow/underflow exception, jump if overflow. */ | 236 | /* Check for overflow/underflow exception, jump if overflow. */ |
242 | 237 | ||
243 | _bbci.l a0, 6, .Lovfl | 238 | _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow |
244 | |||
245 | /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ | ||
246 | 239 | ||
247 | /* Restart window underflow exception. | 240 | /* |
241 | * Restart window underflow exception. | ||
242 | * Currently: | ||
243 | * depc = orig a0, | ||
244 | * a0 = orig DEPC, | ||
245 | * a2 = new sp based on KSTK from exc_table | ||
246 | * a3 = excsave_1 | ||
247 | * excsave_1 = orig a3 | ||
248 | * | ||
248 | * We return to the instruction in user space that caused the window | 249 | * We return to the instruction in user space that caused the window |
249 | * underflow exception. Therefore, we change window base to the value | 250 | * underflow exception. Therefore, we change window base to the value |
250 | * before we entered the window underflow exception and prepare the | 251 | * before we entered the window underflow exception and prepare the |
@@ -252,10 +253,11 @@ ENTRY(_DoubleExceptionVector) | |||
252 | * by changing depc (in a0). | 253 | * by changing depc (in a0). |
253 | * Note: We can trash the current window frame (a0...a3) and depc! | 254 | * Note: We can trash the current window frame (a0...a3) and depc! |
254 | */ | 255 | */ |
255 | 256 | _DoubleExceptionVector_WindowUnderflow: | |
257 | xsr a3, excsave1 | ||
256 | wsr a2, depc # save stack pointer temporarily | 258 | wsr a2, depc # save stack pointer temporarily |
257 | rsr a0, ps | 259 | rsr a0, ps |
258 | extui a0, a0, PS_OWB_SHIFT, 4 | 260 | extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH |
259 | wsr a0, windowbase | 261 | wsr a0, windowbase |
260 | rsync | 262 | rsync |
261 | 263 | ||
@@ -263,28 +265,57 @@ ENTRY(_DoubleExceptionVector) | |||
263 | 265 | ||
264 | xsr a2, depc # save a2 and get stack pointer | 266 | xsr a2, depc # save a2 and get stack pointer |
265 | s32i a0, a2, PT_AREG0 | 267 | s32i a0, a2, PT_AREG0 |
266 | 268 | xsr a3, excsave1 | |
267 | wsr a3, excsave1 # save a3 | ||
268 | l32r a3, .Lexc_table | ||
269 | |||
270 | rsr a0, exccause | 269 | rsr a0, exccause |
271 | s32i a0, a2, PT_DEPC # mark it as a regular exception | 270 | s32i a0, a2, PT_DEPC # mark it as a regular exception |
272 | addx4 a0, a0, a3 | 271 | addx4 a0, a0, a3 |
272 | xsr a3, excsave1 | ||
273 | l32i a0, a0, EXC_TABLE_FAST_USER | 273 | l32i a0, a0, EXC_TABLE_FAST_USER |
274 | jx a0 | 274 | jx a0 |
275 | 275 | ||
276 | .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ | 276 | /* |
277 | * We only allow the ITLB miss exception if we are in kernel space. | ||
278 | * All other exceptions are unexpected and thus unrecoverable! | ||
279 | */ | ||
280 | |||
281 | #ifdef CONFIG_MMU | ||
282 | .extern fast_second_level_miss_double_kernel | ||
283 | |||
284 | .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ | ||
285 | |||
286 | rsr a3, exccause | ||
287 | beqi a3, EXCCAUSE_ITLB_MISS, 1f | ||
288 | addi a3, a3, -EXCCAUSE_DTLB_MISS | ||
289 | bnez a3, .Lunrecoverable | ||
290 | 1: movi a3, fast_second_level_miss_double_kernel | ||
291 | jx a3 | ||
292 | #else | ||
293 | .equ .Lksp, .Lunrecoverable | ||
294 | #endif | ||
295 | |||
296 | /* Critical! We can't handle this situation. PANIC! */ | ||
277 | 297 | ||
278 | /* a0: depc, a1: a1, a2: a2, a3: trashed, depc: a0, excsave1: a3 */ | 298 | .extern unrecoverable_exception |
279 | 299 | ||
280 | l32r a3, .Lexc_table | 300 | .Lunrecoverable_fixup: |
281 | s32i a2, a3, EXC_TABLE_DOUBLE_SAVE # temporary variable | 301 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE |
302 | xsr a0, depc | ||
303 | |||
304 | .Lunrecoverable: | ||
305 | rsr a3, excsave1 | ||
306 | wsr a0, excsave1 | ||
307 | movi a0, unrecoverable_exception | ||
308 | callx0 a0 | ||
309 | |||
310 | .Lfixup:/* Check for a fixup handler or if we were in a critical section. */ | ||
311 | |||
312 | /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave1: a3 */ | ||
282 | 313 | ||
283 | /* Enter critical section. */ | 314 | /* Enter critical section. */ |
284 | 315 | ||
285 | l32i a2, a3, EXC_TABLE_FIXUP | 316 | l32i a2, a3, EXC_TABLE_FIXUP |
286 | s32i a3, a3, EXC_TABLE_FIXUP | 317 | s32i a3, a3, EXC_TABLE_FIXUP |
287 | beq a2, a3, .Lunrecoverable_fixup # critical! | 318 | beq a2, a3, .Lunrecoverable_fixup # critical section |
288 | beqz a2, .Ldflt # no handler was registered | 319 | beqz a2, .Ldflt # no handler was registered |
289 | 320 | ||
290 | /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */ | 321 | /* a0: depc, a1: a1, a2: trash, a3: exctable, depc: a0, excsave: a3 */ |
@@ -293,58 +324,145 @@ ENTRY(_DoubleExceptionVector) | |||
293 | 324 | ||
294 | .Ldflt: /* Get stack pointer. */ | 325 | .Ldflt: /* Get stack pointer. */ |
295 | 326 | ||
296 | l32i a3, a3, EXC_TABLE_DOUBLE_SAVE | 327 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE |
297 | addi a2, a3, -PT_USER_SIZE | 328 | addi a2, a2, -PT_USER_SIZE |
298 | |||
299 | .Lovfl: /* Jump to default handlers. */ | ||
300 | 329 | ||
301 | /* a0: depc, a1: a1, a2: kstk, a3: a2, depc: a0, excsave: a3 */ | 330 | /* a0: depc, a1: a1, a2: kstk, a3: exctable, depc: a0, excsave: a3 */ |
302 | 331 | ||
303 | xsr a3, depc | ||
304 | s32i a0, a2, PT_DEPC | 332 | s32i a0, a2, PT_DEPC |
305 | s32i a3, a2, PT_AREG0 | 333 | l32i a0, a3, EXC_TABLE_DOUBLE_SAVE |
334 | xsr a0, depc | ||
335 | s32i a0, a2, PT_AREG0 | ||
306 | 336 | ||
307 | /* a0: avail, a1: a1, a2: kstk, a3: avail, depc: a2, excsave: a3 */ | 337 | /* a0: avail, a1: a1, a2: kstk, a3: exctable, depc: a2, excsave: a3 */ |
308 | 338 | ||
309 | l32r a3, .Lexc_table | ||
310 | rsr a0, exccause | 339 | rsr a0, exccause |
311 | addx4 a0, a0, a3 | 340 | addx4 a0, a0, a3 |
341 | xsr a3, excsave1 | ||
312 | l32i a0, a0, EXC_TABLE_FAST_USER | 342 | l32i a0, a0, EXC_TABLE_FAST_USER |
313 | jx a0 | 343 | jx a0 |
314 | 344 | ||
315 | /* | 345 | /* |
316 | * We only allow the ITLB miss exception if we are in kernel space. | 346 | * Restart window OVERFLOW exception. |
317 | * All other exceptions are unexpected and thus unrecoverable! | 347 | * Currently: |
348 | * depc = orig a0, | ||
349 | * a0 = orig DEPC, | ||
350 | * a2 = new sp based on KSTK from exc_table | ||
351 | * a3 = EXCSAVE_1 | ||
352 | * excsave_1 = orig a3 | ||
353 | * | ||
354 | * We return to the instruction in user space that caused the window | ||
355 | * overflow exception. Therefore, we change window base to the value | ||
356 | * before we entered the window overflow exception and prepare the | ||
357 | * registers to return as if we were coming from a regular exception | ||
358 | * by changing DEPC (in a0). | ||
359 | * | ||
360 | * NOTE: We CANNOT trash the current window frame (a0...a3), but we | ||
361 | * can clobber depc. | ||
362 | * | ||
363 | * The tricky part here is that overflow8 and overflow12 handlers | ||
364 | * save a0, then clobber a0. To restart the handler, we have to restore | ||
365 | * a0 if the double exception was past the point where a0 was clobbered. | ||
366 | * | ||
367 | * To keep things simple, we take advantage of the fact all overflow | ||
368 | * handlers save a0 in their very first instruction. If DEPC was past | ||
369 | * that instruction, we can safely restore a0 from where it was saved | ||
370 | * on the stack. | ||
371 | * | ||
372 | * a0: depc, a1: a1, a2: kstk, a3: exc_table, depc: a0, excsave1: a3 | ||
318 | */ | 373 | */ |
374 | _DoubleExceptionVector_WindowOverflow: | ||
375 | extui a2, a0, 0, 6 # get offset into 64-byte vector handler | ||
376 | beqz a2, 1f # if at start of vector, don't restore | ||
319 | 377 | ||
320 | #ifdef CONFIG_MMU | 378 | addi a0, a0, -128 |
321 | .extern fast_second_level_miss_double_kernel | 379 | bbsi a0, 8, 1f # don't restore except for overflow 8 and 12 |
380 | bbsi a0, 7, 2f | ||
322 | 381 | ||
323 | .Lksp: /* a0: a0, a1: a1, a2: a2, a3: trashed, depc: depc, excsave: a3 */ | 382 | /* |
383 | * Restore a0 as saved by _WindowOverflow8(). | ||
384 | * | ||
385 | * FIXME: we really need a fixup handler for this L32E, | ||
386 | * for the extremely unlikely case where the overflow handler's | ||
387 | * reference thru a0 gets a hardware TLB refill that bumps out | ||
388 | * the (distinct, aliasing) TLB entry that mapped its prior | ||
389 | * references thru a9, and where our reference now thru a9 | ||
390 | * gets a 2nd-level miss exception (not hardware TLB refill). | ||
391 | */ | ||
324 | 392 | ||
325 | rsr a3, exccause | 393 | l32e a2, a9, -16 |
326 | beqi a3, EXCCAUSE_ITLB_MISS, 1f | 394 | wsr a2, depc # replace the saved a0 |
327 | addi a3, a3, -EXCCAUSE_DTLB_MISS | 395 | j 1f |
328 | bnez a3, .Lunrecoverable | ||
329 | 1: movi a3, fast_second_level_miss_double_kernel | ||
330 | jx a3 | ||
331 | #else | ||
332 | .equ .Lksp, .Lunrecoverable | ||
333 | #endif | ||
334 | 396 | ||
335 | /* Critical! We can't handle this situation. PANIC! */ | 397 | 2: |
398 | /* | ||
399 | * Restore a0 as saved by _WindowOverflow12(). | ||
400 | * | ||
401 | * FIXME: we really need a fixup handler for this L32E, | ||
402 | * for the extremely unlikely case where the overflow handler's | ||
403 | * reference thru a0 gets a hardware TLB refill that bumps out | ||
404 | * the (distinct, aliasing) TLB entry that mapped its prior | ||
405 | * references thru a13, and where our reference now thru a13 | ||
406 | * gets a 2nd-level miss exception (not hardware TLB refill). | ||
407 | */ | ||
336 | 408 | ||
337 | .extern unrecoverable_exception | 409 | l32e a2, a13, -16 |
410 | wsr a2, depc # replace the saved a0 | ||
411 | 1: | ||
412 | /* | ||
413 | * Restore WindowBase while leaving all address registers restored. | ||
414 | * We have to use ROTW for this, because WSR.WINDOWBASE requires | ||
415 | * an address register (which would prevent restore). | ||
416 | * | ||
417 | * Window Base goes from 0 ... 7 (Modulo 8) | ||
418 | * Window Start is 8 bits; Ex: (0b0101 0101):0x55 from a series of call4s | ||
419 | */ | ||
420 | |||
421 | rsr a0, ps | ||
422 | extui a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH | ||
423 | rsr a2, windowbase | ||
424 | sub a0, a2, a0 | ||
425 | extui a0, a0, 0, 3 | ||
338 | 426 | ||
339 | .Lunrecoverable_fixup: | ||
340 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE | 427 | l32i a2, a3, EXC_TABLE_DOUBLE_SAVE |
341 | xsr a0, depc | 428 | xsr a3, excsave1 |
429 | beqi a0, 1, .L1pane | ||
430 | beqi a0, 3, .L3pane | ||
342 | 431 | ||
343 | .Lunrecoverable: | 432 | rsr a0, depc |
344 | rsr a3, excsave1 | 433 | rotw -2 |
345 | wsr a0, excsave1 | 434 | |
346 | movi a0, unrecoverable_exception | 435 | /* |
347 | callx0 a0 | 436 | * We are now in the user code's original window frame. |
437 | * Process the exception as a user exception as if it was | ||
438 | * taken by the user code. | ||
439 | * | ||
440 | * This is similar to the user exception vector, | ||
441 | * except that PT_DEPC isn't set to EXCCAUSE. | ||
442 | */ | ||
443 | 1: | ||
444 | xsr a3, excsave1 | ||
445 | wsr a2, depc | ||
446 | l32i a2, a3, EXC_TABLE_KSTK | ||
447 | s32i a0, a2, PT_AREG0 | ||
448 | rsr a0, exccause | ||
449 | |||
450 | s32i a0, a2, PT_DEPC | ||
451 | |||
452 | addx4 a0, a0, a3 | ||
453 | l32i a0, a0, EXC_TABLE_FAST_USER | ||
454 | xsr a3, excsave1 | ||
455 | jx a0 | ||
456 | |||
457 | .L1pane: | ||
458 | rsr a0, depc | ||
459 | rotw -1 | ||
460 | j 1b | ||
461 | |||
462 | .L3pane: | ||
463 | rsr a0, depc | ||
464 | rotw -3 | ||
465 | j 1b | ||
348 | 466 | ||
349 | .end literal_prefix | 467 | .end literal_prefix |
350 | 468 | ||
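The WindowBase arithmetic in the overflow path above reduces to a small
modular computation (sketch in C, assuming ps and windowbase hold the raw
special-register values and PS_OWB_* come from the same headers as the hunk):

	unsigned int owb   = (ps >> PS_OWB_SHIFT) & ((1u << PS_OWB_WIDTH) - 1);
	unsigned int panes = (windowbase - owb) & 7;	/* extui a0, a0, 0, 3 */

A result of 1 or 3 takes the .L1pane/.L3pane exits (rotw -1 / rotw -3);
everything else rotates back by two panes (rotw -2) before the exception is
re-dispatched as a regular user exception.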
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c index d8507f812f46..74a60c7e085e 100644 --- a/arch/xtensa/kernel/xtensa_ksyms.c +++ b/arch/xtensa/kernel/xtensa_ksyms.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/io.h> | 25 | #include <asm/io.h> |
26 | #include <asm/page.h> | 26 | #include <asm/page.h> |
27 | #include <asm/pgalloc.h> | 27 | #include <asm/pgalloc.h> |
28 | #include <asm/ftrace.h> | ||
28 | #ifdef CONFIG_BLK_DEV_FD | 29 | #ifdef CONFIG_BLK_DEV_FD |
29 | #include <asm/floppy.h> | 30 | #include <asm/floppy.h> |
30 | #endif | 31 | #endif |
diff --git a/block/partitions/efi.c b/block/partitions/efi.c index 1a5ec9a03c00..1eb09ee5311b 100644 --- a/block/partitions/efi.c +++ b/block/partitions/efi.c | |||
@@ -186,6 +186,7 @@ invalid: | |||
186 | */ | 186 | */ |
187 | static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors) | 187 | static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors) |
188 | { | 188 | { |
189 | uint32_t sz = 0; | ||
189 | int i, part = 0, ret = 0; /* invalid by default */ | 190 | int i, part = 0, ret = 0; /* invalid by default */ |
190 | 191 | ||
191 | if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE) | 192 | if (!mbr || le16_to_cpu(mbr->signature) != MSDOS_MBR_SIGNATURE) |
@@ -216,12 +217,15 @@ check_hybrid: | |||
216 | /* | 217 | /* |
217 | * Protective MBRs take up the lesser of the whole disk | 218 | * Protective MBRs take up the lesser of the whole disk |
218 | * or 2 TiB (32bit LBA), ignoring the rest of the disk. | 219 | * or 2 TiB (32bit LBA), ignoring the rest of the disk. |
220 | * Some partitioning programs, nonetheless, choose to set | ||
221 | * the size to the maximum 32-bit limitation, disregarding | ||
222 | * the disk size. | ||
219 | * | 223 | * |
220 | * Hybrid MBRs do not necessarily comply with this. | 224 | * Hybrid MBRs do not necessarily comply with this. |
221 | */ | 225 | */ |
222 | if (ret == GPT_MBR_PROTECTIVE) { | 226 | if (ret == GPT_MBR_PROTECTIVE) { |
223 | if (le32_to_cpu(mbr->partition_record[part].size_in_lba) != | 227 | sz = le32_to_cpu(mbr->partition_record[part].size_in_lba); |
224 | min((uint32_t) total_sectors - 1, 0xFFFFFFFF)) | 228 | if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF) |
225 | ret = 0; | 229 | ret = 0; |
226 | } | 230 | } |
227 | done: | 231 | done: |
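The relaxed protective-MBR check above boils down to accepting either of two
sizes (a minimal sketch, hypothetical helper name):

	#include <stdint.h>
	#include <stdbool.h>

	static bool pmbr_size_ok(uint32_t size_in_lba, uint64_t total_sectors)
	{
		/* exact disk size, or the 32-bit "whole disk" cap that some
		 * partitioning tools write regardless of the real size */
		return size_in_lba == (uint32_t)(total_sectors - 1) ||
		       size_in_lba == 0xFFFFFFFF;
	}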
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 4329a29a5310..b9c81b7c3a3b 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c | |||
@@ -315,68 +315,47 @@ static int em_sti_probe(struct platform_device *pdev) | |||
315 | { | 315 | { |
316 | struct em_sti_priv *p; | 316 | struct em_sti_priv *p; |
317 | struct resource *res; | 317 | struct resource *res; |
318 | int irq, ret; | 318 | int irq; |
319 | 319 | ||
320 | p = kzalloc(sizeof(*p), GFP_KERNEL); | 320 | p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL); |
321 | if (p == NULL) { | 321 | if (p == NULL) { |
322 | dev_err(&pdev->dev, "failed to allocate driver data\n"); | 322 | dev_err(&pdev->dev, "failed to allocate driver data\n"); |
323 | ret = -ENOMEM; | 323 | return -ENOMEM; |
324 | goto err0; | ||
325 | } | 324 | } |
326 | 325 | ||
327 | p->pdev = pdev; | 326 | p->pdev = pdev; |
328 | platform_set_drvdata(pdev, p); | 327 | platform_set_drvdata(pdev, p); |
329 | 328 | ||
330 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
331 | if (!res) { | ||
332 | dev_err(&pdev->dev, "failed to get I/O memory\n"); | ||
333 | ret = -EINVAL; | ||
334 | goto err0; | ||
335 | } | ||
336 | |||
337 | irq = platform_get_irq(pdev, 0); | 329 | irq = platform_get_irq(pdev, 0); |
338 | if (irq < 0) { | 330 | if (irq < 0) { |
339 | dev_err(&pdev->dev, "failed to get irq\n"); | 331 | dev_err(&pdev->dev, "failed to get irq\n"); |
340 | ret = -EINVAL; | 332 | return -EINVAL; |
341 | goto err0; | ||
342 | } | 333 | } |
343 | 334 | ||
344 | /* map memory, let base point to the STI instance */ | 335 | /* map memory, let base point to the STI instance */ |
345 | p->base = ioremap_nocache(res->start, resource_size(res)); | 336 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
346 | if (p->base == NULL) { | 337 | p->base = devm_ioremap_resource(&pdev->dev, res); |
347 | dev_err(&pdev->dev, "failed to remap I/O memory\n"); | 338 | if (IS_ERR(p->base)) |
348 | ret = -ENXIO; | 339 | return PTR_ERR(p->base); |
349 | goto err0; | ||
350 | } | ||
351 | 340 | ||
352 | /* get hold of clock */ | 341 | /* get hold of clock */ |
353 | p->clk = clk_get(&pdev->dev, "sclk"); | 342 | p->clk = devm_clk_get(&pdev->dev, "sclk"); |
354 | if (IS_ERR(p->clk)) { | 343 | if (IS_ERR(p->clk)) { |
355 | dev_err(&pdev->dev, "cannot get clock\n"); | 344 | dev_err(&pdev->dev, "cannot get clock\n"); |
356 | ret = PTR_ERR(p->clk); | 345 | return PTR_ERR(p->clk); |
357 | goto err1; | ||
358 | } | 346 | } |
359 | 347 | ||
360 | if (request_irq(irq, em_sti_interrupt, | 348 | if (devm_request_irq(&pdev->dev, irq, em_sti_interrupt, |
361 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, | 349 | IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING, |
362 | dev_name(&pdev->dev), p)) { | 350 | dev_name(&pdev->dev), p)) { |
363 | dev_err(&pdev->dev, "failed to request low IRQ\n"); | 351 | dev_err(&pdev->dev, "failed to request low IRQ\n"); |
364 | ret = -ENOENT; | 352 | return -ENOENT; |
365 | goto err2; | ||
366 | } | 353 | } |
367 | 354 | ||
368 | raw_spin_lock_init(&p->lock); | 355 | raw_spin_lock_init(&p->lock); |
369 | em_sti_register_clockevent(p); | 356 | em_sti_register_clockevent(p); |
370 | em_sti_register_clocksource(p); | 357 | em_sti_register_clocksource(p); |
371 | return 0; | 358 | return 0; |
372 | |||
373 | err2: | ||
374 | clk_put(p->clk); | ||
375 | err1: | ||
376 | iounmap(p->base); | ||
377 | err0: | ||
378 | kfree(p); | ||
379 | return ret; | ||
380 | } | 359 | } |
381 | 360 | ||
382 | static int em_sti_remove(struct platform_device *pdev) | 361 | static int em_sti_remove(struct platform_device *pdev) |
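The conversion above is the standard devm pattern: resources obtained through
devm_* are released automatically both on probe failure and at driver detach,
which is what lets the err0/err1/err2 unwind labels disappear. The resulting
shape, reduced to a sketch (hypothetical foo_* names and types):

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_priv *p;
		struct resource *res;
		int irq;

		p = devm_kzalloc(&pdev->dev, sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;

		irq = platform_get_irq(pdev, 0);
		if (irq < 0)
			return irq;

		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		p->base = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(p->base))
			return PTR_ERR(p->base);

		p->clk = devm_clk_get(&pdev->dev, "sclk");
		if (IS_ERR(p->clk))
			return PTR_ERR(p->clk);

		/* freed automatically, like everything above, on any failure */
		return devm_request_irq(&pdev->dev, irq, foo_irq, IRQF_TIMER,
					dev_name(&pdev->dev), p);
	}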
diff --git a/drivers/clocksource/nomadik-mtu.c b/drivers/clocksource/nomadik-mtu.c index 7d2c2c56f73c..1b74bea12385 100644 --- a/drivers/clocksource/nomadik-mtu.c +++ b/drivers/clocksource/nomadik-mtu.c | |||
@@ -165,7 +165,8 @@ static void nmdk_clkevt_resume(struct clock_event_device *cedev) | |||
165 | 165 | ||
166 | static struct clock_event_device nmdk_clkevt = { | 166 | static struct clock_event_device nmdk_clkevt = { |
167 | .name = "mtu_1", | 167 | .name = "mtu_1", |
168 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, | 168 | .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | |
169 | CLOCK_EVT_FEAT_DYNIRQ, | ||
169 | .rating = 200, | 170 | .rating = 200, |
170 | .set_mode = nmdk_clkevt_mode, | 171 | .set_mode = nmdk_clkevt_mode, |
171 | .set_next_event = nmdk_clkevt_next, | 172 | .set_next_event = nmdk_clkevt_next, |
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 08d0c418c94a..0965e9848b3d 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c | |||
@@ -37,6 +37,7 @@ | |||
37 | 37 | ||
38 | struct sh_cmt_priv { | 38 | struct sh_cmt_priv { |
39 | void __iomem *mapbase; | 39 | void __iomem *mapbase; |
40 | void __iomem *mapbase_str; | ||
40 | struct clk *clk; | 41 | struct clk *clk; |
41 | unsigned long width; /* 16 or 32 bit version of hardware block */ | 42 | unsigned long width; /* 16 or 32 bit version of hardware block */ |
42 | unsigned long overflow_bit; | 43 | unsigned long overflow_bit; |
@@ -79,6 +80,12 @@ struct sh_cmt_priv { | |||
79 | * CMCSR 0xffca0060 16-bit | 80 | * CMCSR 0xffca0060 16-bit |
80 | * CMCNT 0xffca0064 32-bit | 81 | * CMCNT 0xffca0064 32-bit |
81 | * CMCOR 0xffca0068 32-bit | 82 | * CMCOR 0xffca0068 32-bit |
83 | * | ||
84 | * "32-bit counter and 32-bit control" as found on r8a73a4 and r8a7790: | ||
85 | * CMSTR 0xffca0500 32-bit | ||
86 | * CMCSR 0xffca0510 32-bit | ||
87 | * CMCNT 0xffca0514 32-bit | ||
88 | * CMCOR 0xffca0518 32-bit | ||
82 | */ | 89 | */ |
83 | 90 | ||
84 | static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs) | 91 | static unsigned long sh_cmt_read16(void __iomem *base, unsigned long offs) |
@@ -109,9 +116,7 @@ static void sh_cmt_write32(void __iomem *base, unsigned long offs, | |||
109 | 116 | ||
110 | static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p) | 117 | static inline unsigned long sh_cmt_read_cmstr(struct sh_cmt_priv *p) |
111 | { | 118 | { |
112 | struct sh_timer_config *cfg = p->pdev->dev.platform_data; | 119 | return p->read_control(p->mapbase_str, 0); |
113 | |||
114 | return p->read_control(p->mapbase - cfg->channel_offset, 0); | ||
115 | } | 120 | } |
116 | 121 | ||
117 | static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p) | 122 | static inline unsigned long sh_cmt_read_cmcsr(struct sh_cmt_priv *p) |
@@ -127,9 +132,7 @@ static inline unsigned long sh_cmt_read_cmcnt(struct sh_cmt_priv *p) | |||
127 | static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p, | 132 | static inline void sh_cmt_write_cmstr(struct sh_cmt_priv *p, |
128 | unsigned long value) | 133 | unsigned long value) |
129 | { | 134 | { |
130 | struct sh_timer_config *cfg = p->pdev->dev.platform_data; | 135 | p->write_control(p->mapbase_str, 0, value); |
131 | |||
132 | p->write_control(p->mapbase - cfg->channel_offset, 0, value); | ||
133 | } | 136 | } |
134 | 137 | ||
135 | static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p, | 138 | static inline void sh_cmt_write_cmcsr(struct sh_cmt_priv *p, |
@@ -676,7 +679,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name, | |||
676 | static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | 679 | static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) |
677 | { | 680 | { |
678 | struct sh_timer_config *cfg = pdev->dev.platform_data; | 681 | struct sh_timer_config *cfg = pdev->dev.platform_data; |
679 | struct resource *res; | 682 | struct resource *res, *res2; |
680 | int irq, ret; | 683 | int irq, ret; |
681 | ret = -ENXIO; | 684 | ret = -ENXIO; |
682 | 685 | ||
@@ -694,6 +697,9 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
694 | goto err0; | 697 | goto err0; |
695 | } | 698 | } |
696 | 699 | ||
700 | /* optional resource for the shared timer start/stop register */ | ||
701 | res2 = platform_get_resource(p->pdev, IORESOURCE_MEM, 1); | ||
702 | |||
697 | irq = platform_get_irq(p->pdev, 0); | 703 | irq = platform_get_irq(p->pdev, 0); |
698 | if (irq < 0) { | 704 | if (irq < 0) { |
699 | dev_err(&p->pdev->dev, "failed to get irq\n"); | 705 | dev_err(&p->pdev->dev, "failed to get irq\n"); |
@@ -707,6 +713,15 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
707 | goto err0; | 713 | goto err0; |
708 | } | 714 | } |
709 | 715 | ||
716 | /* map second resource for CMSTR */ | ||
717 | p->mapbase_str = ioremap_nocache(res2 ? res2->start : | ||
718 | res->start - cfg->channel_offset, | ||
719 | res2 ? resource_size(res2) : 2); | ||
720 | if (p->mapbase_str == NULL) { | ||
721 | dev_err(&p->pdev->dev, "failed to remap second I/O memory\n"); | ||
722 | goto err1; | ||
723 | } | ||
724 | |||
710 | /* request irq using setup_irq() (too early for request_irq()) */ | 725 | /* request irq using setup_irq() (too early for request_irq()) */ |
711 | p->irqaction.name = dev_name(&p->pdev->dev); | 726 | p->irqaction.name = dev_name(&p->pdev->dev); |
712 | p->irqaction.handler = sh_cmt_interrupt; | 727 | p->irqaction.handler = sh_cmt_interrupt; |
@@ -719,11 +734,17 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
719 | if (IS_ERR(p->clk)) { | 734 | if (IS_ERR(p->clk)) { |
720 | dev_err(&p->pdev->dev, "cannot get clock\n"); | 735 | dev_err(&p->pdev->dev, "cannot get clock\n"); |
721 | ret = PTR_ERR(p->clk); | 736 | ret = PTR_ERR(p->clk); |
722 | goto err1; | 737 | goto err2; |
723 | } | 738 | } |
724 | 739 | ||
725 | p->read_control = sh_cmt_read16; | 740 | if (res2 && (resource_size(res2) == 4)) { |
726 | p->write_control = sh_cmt_write16; | 741 | /* assume both CMSTR and CMCSR to be 32-bit */ |
742 | p->read_control = sh_cmt_read32; | ||
743 | p->write_control = sh_cmt_write32; | ||
744 | } else { | ||
745 | p->read_control = sh_cmt_read16; | ||
746 | p->write_control = sh_cmt_write16; | ||
747 | } | ||
727 | 748 | ||
728 | if (resource_size(res) == 6) { | 749 | if (resource_size(res) == 6) { |
729 | p->width = 16; | 750 | p->width = 16; |
@@ -752,22 +773,23 @@ static int sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) | |||
752 | cfg->clocksource_rating); | 773 | cfg->clocksource_rating); |
753 | if (ret) { | 774 | if (ret) { |
754 | dev_err(&p->pdev->dev, "registration failed\n"); | 775 | dev_err(&p->pdev->dev, "registration failed\n"); |
755 | goto err2; | 776 | goto err3; |
756 | } | 777 | } |
757 | p->cs_enabled = false; | 778 | p->cs_enabled = false; |
758 | 779 | ||
759 | ret = setup_irq(irq, &p->irqaction); | 780 | ret = setup_irq(irq, &p->irqaction); |
760 | if (ret) { | 781 | if (ret) { |
761 | dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); | 782 | dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); |
762 | goto err2; | 783 | goto err3; |
763 | } | 784 | } |
764 | 785 | ||
765 | platform_set_drvdata(pdev, p); | 786 | platform_set_drvdata(pdev, p); |
766 | 787 | ||
767 | return 0; | 788 | return 0; |
768 | err2: | 789 | err3: |
769 | clk_put(p->clk); | 790 | clk_put(p->clk); |
770 | 791 | err2: | |
792 | iounmap(p->mapbase_str); | ||
771 | err1: | 793 | err1: |
772 | iounmap(p->mapbase); | 794 | iounmap(p->mapbase); |
773 | err0: | 795 | err0: |
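The new mapbase_str handling keeps backward compatibility: without a second
memory resource the shared CMSTR register is still reached at the legacy
"channel base minus channel_offset" address with 16-bit accessors, while a
4-byte second resource switches both CMSTR and CMCSR to 32-bit accesses (as
on r8a73a4/r8a7790). Condensed from the hunks above:

	p->mapbase_str = ioremap_nocache(res2 ? res2->start
					      : res->start - cfg->channel_offset,
					 res2 ? resource_size(res2) : 2);

	if (res2 && resource_size(res2) == 4) {		/* 32-bit CMSTR/CMCSR */
		p->read_control  = sh_cmt_read32;
		p->write_control = sh_cmt_write32;
	} else {
		p->read_control  = sh_cmt_read16;
		p->write_control = sh_cmt_write16;
	}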
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 847cab6f6e31..0198504ef6b0 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c | |||
@@ -13,6 +13,19 @@ | |||
13 | * | 13 | * |
14 | * Timer 0 is used as free-running clocksource, while timer 1 is | 14 | * Timer 0 is used as free-running clocksource, while timer 1 is |
15 | * used as clock_event_device. | 15 | * used as clock_event_device. |
16 | * | ||
17 | * --- | ||
18 | * Clocksource driver for Armada 370 and Armada XP SoC. | ||
19 | * This driver implements one compatible string for each SoC, given | ||
20 | * each has its own characteristics: | ||
21 | * | ||
22 | * * Armada 370 has no 25 MHz fixed timer. | ||
23 | * | ||
24 | * * Armada XP cannot work properly without such 25 MHz fixed timer as | ||
25 | * doing otherwise leads to using a clocksource whose frequency varies | ||
26 | * when doing cpufreq frequency changes. | ||
27 | * | ||
28 | * See Documentation/devicetree/bindings/timer/marvell,armada-370-xp-timer.txt | ||
16 | */ | 29 | */ |
17 | 30 | ||
18 | #include <linux/init.h> | 31 | #include <linux/init.h> |
@@ -30,19 +43,18 @@ | |||
30 | #include <linux/module.h> | 43 | #include <linux/module.h> |
31 | #include <linux/sched_clock.h> | 44 | #include <linux/sched_clock.h> |
32 | #include <linux/percpu.h> | 45 | #include <linux/percpu.h> |
33 | #include <linux/time-armada-370-xp.h> | ||
34 | 46 | ||
35 | /* | 47 | /* |
36 | * Timer block registers. | 48 | * Timer block registers. |
37 | */ | 49 | */ |
38 | #define TIMER_CTRL_OFF 0x0000 | 50 | #define TIMER_CTRL_OFF 0x0000 |
39 | #define TIMER0_EN 0x0001 | 51 | #define TIMER0_EN BIT(0) |
40 | #define TIMER0_RELOAD_EN 0x0002 | 52 | #define TIMER0_RELOAD_EN BIT(1) |
41 | #define TIMER0_25MHZ 0x0800 | 53 | #define TIMER0_25MHZ BIT(11) |
42 | #define TIMER0_DIV(div) ((div) << 19) | 54 | #define TIMER0_DIV(div) ((div) << 19) |
43 | #define TIMER1_EN 0x0004 | 55 | #define TIMER1_EN BIT(2) |
44 | #define TIMER1_RELOAD_EN 0x0008 | 56 | #define TIMER1_RELOAD_EN BIT(3) |
45 | #define TIMER1_25MHZ 0x1000 | 57 | #define TIMER1_25MHZ BIT(12) |
46 | #define TIMER1_DIV(div) ((div) << 22) | 58 | #define TIMER1_DIV(div) ((div) << 22) |
47 | #define TIMER_EVENTS_STATUS 0x0004 | 59 | #define TIMER_EVENTS_STATUS 0x0004 |
48 | #define TIMER0_CLR_MASK (~0x1) | 60 | #define TIMER0_CLR_MASK (~0x1) |
@@ -72,6 +84,18 @@ static u32 ticks_per_jiffy; | |||
72 | 84 | ||
73 | static struct clock_event_device __percpu *armada_370_xp_evt; | 85 | static struct clock_event_device __percpu *armada_370_xp_evt; |
74 | 86 | ||
87 | static void timer_ctrl_clrset(u32 clr, u32 set) | ||
88 | { | ||
89 | writel((readl(timer_base + TIMER_CTRL_OFF) & ~clr) | set, | ||
90 | timer_base + TIMER_CTRL_OFF); | ||
91 | } | ||
92 | |||
93 | static void local_timer_ctrl_clrset(u32 clr, u32 set) | ||
94 | { | ||
95 | writel((readl(local_base + TIMER_CTRL_OFF) & ~clr) | set, | ||
96 | local_base + TIMER_CTRL_OFF); | ||
97 | } | ||
98 | |||
75 | static u32 notrace armada_370_xp_read_sched_clock(void) | 99 | static u32 notrace armada_370_xp_read_sched_clock(void) |
76 | { | 100 | { |
77 | return ~readl(timer_base + TIMER0_VAL_OFF); | 101 | return ~readl(timer_base + TIMER0_VAL_OFF); |
@@ -84,7 +108,6 @@ static int | |||
84 | armada_370_xp_clkevt_next_event(unsigned long delta, | 108 | armada_370_xp_clkevt_next_event(unsigned long delta, |
85 | struct clock_event_device *dev) | 109 | struct clock_event_device *dev) |
86 | { | 110 | { |
87 | u32 u; | ||
88 | /* | 111 | /* |
89 | * Clear clockevent timer interrupt. | 112 | * Clear clockevent timer interrupt. |
90 | */ | 113 | */ |
@@ -98,11 +121,8 @@ armada_370_xp_clkevt_next_event(unsigned long delta, | |||
98 | /* | 121 | /* |
99 | * Enable the timer. | 122 | * Enable the timer. |
100 | */ | 123 | */ |
101 | u = readl(local_base + TIMER_CTRL_OFF); | 124 | local_timer_ctrl_clrset(TIMER0_RELOAD_EN, |
102 | u = ((u & ~TIMER0_RELOAD_EN) | TIMER0_EN | | 125 | TIMER0_EN | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); |
103 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); | ||
104 | writel(u, local_base + TIMER_CTRL_OFF); | ||
105 | |||
106 | return 0; | 126 | return 0; |
107 | } | 127 | } |
108 | 128 | ||
@@ -110,8 +130,6 @@ static void | |||
110 | armada_370_xp_clkevt_mode(enum clock_event_mode mode, | 130 | armada_370_xp_clkevt_mode(enum clock_event_mode mode, |
111 | struct clock_event_device *dev) | 131 | struct clock_event_device *dev) |
112 | { | 132 | { |
113 | u32 u; | ||
114 | |||
115 | if (mode == CLOCK_EVT_MODE_PERIODIC) { | 133 | if (mode == CLOCK_EVT_MODE_PERIODIC) { |
116 | 134 | ||
117 | /* | 135 | /* |
@@ -123,18 +141,14 @@ armada_370_xp_clkevt_mode(enum clock_event_mode mode, | |||
123 | /* | 141 | /* |
124 | * Enable timer. | 142 | * Enable timer. |
125 | */ | 143 | */ |
126 | 144 | local_timer_ctrl_clrset(0, TIMER0_RELOAD_EN | | |
127 | u = readl(local_base + TIMER_CTRL_OFF); | 145 | TIMER0_EN | |
128 | 146 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); | |
129 | writel((u | TIMER0_EN | TIMER0_RELOAD_EN | | ||
130 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)), | ||
131 | local_base + TIMER_CTRL_OFF); | ||
132 | } else { | 147 | } else { |
133 | /* | 148 | /* |
134 | * Disable timer. | 149 | * Disable timer. |
135 | */ | 150 | */ |
136 | u = readl(local_base + TIMER_CTRL_OFF); | 151 | local_timer_ctrl_clrset(TIMER0_EN, 0); |
137 | writel(u & ~TIMER0_EN, local_base + TIMER_CTRL_OFF); | ||
138 | 152 | ||
139 | /* | 153 | /* |
140 | * ACK pending timer interrupt. | 154 | * ACK pending timer interrupt. |
@@ -163,14 +177,14 @@ static irqreturn_t armada_370_xp_timer_interrupt(int irq, void *dev_id) | |||
163 | */ | 177 | */ |
164 | static int armada_370_xp_timer_setup(struct clock_event_device *evt) | 178 | static int armada_370_xp_timer_setup(struct clock_event_device *evt) |
165 | { | 179 | { |
166 | u32 u; | 180 | u32 clr = 0, set = 0; |
167 | int cpu = smp_processor_id(); | 181 | int cpu = smp_processor_id(); |
168 | 182 | ||
169 | u = readl(local_base + TIMER_CTRL_OFF); | ||
170 | if (timer25Mhz) | 183 | if (timer25Mhz) |
171 | writel(u | TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); | 184 | set = TIMER0_25MHZ; |
172 | else | 185 | else |
173 | writel(u & ~TIMER0_25MHZ, local_base + TIMER_CTRL_OFF); | 186 | clr = TIMER0_25MHZ; |
187 | local_timer_ctrl_clrset(clr, set); | ||
174 | 188 | ||
175 | evt->name = "armada_370_xp_per_cpu_tick", | 189 | evt->name = "armada_370_xp_per_cpu_tick", |
176 | evt->features = CLOCK_EVT_FEAT_ONESHOT | | 190 | evt->features = CLOCK_EVT_FEAT_ONESHOT | |
@@ -217,36 +231,21 @@ static struct notifier_block armada_370_xp_timer_cpu_nb = { | |||
217 | .notifier_call = armada_370_xp_timer_cpu_notify, | 231 | .notifier_call = armada_370_xp_timer_cpu_notify, |
218 | }; | 232 | }; |
219 | 233 | ||
220 | void __init armada_370_xp_timer_init(void) | 234 | static void __init armada_370_xp_timer_common_init(struct device_node *np) |
221 | { | 235 | { |
222 | u32 u; | 236 | u32 clr = 0, set = 0; |
223 | struct device_node *np; | ||
224 | int res; | 237 | int res; |
225 | 238 | ||
226 | np = of_find_compatible_node(NULL, NULL, "marvell,armada-370-xp-timer"); | ||
227 | timer_base = of_iomap(np, 0); | 239 | timer_base = of_iomap(np, 0); |
228 | WARN_ON(!timer_base); | 240 | WARN_ON(!timer_base); |
229 | local_base = of_iomap(np, 1); | 241 | local_base = of_iomap(np, 1); |
230 | 242 | ||
231 | if (of_find_property(np, "marvell,timer-25Mhz", NULL)) { | 243 | if (timer25Mhz) |
232 | /* The fixed 25MHz timer is available so let's use it */ | 244 | set = TIMER0_25MHZ; |
233 | u = readl(timer_base + TIMER_CTRL_OFF); | 245 | else |
234 | writel(u | TIMER0_25MHZ, | 246 | clr = TIMER0_25MHZ; |
235 | timer_base + TIMER_CTRL_OFF); | 247 | timer_ctrl_clrset(clr, set); |
236 | timer_clk = 25000000; | 248 | local_timer_ctrl_clrset(clr, set); |
237 | } else { | ||
238 | unsigned long rate = 0; | ||
239 | struct clk *clk = of_clk_get(np, 0); | ||
240 | WARN_ON(IS_ERR(clk)); | ||
241 | rate = clk_get_rate(clk); | ||
242 | |||
243 | u = readl(timer_base + TIMER_CTRL_OFF); | ||
244 | writel(u & ~(TIMER0_25MHZ), | ||
245 | timer_base + TIMER_CTRL_OFF); | ||
246 | |||
247 | timer_clk = rate / TIMER_DIVIDER; | ||
248 | timer25Mhz = false; | ||
249 | } | ||
250 | 249 | ||
251 | /* | 250 | /* |
252 | * We use timer 0 as clocksource, and private(local) timer 0 | 251 | * We use timer 0 as clocksource, and private(local) timer 0 |
@@ -268,10 +267,8 @@ void __init armada_370_xp_timer_init(void) | |||
268 | writel(0xffffffff, timer_base + TIMER0_VAL_OFF); | 267 | writel(0xffffffff, timer_base + TIMER0_VAL_OFF); |
269 | writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); | 268 | writel(0xffffffff, timer_base + TIMER0_RELOAD_OFF); |
270 | 269 | ||
271 | u = readl(timer_base + TIMER_CTRL_OFF); | 270 | timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN | |
272 | 271 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)); | |
273 | writel((u | TIMER0_EN | TIMER0_RELOAD_EN | | ||
274 | TIMER0_DIV(TIMER_DIVIDER_SHIFT)), timer_base + TIMER_CTRL_OFF); | ||
275 | 272 | ||
276 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, | 273 | clocksource_mmio_init(timer_base + TIMER0_VAL_OFF, |
277 | "armada_370_xp_clocksource", | 274 | "armada_370_xp_clocksource", |
@@ -293,3 +290,29 @@ void __init armada_370_xp_timer_init(void) | |||
293 | if (!res) | 290 | if (!res) |
294 | armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); | 291 | armada_370_xp_timer_setup(this_cpu_ptr(armada_370_xp_evt)); |
295 | } | 292 | } |
293 | |||
294 | static void __init armada_xp_timer_init(struct device_node *np) | ||
295 | { | ||
296 | struct clk *clk = of_clk_get_by_name(np, "fixed"); | ||
297 | |||
298 | /* The 25 MHz fixed clock is mandatory, and must always be available */ | ||
299 | BUG_ON(IS_ERR(clk)); | ||
300 | timer_clk = clk_get_rate(clk); | ||
301 | |||
302 | armada_370_xp_timer_common_init(np); | ||
303 | } | ||
304 | CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer", | ||
305 | armada_xp_timer_init); | ||
306 | |||
307 | static void __init armada_370_timer_init(struct device_node *np) | ||
308 | { | ||
309 | struct clk *clk = of_clk_get(np, 0); | ||
310 | |||
311 | BUG_ON(IS_ERR(clk)); | ||
312 | timer_clk = clk_get_rate(clk) / TIMER_DIVIDER; | ||
313 | timer25Mhz = false; | ||
314 | |||
315 | armada_370_xp_timer_common_init(np); | ||
316 | } | ||
317 | CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer", | ||
318 | armada_370_timer_init); | ||
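The two clrset helpers introduced above are instances of a common MMIO
read-modify-write idiom, worth naming once in generic form (hypothetical
helper name):

	static void mmio_clrset(void __iomem *reg, u32 clr, u32 set)
	{
		writel((readl(reg) & ~clr) | set, reg);
	}

With it, "disable timer 0" becomes mmio_clrset(base + TIMER_CTRL_OFF,
TIMER0_EN, 0) and "enable with reload" becomes mmio_clrset(base +
TIMER_CTRL_OFF, 0, TIMER0_EN | TIMER0_RELOAD_EN), which is exactly how the
converted call sites read.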
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c index 4fe49d2bfe1d..eea817296513 100644 --- a/drivers/hwmon/amc6821.c +++ b/drivers/hwmon/amc6821.c | |||
@@ -364,7 +364,7 @@ static ssize_t set_pwm1_enable( | |||
364 | if (config < 0) { | 364 | if (config < 0) { |
365 | dev_err(&client->dev, | 365 | dev_err(&client->dev, |
366 | "Error reading configuration register, aborting.\n"); | 366 | "Error reading configuration register, aborting.\n"); |
367 | return -EIO; | 367 | return config; |
368 | } | 368 | } |
369 | 369 | ||
370 | switch (val) { | 370 | switch (val) { |
@@ -416,11 +416,9 @@ static ssize_t get_temp_auto_point_temp( | |||
416 | case 1: | 416 | case 1: |
417 | return sprintf(buf, "%d\n", | 417 | return sprintf(buf, "%d\n", |
418 | data->temp1_auto_point_temp[ix] * 1000); | 418 | data->temp1_auto_point_temp[ix] * 1000); |
419 | break; | ||
420 | case 2: | 419 | case 2: |
421 | return sprintf(buf, "%d\n", | 420 | return sprintf(buf, "%d\n", |
422 | data->temp2_auto_point_temp[ix] * 1000); | 421 | data->temp2_auto_point_temp[ix] * 1000); |
423 | break; | ||
424 | default: | 422 | default: |
425 | dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); | 423 | dev_dbg(dev, "Unknown attr->nr (%d).\n", nr); |
426 | return -EINVAL; | 424 | return -EINVAL; |
@@ -513,7 +511,6 @@ static ssize_t set_temp_auto_point_temp( | |||
513 | count = -EIO; | 511 | count = -EIO; |
514 | } | 512 | } |
515 | goto EXIT; | 513 | goto EXIT; |
516 | break; | ||
517 | case 1: | 514 | case 1: |
518 | ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124); | 515 | ptemp[1] = clamp_val(val / 1000, (ptemp[0] & 0x7C) + 4, 124); |
519 | ptemp[1] &= 0x7C; | 516 | ptemp[1] &= 0x7C; |
@@ -665,7 +662,7 @@ static ssize_t set_fan1_div( | |||
665 | if (config < 0) { | 662 | if (config < 0) { |
666 | dev_err(&client->dev, | 663 | dev_err(&client->dev, |
667 | "Error reading configuration register, aborting.\n"); | 664 | "Error reading configuration register, aborting.\n"); |
668 | return -EIO; | 665 | return config; |
669 | } | 666 | } |
670 | mutex_lock(&data->update_lock); | 667 | mutex_lock(&data->update_lock); |
671 | switch (val) { | 668 | switch (val) { |
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c index b07305622087..2c137b26acb4 100644 --- a/drivers/hwmon/emc2103.c +++ b/drivers/hwmon/emc2103.c | |||
@@ -248,7 +248,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da, | |||
248 | 248 | ||
249 | int result = kstrtol(buf, 10, &val); | 249 | int result = kstrtol(buf, 10, &val); |
250 | if (result < 0) | 250 | if (result < 0) |
251 | return -EINVAL; | 251 | return result; |
252 | 252 | ||
253 | val = DIV_ROUND_CLOSEST(val, 1000); | 253 | val = DIV_ROUND_CLOSEST(val, 1000); |
254 | if ((val < -63) || (val > 127)) | 254 | if ((val < -63) || (val > 127)) |
@@ -272,7 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da, | |||
272 | 272 | ||
273 | int result = kstrtol(buf, 10, &val); | 273 | int result = kstrtol(buf, 10, &val); |
274 | if (result < 0) | 274 | if (result < 0) |
275 | return -EINVAL; | 275 | return result; |
276 | 276 | ||
277 | val = DIV_ROUND_CLOSEST(val, 1000); | 277 | val = DIV_ROUND_CLOSEST(val, 1000); |
278 | if ((val < -63) || (val > 127)) | 278 | if ((val < -63) || (val > 127)) |
@@ -320,7 +320,7 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *da, | |||
320 | 320 | ||
321 | int status = kstrtol(buf, 10, &new_div); | 321 | int status = kstrtol(buf, 10, &new_div); |
322 | if (status < 0) | 322 | if (status < 0) |
323 | return -EINVAL; | 323 | return status; |
324 | 324 | ||
325 | if (new_div == old_div) /* No change */ | 325 | if (new_div == old_div) /* No change */ |
326 | return count; | 326 | return count; |
@@ -394,7 +394,7 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da, | |||
394 | 394 | ||
395 | int result = kstrtol(buf, 10, &rpm_target); | 395 | int result = kstrtol(buf, 10, &rpm_target); |
396 | if (result < 0) | 396 | if (result < 0) |
397 | return -EINVAL; | 397 | return result; |
398 | 398 | ||
399 | /* Datasheet states 16384 as maximum RPM target (table 3.2) */ | 399 | /* Datasheet states 16384 as maximum RPM target (table 3.2) */ |
400 | if ((rpm_target < 0) || (rpm_target > 16384)) | 400 | if ((rpm_target < 0) || (rpm_target > 16384)) |
@@ -440,7 +440,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da, | |||
440 | 440 | ||
441 | int result = kstrtol(buf, 10, &new_value); | 441 | int result = kstrtol(buf, 10, &new_value); |
442 | if (result < 0) | 442 | if (result < 0) |
443 | return -EINVAL; | 443 | return result; |
444 | 444 | ||
445 | mutex_lock(&data->update_lock); | 445 | mutex_lock(&data->update_lock); |
446 | switch (new_value) { | 446 | switch (new_value) { |
diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c index e2b56a2b756c..632f1dc0fe1f 100644 --- a/drivers/hwmon/ibmaem.c +++ b/drivers/hwmon/ibmaem.c | |||
@@ -292,7 +292,7 @@ static int aem_init_ipmi_data(struct aem_ipmi_data *data, int iface, | |||
292 | dev_err(bmc, | 292 | dev_err(bmc, |
293 | "Unable to register user with IPMI interface %d\n", | 293 | "Unable to register user with IPMI interface %d\n", |
294 | data->interface); | 294 | data->interface); |
295 | return -EACCES; | 295 | return err; |
296 | } | 296 | } |
297 | 297 | ||
298 | return 0; | 298 | return 0; |
diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index e633856370cf..d65f3fd895dd 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c | |||
@@ -202,7 +202,6 @@ static void k10temp_remove(struct pci_dev *pdev) | |||
202 | &sensor_dev_attr_temp1_crit.dev_attr); | 202 | &sensor_dev_attr_temp1_crit.dev_attr); |
203 | device_remove_file(&pdev->dev, | 203 | device_remove_file(&pdev->dev, |
204 | &sensor_dev_attr_temp1_crit_hyst.dev_attr); | 204 | &sensor_dev_attr_temp1_crit_hyst.dev_attr); |
205 | pci_set_drvdata(pdev, NULL); | ||
206 | } | 205 | } |
207 | 206 | ||
208 | static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = { | 207 | static DEFINE_PCI_DEVICE_TABLE(k10temp_id_table) = { |
diff --git a/drivers/hwmon/tmp421.c b/drivers/hwmon/tmp421.c index 964c1d688274..ae26b06fa819 100644 --- a/drivers/hwmon/tmp421.c +++ b/drivers/hwmon/tmp421.c | |||
@@ -210,7 +210,7 @@ static int tmp421_init_client(struct i2c_client *client) | |||
210 | if (config < 0) { | 210 | if (config < 0) { |
211 | dev_err(&client->dev, | 211 | dev_err(&client->dev, |
212 | "Could not read configuration register (%d)\n", config); | 212 | "Could not read configuration register (%d)\n", config); |
213 | return -ENODEV; | 213 | return config; |
214 | } | 214 | } |
215 | 215 | ||
216 | config_orig = config; | 216 | config_orig = config; |
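The hwmon changes above (amc6821, emc2103, ibmaem, tmp421) all fix the same
anti-pattern: a helper already returns a meaningful negative errno, but the
caller masks it with a blanket -EIO/-EINVAL/-ENODEV. The corrected shape,
as a sketch (FOO_REG is a placeholder):

	int err, config;
	long val;

	err = kstrtol(buf, 10, &val);
	if (err < 0)
		return err;		/* -EINVAL or -ERANGE, as kstrtol reported */

	config = i2c_smbus_read_byte_data(client, FOO_REG);
	if (config < 0)
		return config;		/* the actual bus error */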
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index d2b34fbbc42e..b6ded17b3be3 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c | |||
@@ -48,6 +48,7 @@ struct evdev_client { | |||
48 | struct evdev *evdev; | 48 | struct evdev *evdev; |
49 | struct list_head node; | 49 | struct list_head node; |
50 | int clkid; | 50 | int clkid; |
51 | bool revoked; | ||
51 | unsigned int bufsize; | 52 | unsigned int bufsize; |
52 | struct input_event buffer[]; | 53 | struct input_event buffer[]; |
53 | }; | 54 | }; |
@@ -164,6 +165,9 @@ static void evdev_pass_values(struct evdev_client *client, | |||
164 | struct input_event event; | 165 | struct input_event event; |
165 | bool wakeup = false; | 166 | bool wakeup = false; |
166 | 167 | ||
168 | if (client->revoked) | ||
169 | return; | ||
170 | |||
167 | event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? | 171 | event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ? |
168 | mono : real); | 172 | mono : real); |
169 | 173 | ||
@@ -240,7 +244,7 @@ static int evdev_flush(struct file *file, fl_owner_t id) | |||
240 | if (retval) | 244 | if (retval) |
241 | return retval; | 245 | return retval; |
242 | 246 | ||
243 | if (!evdev->exist) | 247 | if (!evdev->exist || client->revoked) |
244 | retval = -ENODEV; | 248 | retval = -ENODEV; |
245 | else | 249 | else |
246 | retval = input_flush_device(&evdev->handle, file); | 250 | retval = input_flush_device(&evdev->handle, file); |
@@ -429,7 +433,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer, | |||
429 | if (retval) | 433 | if (retval) |
430 | return retval; | 434 | return retval; |
431 | 435 | ||
432 | if (!evdev->exist) { | 436 | if (!evdev->exist || client->revoked) { |
433 | retval = -ENODEV; | 437 | retval = -ENODEV; |
434 | goto out; | 438 | goto out; |
435 | } | 439 | } |
@@ -482,7 +486,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer, | |||
482 | return -EINVAL; | 486 | return -EINVAL; |
483 | 487 | ||
484 | for (;;) { | 488 | for (;;) { |
485 | if (!evdev->exist) | 489 | if (!evdev->exist || client->revoked) |
486 | return -ENODEV; | 490 | return -ENODEV; |
487 | 491 | ||
488 | if (client->packet_head == client->tail && | 492 | if (client->packet_head == client->tail && |
@@ -511,7 +515,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer, | |||
511 | if (!(file->f_flags & O_NONBLOCK)) { | 515 | if (!(file->f_flags & O_NONBLOCK)) { |
512 | error = wait_event_interruptible(evdev->wait, | 516 | error = wait_event_interruptible(evdev->wait, |
513 | client->packet_head != client->tail || | 517 | client->packet_head != client->tail || |
514 | !evdev->exist); | 518 | !evdev->exist || client->revoked); |
515 | if (error) | 519 | if (error) |
516 | return error; | 520 | return error; |
517 | } | 521 | } |
@@ -529,7 +533,11 @@ static unsigned int evdev_poll(struct file *file, poll_table *wait) | |||
529 | 533 | ||
530 | poll_wait(file, &evdev->wait, wait); | 534 | poll_wait(file, &evdev->wait, wait); |
531 | 535 | ||
532 | mask = evdev->exist ? POLLOUT | POLLWRNORM : POLLHUP | POLLERR; | 536 | if (evdev->exist && !client->revoked) |
537 | mask = POLLOUT | POLLWRNORM; | ||
538 | else | ||
539 | mask = POLLHUP | POLLERR; | ||
540 | |||
533 | if (client->packet_head != client->tail) | 541 | if (client->packet_head != client->tail) |
534 | mask |= POLLIN | POLLRDNORM; | 542 | mask |= POLLIN | POLLRDNORM; |
535 | 543 | ||
@@ -795,6 +803,17 @@ static int evdev_handle_mt_request(struct input_dev *dev, | |||
795 | return 0; | 803 | return 0; |
796 | } | 804 | } |
797 | 805 | ||
806 | static int evdev_revoke(struct evdev *evdev, struct evdev_client *client, | ||
807 | struct file *file) | ||
808 | { | ||
809 | client->revoked = true; | ||
810 | evdev_ungrab(evdev, client); | ||
811 | input_flush_device(&evdev->handle, file); | ||
812 | wake_up_interruptible(&evdev->wait); | ||
813 | |||
814 | return 0; | ||
815 | } | ||
816 | |||
798 | static long evdev_do_ioctl(struct file *file, unsigned int cmd, | 817 | static long evdev_do_ioctl(struct file *file, unsigned int cmd, |
799 | void __user *p, int compat_mode) | 818 | void __user *p, int compat_mode) |
800 | { | 819 | { |
@@ -857,6 +876,12 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd, | |||
857 | else | 876 | else |
858 | return evdev_ungrab(evdev, client); | 877 | return evdev_ungrab(evdev, client); |
859 | 878 | ||
879 | case EVIOCREVOKE: | ||
880 | if (p) | ||
881 | return -EINVAL; | ||
882 | else | ||
883 | return evdev_revoke(evdev, client, file); | ||
884 | |||
860 | case EVIOCSCLOCKID: | 885 | case EVIOCSCLOCKID: |
861 | if (copy_from_user(&i, p, sizeof(unsigned int))) | 886 | if (copy_from_user(&i, p, sizeof(unsigned int))) |
862 | return -EFAULT; | 887 | return -EFAULT; |
@@ -1002,7 +1027,7 @@ static long evdev_ioctl_handler(struct file *file, unsigned int cmd, | |||
1002 | if (retval) | 1027 | if (retval) |
1003 | return retval; | 1028 | return retval; |
1004 | 1029 | ||
1005 | if (!evdev->exist) { | 1030 | if (!evdev->exist || client->revoked) { |
1006 | retval = -ENODEV; | 1031 | retval = -ENODEV; |
1007 | goto out; | 1032 | goto out; |
1008 | } | 1033 | } |
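EVIOCREVOKE gives a process a way to irrevocably cut off one client's access
to an input device without closing the file descriptor, which is useful for
session switching where a compositor hands clients pre-opened fds. A minimal
user-space sketch (the ioctl number is an assumption taken from the matching
uapi header addition, not shown in this hunk; the argument must be NULL/0 or
the kernel returns -EINVAL):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/input.h>

	#ifndef EVIOCREVOKE
	#define EVIOCREVOKE _IOW('E', 0x91, int)	/* assumption */
	#endif

	int main(void)
	{
		int fd = open("/dev/input/event0", O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (ioctl(fd, EVIOCREVOKE, NULL))
			perror("EVIOCREVOKE");
		/* from here on, reads/writes on fd fail with ENODEV and
		 * poll() reports POLLHUP|POLLERR, per the evdev hunks above */
		return 0;
	}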
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c index 154275182b4b..f5aa4b02cfa6 100644 --- a/drivers/mtd/ubi/fastmap.c +++ b/drivers/mtd/ubi/fastmap.c | |||
@@ -1343,7 +1343,7 @@ out: | |||
1343 | static int invalidate_fastmap(struct ubi_device *ubi, | 1343 | static int invalidate_fastmap(struct ubi_device *ubi, |
1344 | struct ubi_fastmap_layout *fm) | 1344 | struct ubi_fastmap_layout *fm) |
1345 | { | 1345 | { |
1346 | int ret, i; | 1346 | int ret; |
1347 | struct ubi_vid_hdr *vh; | 1347 | struct ubi_vid_hdr *vh; |
1348 | 1348 | ||
1349 | ret = erase_block(ubi, fm->e[0]->pnum); | 1349 | ret = erase_block(ubi, fm->e[0]->pnum); |
@@ -1360,9 +1360,6 @@ static int invalidate_fastmap(struct ubi_device *ubi, | |||
1360 | vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); | 1360 | vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); |
1361 | ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); | 1361 | ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh); |
1362 | 1362 | ||
1363 | for (i = 0; i < fm->used_blocks; i++) | ||
1364 | ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]); | ||
1365 | |||
1366 | return ret; | 1363 | return ret; |
1367 | } | 1364 | } |
1368 | 1365 | ||
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 5df49d3cb5c7..c95bfb183c62 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -1069,6 +1069,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
1069 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { | 1069 | if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) { |
1070 | dbg_wl("no WL needed: min used EC %d, max free EC %d", | 1070 | dbg_wl("no WL needed: min used EC %d, max free EC %d", |
1071 | e1->ec, e2->ec); | 1071 | e1->ec, e2->ec); |
1072 | |||
1073 | /* Give the unused PEB back */ | ||
1074 | wl_tree_add(e2, &ubi->free); | ||
1072 | goto out_cancel; | 1075 | goto out_cancel; |
1073 | } | 1076 | } |
1074 | self_check_in_wl_tree(ubi, e1, &ubi->used); | 1077 | self_check_in_wl_tree(ubi, e1, &ubi->used); |
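
Note: by the time the EC-gap test in wear_leveling_worker() runs, e2 has already been taken off the free tree, so cancelling through out_cancel without re-adding it leaked one PEB per cancelled wear-leveling cycle. The invariant, sketched with a hypothetical helper name for the take side:

    e2 = take_free_peb(ubi);                /* hypothetical: removes e2 from ubi->free */
    if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
            wl_tree_add(e2, &ubi->free);    /* give it back, or the PEB is lost */
            goto out_cancel;
    }
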
diff --git a/drivers/net/ethernet/amd/declance.c b/drivers/net/ethernet/amd/declance.c index 3d86ffeb4e15..94edc9c6fbbf 100644 --- a/drivers/net/ethernet/amd/declance.c +++ b/drivers/net/ethernet/amd/declance.c | |||
@@ -725,6 +725,7 @@ static irqreturn_t lance_dma_merr_int(int irq, void *dev_id) | |||
725 | { | 725 | { |
726 | struct net_device *dev = dev_id; | 726 | struct net_device *dev = dev_id; |
727 | 727 | ||
728 | clear_ioasic_dma_irq(irq); | ||
728 | printk(KERN_ERR "%s: DMA error\n", dev->name); | 729 | printk(KERN_ERR "%s: DMA error\n", dev->name); |
729 | return IRQ_HANDLED; | 730 | return IRQ_HANDLED; |
730 | } | 731 | } |
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 36a9e6023395..96d6b2eef4f2 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig | |||
@@ -732,6 +732,7 @@ config SAMSUNG_LAPTOP | |||
732 | tristate "Samsung Laptop driver" | 732 | tristate "Samsung Laptop driver" |
733 | depends on X86 | 733 | depends on X86 |
734 | depends on RFKILL || RFKILL = n | 734 | depends on RFKILL || RFKILL = n |
735 | depends on ACPI_VIDEO || ACPI_VIDEO = n | ||
735 | depends on BACKLIGHT_CLASS_DEVICE | 736 | depends on BACKLIGHT_CLASS_DEVICE |
736 | select LEDS_CLASS | 737 | select LEDS_CLASS |
737 | select NEW_LEDS | 738 | select NEW_LEDS |
@@ -764,7 +765,7 @@ config INTEL_OAKTRAIL | |||
764 | 765 | ||
765 | config SAMSUNG_Q10 | 766 | config SAMSUNG_Q10 |
766 | tristate "Samsung Q10 Extras" | 767 | tristate "Samsung Q10 Extras" |
767 | depends on SERIO_I8042 | 768 | depends on ACPI |
768 | select BACKLIGHT_CLASS_DEVICE | 769 | select BACKLIGHT_CLASS_DEVICE |
769 | ---help--- | 770 | ---help--- |
770 | This driver provides support for backlight control on Samsung Q10 | 771 | This driver provides support for backlight control on Samsung Q10 |
diff --git a/drivers/platform/x86/amilo-rfkill.c b/drivers/platform/x86/amilo-rfkill.c index 6296f078b7bc..da36b5e824d4 100644 --- a/drivers/platform/x86/amilo-rfkill.c +++ b/drivers/platform/x86/amilo-rfkill.c | |||
@@ -85,6 +85,13 @@ static const struct dmi_system_id amilo_rfkill_id_table[] = { | |||
85 | { | 85 | { |
86 | .matches = { | 86 | .matches = { |
87 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | 87 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), |
88 | DMI_MATCH(DMI_BOARD_NAME, "AMILO L1310"), | ||
89 | }, | ||
90 | .driver_data = (void *)&amilo_a1655_rfkill_ops | ||
91 | }, | ||
92 | { | ||
93 | .matches = { | ||
94 | DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), | ||
88 | DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"), | 95 | DMI_MATCH(DMI_BOARD_NAME, "AMILO M7440"), |
89 | }, | 96 | }, |
90 | .driver_data = (void *)&amilo_m7440_rfkill_ops | 97 | .driver_data = (void *)&amilo_m7440_rfkill_ops |
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 36e5e6c13db4..6dfa8d3b4eec 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c | |||
@@ -590,7 +590,7 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev, | |||
590 | inputdev = dev_get_drvdata(&acpi->dev); | 590 | inputdev = dev_get_drvdata(&acpi->dev); |
591 | accel = dev_get_drvdata(&inputdev->dev); | 591 | accel = dev_get_drvdata(&inputdev->dev); |
592 | 592 | ||
593 | r = strict_strtoul(buf, 0, &sensitivity); | 593 | r = kstrtoul(buf, 0, &sensitivity); |
594 | if (r) | 594 | if (r) |
595 | return r; | 595 | return r; |
596 | 596 | ||
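
Note: strict_strtoul()/strict_strtol() were deprecated aliases; kstrtoul()/kstrtol() have the same contract, returning 0 on success and -EINVAL or -ERANGE on bad input, which is why the handlers in these drivers can propagate the result directly. A sketch of the idiom in a kernel sysfs store handler (threshold_store is a hypothetical attribute, not from these drivers):

    static ssize_t threshold_store(struct device *dev,
                                   struct device_attribute *attr,
                                   const char *buf, size_t count)
    {
            unsigned long val;
            int err;

            err = kstrtoul(buf, 0, &val);   /* base 0: decimal, 0x hex, 0 octal */
            if (err)
                    return err;             /* -EINVAL or -ERANGE */
            if (val > 255)
                    return -EINVAL;
            /* ... apply val ... */
            return count;
    }
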
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index 475cc5242511..eaa78edb1f4e 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c | |||
@@ -425,7 +425,8 @@ static ssize_t pwm_enable_store(struct device *dev, | |||
425 | struct compal_data *data = dev_get_drvdata(dev); | 425 | struct compal_data *data = dev_get_drvdata(dev); |
426 | long val; | 426 | long val; |
427 | int err; | 427 | int err; |
428 | err = strict_strtol(buf, 10, &val); | 428 | |
429 | err = kstrtol(buf, 10, &val); | ||
429 | if (err) | 430 | if (err) |
430 | return err; | 431 | return err; |
431 | if (val < 0) | 432 | if (val < 0) |
@@ -463,7 +464,8 @@ static ssize_t pwm_store(struct device *dev, struct device_attribute *attr, | |||
463 | struct compal_data *data = dev_get_drvdata(dev); | 464 | struct compal_data *data = dev_get_drvdata(dev); |
464 | long val; | 465 | long val; |
465 | int err; | 466 | int err; |
466 | err = strict_strtol(buf, 10, &val); | 467 | |
468 | err = kstrtol(buf, 10, &val); | ||
467 | if (err) | 469 | if (err) |
468 | return err; | 470 | return err; |
469 | if (val < 0 || val > 255) | 471 | if (val < 0 || val > 255) |
@@ -1081,7 +1083,6 @@ static int compal_remove(struct platform_device *pdev) | |||
1081 | hwmon_device_unregister(data->hwmon_dev); | 1083 | hwmon_device_unregister(data->hwmon_dev); |
1082 | power_supply_unregister(&data->psy); | 1084 | power_supply_unregister(&data->psy); |
1083 | 1085 | ||
1084 | platform_set_drvdata(pdev, NULL); | ||
1085 | kfree(data); | 1086 | kfree(data); |
1086 | 1087 | ||
1087 | sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group); | 1088 | sysfs_remove_group(&pdev->dev.kobj, &compal_attribute_group); |
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index d6970f47ae72..1c86fa0857c8 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c | |||
@@ -725,7 +725,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) | |||
725 | (void *) HPWMI_WWAN); | 725 | (void *) HPWMI_WWAN); |
726 | if (!wwan_rfkill) { | 726 | if (!wwan_rfkill) { |
727 | err = -ENOMEM; | 727 | err = -ENOMEM; |
728 | goto register_gps_error; | 728 | goto register_bluetooth_error; |
729 | } | 729 | } |
730 | rfkill_init_sw_state(wwan_rfkill, | 730 | rfkill_init_sw_state(wwan_rfkill, |
731 | hp_wmi_get_sw_state(HPWMI_WWAN)); | 731 | hp_wmi_get_sw_state(HPWMI_WWAN)); |
@@ -733,7 +733,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) | |||
733 | hp_wmi_get_hw_state(HPWMI_WWAN)); | 733 | hp_wmi_get_hw_state(HPWMI_WWAN)); |
734 | err = rfkill_register(wwan_rfkill); | 734 | err = rfkill_register(wwan_rfkill); |
735 | if (err) | 735 | if (err) |
736 | goto register_wwan_err; | 736 | goto register_wwan_error; |
737 | } | 737 | } |
738 | 738 | ||
739 | if (wireless & 0x8) { | 739 | if (wireless & 0x8) { |
@@ -743,7 +743,7 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) | |||
743 | (void *) HPWMI_GPS); | 743 | (void *) HPWMI_GPS); |
744 | if (!gps_rfkill) { | 744 | if (!gps_rfkill) { |
745 | err = -ENOMEM; | 745 | err = -ENOMEM; |
746 | goto register_bluetooth_error; | 746 | goto register_wwan_error; |
747 | } | 747 | } |
748 | rfkill_init_sw_state(gps_rfkill, | 748 | rfkill_init_sw_state(gps_rfkill, |
749 | hp_wmi_get_sw_state(HPWMI_GPS)); | 749 | hp_wmi_get_sw_state(HPWMI_GPS)); |
@@ -755,16 +755,16 @@ static int hp_wmi_rfkill_setup(struct platform_device *device) | |||
755 | } | 755 | } |
756 | 756 | ||
757 | return 0; | 757 | return 0; |
758 | register_wwan_err: | ||
759 | rfkill_destroy(wwan_rfkill); | ||
760 | wwan_rfkill = NULL; | ||
761 | if (gps_rfkill) | ||
762 | rfkill_unregister(gps_rfkill); | ||
763 | register_gps_error: | 758 | register_gps_error: |
764 | rfkill_destroy(gps_rfkill); | 759 | rfkill_destroy(gps_rfkill); |
765 | gps_rfkill = NULL; | 760 | gps_rfkill = NULL; |
766 | if (bluetooth_rfkill) | 761 | if (bluetooth_rfkill) |
767 | rfkill_unregister(bluetooth_rfkill); | 762 | rfkill_unregister(bluetooth_rfkill); |
763 | register_wwan_error: | ||
764 | rfkill_destroy(wwan_rfkill); | ||
765 | wwan_rfkill = NULL; | ||
766 | if (gps_rfkill) | ||
767 | rfkill_unregister(gps_rfkill); | ||
768 | register_bluetooth_error: | 768 | register_bluetooth_error: |
769 | rfkill_destroy(bluetooth_rfkill); | 769 | rfkill_destroy(bluetooth_rfkill); |
770 | bluetooth_rfkill = NULL; | 770 | bluetooth_rfkill = NULL; |
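
Note: the hp-wmi fix is a goto-unwind ordering bug. A failed WWAN allocation jumped to a label that tore down the GPS rfkill, which had not been created yet, while a failed GPS allocation skipped the WWAN teardown entirely. The reordered labels restore the usual kernel pattern, where each failure jumps to the label that unwinds exactly what exists so far; a generic sketch with hypothetical resources a, b, c:

    a = alloc_a();
    if (!a)
            return -ENOMEM;         /* nothing to unwind yet */
    b = alloc_b();
    if (!b) {
            err = -ENOMEM;
            goto err_free_a;        /* only a exists */
    }
    c = alloc_c();
    if (!c) {
            err = -ENOMEM;
            goto err_free_b;        /* a and b exist */
    }
    return 0;

    err_free_b:
            free_b(b);
    err_free_a:
            free_a(a);
            return err;
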
diff --git a/drivers/platform/x86/intel-rst.c b/drivers/platform/x86/intel-rst.c index 9385afd9b558..41b740cb28bc 100644 --- a/drivers/platform/x86/intel-rst.c +++ b/drivers/platform/x86/intel-rst.c | |||
@@ -193,17 +193,6 @@ static struct acpi_driver irst_driver = { | |||
193 | }, | 193 | }, |
194 | }; | 194 | }; |
195 | 195 | ||
196 | static int irst_init(void) | 196 | module_acpi_driver(irst_driver); |
197 | { | ||
198 | return acpi_bus_register_driver(&irst_driver); | ||
199 | } | ||
200 | |||
201 | static void irst_exit(void) | ||
202 | { | ||
203 | acpi_bus_unregister_driver(&irst_driver); | ||
204 | } | ||
205 | |||
206 | module_init(irst_init); | ||
207 | module_exit(irst_exit); | ||
208 | 197 | ||
209 | MODULE_DEVICE_TABLE(acpi, irst_ids); | 198 | MODULE_DEVICE_TABLE(acpi, irst_ids); |
diff --git a/drivers/platform/x86/intel-smartconnect.c b/drivers/platform/x86/intel-smartconnect.c index f74e93d096bc..52259dcabecb 100644 --- a/drivers/platform/x86/intel-smartconnect.c +++ b/drivers/platform/x86/intel-smartconnect.c | |||
@@ -74,17 +74,6 @@ static struct acpi_driver smartconnect_driver = { | |||
74 | }, | 74 | }, |
75 | }; | 75 | }; |
76 | 76 | ||
77 | static int smartconnect_init(void) | 77 | module_acpi_driver(smartconnect_driver); |
78 | { | ||
79 | return acpi_bus_register_driver(&smartconnect_driver); | ||
80 | } | ||
81 | |||
82 | static void smartconnect_exit(void) | ||
83 | { | ||
84 | acpi_bus_unregister_driver(&smartconnect_driver); | ||
85 | } | ||
86 | |||
87 | module_init(smartconnect_init); | ||
88 | module_exit(smartconnect_exit); | ||
89 | 78 | ||
90 | MODULE_DEVICE_TABLE(acpi, smartconnect_ids); | 79 | MODULE_DEVICE_TABLE(acpi, smartconnect_ids); |
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index f59683aa13d5..6b18aba82cfa 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c | |||
@@ -128,7 +128,6 @@ static int mfld_pb_remove(struct platform_device *pdev) | |||
128 | 128 | ||
129 | free_irq(irq, input); | 129 | free_irq(irq, input); |
130 | input_unregister_device(input); | 130 | input_unregister_device(input); |
131 | platform_set_drvdata(pdev, NULL); | ||
132 | 131 | ||
133 | return 0; | 132 | return 0; |
134 | } | 133 | } |
diff --git a/drivers/platform/x86/intel_mid_thermal.c b/drivers/platform/x86/intel_mid_thermal.c index 81c491e74b34..93fab8b70ce1 100644 --- a/drivers/platform/x86/intel_mid_thermal.c +++ b/drivers/platform/x86/intel_mid_thermal.c | |||
@@ -542,7 +542,6 @@ static int mid_thermal_remove(struct platform_device *pdev) | |||
542 | } | 542 | } |
543 | 543 | ||
544 | kfree(pinfo); | 544 | kfree(pinfo); |
545 | platform_set_drvdata(pdev, NULL); | ||
546 | 545 | ||
547 | /* Stop the ADC */ | 546 | /* Stop the ADC */ |
548 | return configure_adc(0); | 547 | return configure_adc(0); |
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 984253da365d..10d12b221601 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c | |||
@@ -643,23 +643,6 @@ out_hotkey: | |||
643 | return result; | 643 | return result; |
644 | } | 644 | } |
645 | 645 | ||
646 | static int __init acpi_pcc_init(void) | ||
647 | { | ||
648 | int result = 0; | ||
649 | |||
650 | if (acpi_disabled) | ||
651 | return -ENODEV; | ||
652 | |||
653 | result = acpi_bus_register_driver(&acpi_pcc_driver); | ||
654 | if (result < 0) { | ||
655 | ACPI_DEBUG_PRINT((ACPI_DB_ERROR, | ||
656 | "Error registering hotkey driver\n")); | ||
657 | return -ENODEV; | ||
658 | } | ||
659 | |||
660 | return 0; | ||
661 | } | ||
662 | |||
663 | static int acpi_pcc_hotkey_remove(struct acpi_device *device) | 646 | static int acpi_pcc_hotkey_remove(struct acpi_device *device) |
664 | { | 647 | { |
665 | struct pcc_acpi *pcc = acpi_driver_data(device); | 648 | struct pcc_acpi *pcc = acpi_driver_data(device); |
@@ -679,10 +662,4 @@ static int acpi_pcc_hotkey_remove(struct acpi_device *device) | |||
679 | return 0; | 662 | return 0; |
680 | } | 663 | } |
681 | 664 | ||
682 | static void __exit acpi_pcc_exit(void) | 665 | module_acpi_driver(acpi_pcc_driver); |
683 | { | ||
684 | acpi_bus_unregister_driver(&acpi_pcc_driver); | ||
685 | } | ||
686 | |||
687 | module_init(acpi_pcc_init); | ||
688 | module_exit(acpi_pcc_exit); | ||
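
Note: all three conversions (intel-rst, intel-smartconnect, panasonic-laptop) rely on module_acpi_driver() from include/linux/acpi.h, which generates the init/exit boilerplate. Its expansion is approximately the following (the exact generated names may differ):

    /* Approximate expansion of module_acpi_driver(acpi_pcc_driver): */
    static int __init acpi_pcc_driver_init(void)
    {
            return acpi_bus_register_driver(&acpi_pcc_driver);
    }
    module_init(acpi_pcc_driver_init);

    static void __exit acpi_pcc_driver_exit(void)
    {
            acpi_bus_unregister_driver(&acpi_pcc_driver);
    }
    module_exit(acpi_pcc_driver_exit);

The explicit acpi_disabled check dropped from panasonic-laptop is not lost in the conversion: acpi_bus_register_driver() itself returns -ENODEV when ACPI is disabled.
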
diff --git a/drivers/platform/x86/samsung-q10.c b/drivers/platform/x86/samsung-q10.c index 4430b8c1369d..cae7098e9b0d 100644 --- a/drivers/platform/x86/samsung-q10.c +++ b/drivers/platform/x86/samsung-q10.c | |||
@@ -14,16 +14,12 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/backlight.h> | 16 | #include <linux/backlight.h> |
17 | #include <linux/i8042.h> | ||
18 | #include <linux/dmi.h> | 17 | #include <linux/dmi.h> |
18 | #include <acpi/acpi_drivers.h> | ||
19 | 19 | ||
20 | #define SAMSUNGQ10_BL_MAX_INTENSITY 255 | 20 | #define SAMSUNGQ10_BL_MAX_INTENSITY 7 |
21 | #define SAMSUNGQ10_BL_DEFAULT_INTENSITY 185 | ||
22 | 21 | ||
23 | #define SAMSUNGQ10_BL_8042_CMD 0xbe | 22 | static acpi_handle ec_handle; |
24 | #define SAMSUNGQ10_BL_8042_DATA { 0x89, 0x91 } | ||
25 | |||
26 | static int samsungq10_bl_brightness; | ||
27 | 23 | ||
28 | static bool force; | 24 | static bool force; |
29 | module_param(force, bool, 0); | 25 | module_param(force, bool, 0); |
@@ -33,21 +29,26 @@ MODULE_PARM_DESC(force, | |||
33 | static int samsungq10_bl_set_intensity(struct backlight_device *bd) | 29 | static int samsungq10_bl_set_intensity(struct backlight_device *bd) |
34 | { | 30 | { |
35 | 31 | ||
36 | int brightness = bd->props.brightness; | 32 | acpi_status status; |
37 | unsigned char c[3] = SAMSUNGQ10_BL_8042_DATA; | 33 | int i; |
38 | 34 | ||
39 | c[2] = (unsigned char)brightness; | 35 | for (i = 0; i < SAMSUNGQ10_BL_MAX_INTENSITY; i++) { |
40 | i8042_lock_chip(); | 36 | status = acpi_evaluate_object(ec_handle, "_Q63", NULL, NULL); |
41 | i8042_command(c, (0x30 << 8) | SAMSUNGQ10_BL_8042_CMD); | 37 | if (ACPI_FAILURE(status)) |
42 | i8042_unlock_chip(); | 38 | return -EIO; |
43 | samsungq10_bl_brightness = brightness; | 39 | } |
40 | for (i = 0; i < bd->props.brightness; i++) { | ||
41 | status = acpi_evaluate_object(ec_handle, "_Q64", NULL, NULL); | ||
42 | if (ACPI_FAILURE(status)) | ||
43 | return -EIO; | ||
44 | } | ||
44 | 45 | ||
45 | return 0; | 46 | return 0; |
46 | } | 47 | } |
47 | 48 | ||
48 | static int samsungq10_bl_get_intensity(struct backlight_device *bd) | 49 | static int samsungq10_bl_get_intensity(struct backlight_device *bd) |
49 | { | 50 | { |
50 | return samsungq10_bl_brightness; | 51 | return bd->props.brightness; |
51 | } | 52 | } |
52 | 53 | ||
53 | static const struct backlight_ops samsungq10_bl_ops = { | 54 | static const struct backlight_ops samsungq10_bl_ops = { |
@@ -55,28 +56,6 @@ static const struct backlight_ops samsungq10_bl_ops = { | |||
55 | .update_status = samsungq10_bl_set_intensity, | 56 | .update_status = samsungq10_bl_set_intensity, |
56 | }; | 57 | }; |
57 | 58 | ||
58 | #ifdef CONFIG_PM_SLEEP | ||
59 | static int samsungq10_suspend(struct device *dev) | ||
60 | { | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | static int samsungq10_resume(struct device *dev) | ||
65 | { | ||
66 | |||
67 | struct backlight_device *bd = dev_get_drvdata(dev); | ||
68 | |||
69 | samsungq10_bl_set_intensity(bd); | ||
70 | return 0; | ||
71 | } | ||
72 | #else | ||
73 | #define samsungq10_suspend NULL | ||
74 | #define samsungq10_resume NULL | ||
75 | #endif | ||
76 | |||
77 | static SIMPLE_DEV_PM_OPS(samsungq10_pm_ops, | ||
78 | samsungq10_suspend, samsungq10_resume); | ||
79 | |||
80 | static int samsungq10_probe(struct platform_device *pdev) | 59 | static int samsungq10_probe(struct platform_device *pdev) |
81 | { | 60 | { |
82 | 61 | ||
@@ -93,9 +72,6 @@ static int samsungq10_probe(struct platform_device *pdev) | |||
93 | 72 | ||
94 | platform_set_drvdata(pdev, bd); | 73 | platform_set_drvdata(pdev, bd); |
95 | 74 | ||
96 | bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; | ||
97 | samsungq10_bl_set_intensity(bd); | ||
98 | |||
99 | return 0; | 75 | return 0; |
100 | } | 76 | } |
101 | 77 | ||
@@ -104,9 +80,6 @@ static int samsungq10_remove(struct platform_device *pdev) | |||
104 | 80 | ||
105 | struct backlight_device *bd = platform_get_drvdata(pdev); | 81 | struct backlight_device *bd = platform_get_drvdata(pdev); |
106 | 82 | ||
107 | bd->props.brightness = SAMSUNGQ10_BL_DEFAULT_INTENSITY; | ||
108 | samsungq10_bl_set_intensity(bd); | ||
109 | |||
110 | backlight_device_unregister(bd); | 83 | backlight_device_unregister(bd); |
111 | 84 | ||
112 | return 0; | 85 | return 0; |
@@ -116,7 +89,6 @@ static struct platform_driver samsungq10_driver = { | |||
116 | .driver = { | 89 | .driver = { |
117 | .name = KBUILD_MODNAME, | 90 | .name = KBUILD_MODNAME, |
118 | .owner = THIS_MODULE, | 91 | .owner = THIS_MODULE, |
119 | .pm = &samsungq10_pm_ops, | ||
120 | }, | 92 | }, |
121 | .probe = samsungq10_probe, | 93 | .probe = samsungq10_probe, |
122 | .remove = samsungq10_remove, | 94 | .remove = samsungq10_remove, |
@@ -172,6 +144,11 @@ static int __init samsungq10_init(void) | |||
172 | if (!force && !dmi_check_system(samsungq10_dmi_table)) | 144 | if (!force && !dmi_check_system(samsungq10_dmi_table)) |
173 | return -ENODEV; | 145 | return -ENODEV; |
174 | 146 | ||
147 | ec_handle = ec_get_handle(); | ||
148 | |||
149 | if (!ec_handle) | ||
150 | return -ENODEV; | ||
151 | |||
175 | samsungq10_device = platform_create_bundle(&samsungq10_driver, | 152 | samsungq10_device = platform_create_bundle(&samsungq10_driver, |
176 | samsungq10_probe, | 153 | samsungq10_probe, |
177 | NULL, 0, NULL, 0); | 154 | NULL, 0, NULL, 0); |
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index be67e5e28d18..03ca6c139f1a 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c | |||
@@ -369,7 +369,7 @@ struct tpacpi_led_classdev { | |||
369 | struct led_classdev led_classdev; | 369 | struct led_classdev led_classdev; |
370 | struct work_struct work; | 370 | struct work_struct work; |
371 | enum led_status_t new_state; | 371 | enum led_status_t new_state; |
372 | unsigned int led; | 372 | int led; |
373 | }; | 373 | }; |
374 | 374 | ||
375 | /* brightness level capabilities */ | 375 | /* brightness level capabilities */ |
@@ -5296,6 +5296,16 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5296 | 5296 | ||
5297 | led_supported = led_init_detect_mode(); | 5297 | led_supported = led_init_detect_mode(); |
5298 | 5298 | ||
5299 | if (led_supported != TPACPI_LED_NONE) { | ||
5300 | useful_leds = tpacpi_check_quirks(led_useful_qtable, | ||
5301 | ARRAY_SIZE(led_useful_qtable)); | ||
5302 | |||
5303 | if (!useful_leds) { | ||
5304 | led_handle = NULL; | ||
5305 | led_supported = TPACPI_LED_NONE; | ||
5306 | } | ||
5307 | } | ||
5308 | |||
5299 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", | 5309 | vdbg_printk(TPACPI_DBG_INIT, "LED commands are %s, mode %d\n", |
5300 | str_supported(led_supported), led_supported); | 5310 | str_supported(led_supported), led_supported); |
5301 | 5311 | ||
@@ -5309,10 +5319,9 @@ static int __init led_init(struct ibm_init_struct *iibm) | |||
5309 | return -ENOMEM; | 5319 | return -ENOMEM; |
5310 | } | 5320 | } |
5311 | 5321 | ||
5312 | useful_leds = tpacpi_check_quirks(led_useful_qtable, | ||
5313 | ARRAY_SIZE(led_useful_qtable)); | ||
5314 | |||
5315 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { | 5322 | for (i = 0; i < TPACPI_LED_NUMLEDS; i++) { |
5323 | tpacpi_leds[i].led = -1; | ||
5324 | |||
5316 | if (!tpacpi_is_led_restricted(i) && | 5325 | if (!tpacpi_is_led_restricted(i) && |
5317 | test_bit(i, &useful_leds)) { | 5326 | test_bit(i, &useful_leds)) { |
5318 | rc = tpacpi_init_led(i); | 5327 | rc = tpacpi_init_led(i); |
@@ -5370,9 +5379,13 @@ static int led_write(char *buf) | |||
5370 | return -ENODEV; | 5379 | return -ENODEV; |
5371 | 5380 | ||
5372 | while ((cmd = next_cmd(&buf))) { | 5381 | while ((cmd = next_cmd(&buf))) { |
5373 | if (sscanf(cmd, "%d", &led) != 1 || led < 0 || led > 15) | 5382 | if (sscanf(cmd, "%d", &led) != 1) |
5374 | return -EINVAL; | 5383 | return -EINVAL; |
5375 | 5384 | ||
5385 | if (led < 0 || led > (TPACPI_LED_NUMLEDS - 1) || | ||
5386 | tpacpi_leds[led].led < 0) | ||
5387 | return -ENODEV; | ||
5388 | |||
5376 | if (strstr(cmd, "off")) { | 5389 | if (strstr(cmd, "off")) { |
5377 | s = TPACPI_LED_OFF; | 5390 | s = TPACPI_LED_OFF; |
5378 | } else if (strstr(cmd, "on")) { | 5391 | } else if (strstr(cmd, "on")) { |
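
Note: two details in the thinkpad_acpi change work together. The led field switches from unsigned int to int so that -1 can serve as a "never initialized" sentinel, and led_write() splits its error reporting: malformed input still yields -EINVAL, while a well-formed request for an LED that was never registered now yields -ENODEV. Condensed:

    if (sscanf(cmd, "%d", &led) != 1)
            return -EINVAL;         /* unparseable input */
    if (led < 0 || led >= TPACPI_LED_NUMLEDS ||
        tpacpi_leds[led].led < 0)
            return -ENODEV;         /* valid syntax, but no such LED */
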
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/x86/wmi.c index 6e02c953d888..601ea9512242 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/x86/wmi.c | |||
@@ -780,7 +780,7 @@ static bool guid_already_parsed(const char *guid_string) | |||
780 | /* | 780 | /* |
781 | * Parse the _WDG method for the GUID data blocks | 781 | * Parse the _WDG method for the GUID data blocks |
782 | */ | 782 | */ |
783 | static acpi_status parse_wdg(acpi_handle handle) | 783 | static int parse_wdg(acpi_handle handle) |
784 | { | 784 | { |
785 | struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; | 785 | struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; |
786 | union acpi_object *obj; | 786 | union acpi_object *obj; |
@@ -812,7 +812,7 @@ static acpi_status parse_wdg(acpi_handle handle) | |||
812 | 812 | ||
813 | wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); | 813 | wblock = kzalloc(sizeof(struct wmi_block), GFP_KERNEL); |
814 | if (!wblock) | 814 | if (!wblock) |
815 | return AE_NO_MEMORY; | 815 | return -ENOMEM; |
816 | 816 | ||
817 | wblock->handle = handle; | 817 | wblock->handle = handle; |
818 | wblock->gblock = gblock[i]; | 818 | wblock->gblock = gblock[i]; |
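
Note: the parse_wdg() signature change matters because acpi_status and errno values live in different spaces: AE_NO_MEMORY is a positive ACPI constant, so a caller written against errno conventions would not treat it as the failure it is. Returning int with -ENOMEM keeps the call chain in one convention. The hazard in miniature (hypothetical caller, not from this file):

    int err = parse_wdg(handle);
    if (err)
            return err;     /* with the old acpi_status return type this
                             * could propagate a positive AE_* value to
                             * code expecting a negative errno */
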
diff --git a/drivers/scsi/aic7xxx/aic7xxx_pci.c b/drivers/scsi/aic7xxx/aic7xxx_pci.c index 6917b4f5ac9e..22d5a949ec83 100644 --- a/drivers/scsi/aic7xxx/aic7xxx_pci.c +++ b/drivers/scsi/aic7xxx/aic7xxx_pci.c | |||
@@ -692,7 +692,7 @@ ahc_find_pci_device(ahc_dev_softc_t pci) | |||
692 | * ID as valid. | 692 | * ID as valid. |
693 | */ | 693 | */ |
694 | if (ahc_get_pci_function(pci) > 0 | 694 | if (ahc_get_pci_function(pci) > 0 |
695 | && ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice) | 695 | && ahc_9005_subdevinfo_valid(device, vendor, subdevice, subvendor) |
696 | && SUBID_9005_MFUNCENB(subdevice) == 0) | 696 | && SUBID_9005_MFUNCENB(subdevice) == 0) |
697 | return (NULL); | 697 | return (NULL); |
698 | 698 | ||
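
Note: the aic7xxx fix is a transposed-argument bug. Judging from the corrected call, ahc_9005_subdevinfo_valid() takes its parameters in the order (device, vendor, subdevice, subvendor), and because all four share one integer type, the compiler had no way to flag the swapped call:

    /* Declared parameter order (all the same type, so no type checking): */
    static int ahc_9005_subdevinfo_valid(uint16_t device, uint16_t vendor,
                                         uint16_t subdevice, uint16_t subvendor);

    /* The old call compiled silently with every role transposed: */
    ahc_9005_subdevinfo_valid(vendor, device, subvendor, subdevice);
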
diff --git a/drivers/scsi/esas2r/esas2r_flash.c b/drivers/scsi/esas2r/esas2r_flash.c index 8582929b1fef..2ec3c23275b8 100644 --- a/drivers/scsi/esas2r/esas2r_flash.c +++ b/drivers/scsi/esas2r/esas2r_flash.c | |||
@@ -860,8 +860,13 @@ bool esas2r_process_fs_ioctl(struct esas2r_adapter *a, | |||
860 | return false; | 860 | return false; |
861 | } | 861 | } |
862 | 862 | ||
863 | if (fsc->command >= cmdcnt) { | ||
864 | fs->status = ATTO_STS_INV_FUNC; | ||
865 | return false; | ||
866 | } | ||
867 | |||
863 | func = cmd_to_fls_func[fsc->command]; | 868 | func = cmd_to_fls_func[fsc->command]; |
864 | if (fsc->command >= cmdcnt || func == 0xFF) { | 869 | if (func == 0xFF) { |
865 | fs->status = ATTO_STS_INV_FUNC; | 870 | fs->status = ATTO_STS_INV_FUNC; |
866 | return false; | 871 | return false; |
867 | } | 872 | } |
@@ -1355,7 +1360,7 @@ void esas2r_nvram_set_defaults(struct esas2r_adapter *a) | |||
1355 | u32 time = jiffies_to_msecs(jiffies); | 1360 | u32 time = jiffies_to_msecs(jiffies); |
1356 | 1361 | ||
1357 | esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); | 1362 | esas2r_lock_clear_flags(&a->flags, AF_NVR_VALID); |
1358 | memcpy(n, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); | 1363 | *n = default_sas_nvram; |
1359 | n->sas_addr[3] |= 0x0F; | 1364 | n->sas_addr[3] |= 0x0F; |
1360 | n->sas_addr[4] = HIBYTE(LOWORD(time)); | 1365 | n->sas_addr[4] = HIBYTE(LOWORD(time)); |
1361 | n->sas_addr[5] = LOBYTE(LOWORD(time)); | 1366 | n->sas_addr[5] = LOBYTE(LOWORD(time)); |
@@ -1373,7 +1378,7 @@ void esas2r_nvram_get_defaults(struct esas2r_adapter *a, | |||
1373 | * address out first. | 1378 | * address out first. |
1374 | */ | 1379 | */ |
1375 | memcpy(&sas_addr[0], a->nvram->sas_addr, 8); | 1380 | memcpy(&sas_addr[0], a->nvram->sas_addr, 8); |
1376 | memcpy(nvram, &default_sas_nvram, sizeof(struct esas2r_sas_nvram)); | 1381 | *nvram = default_sas_nvram; |
1377 | memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); | 1382 | memcpy(&nvram->sas_addr[0], &sas_addr[0], 8); |
1378 | } | 1383 | } |
1379 | 1384 | ||
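
Note: the esas2r_flash change fixes an order-of-operations bug. The old code read cmd_to_fls_func[fsc->command] before the range check on fsc->command ran (both tests sat in one if, but the table read happened first), so an out-of-range ioctl command read past the end of the table. The rule, sketched with the driver's names:

    if (fsc->command >= cmdcnt) {           /* validate the index first */
            fs->status = ATTO_STS_INV_FUNC;
            return false;
    }
    func = cmd_to_fls_func[fsc->command];   /* only now is this read safe */
    if (func == 0xFF) {                     /* entry present but unimplemented */
            fs->status = ATTO_STS_INV_FUNC;
            return false;
    }
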
diff --git a/drivers/scsi/esas2r/esas2r_init.c b/drivers/scsi/esas2r/esas2r_init.c index 3a798e7d5c56..da1869df2408 100644 --- a/drivers/scsi/esas2r/esas2r_init.c +++ b/drivers/scsi/esas2r/esas2r_init.c | |||
@@ -665,7 +665,7 @@ void esas2r_kill_adapter(int i) | |||
665 | 665 | ||
666 | int esas2r_cleanup(struct Scsi_Host *host) | 666 | int esas2r_cleanup(struct Scsi_Host *host) |
667 | { | 667 | { |
668 | struct esas2r_adapter *a = (struct esas2r_adapter *)host->hostdata; | 668 | struct esas2r_adapter *a; |
669 | int index; | 669 | int index; |
670 | 670 | ||
671 | if (host == NULL) { | 671 | if (host == NULL) { |
@@ -678,6 +678,7 @@ int esas2r_cleanup(struct Scsi_Host *host) | |||
678 | } | 678 | } |
679 | 679 | ||
680 | esas2r_debug("esas2r_cleanup called for host %p", host); | 680 | esas2r_debug("esas2r_cleanup called for host %p", host); |
681 | a = (struct esas2r_adapter *)host->hostdata; | ||
681 | index = a->index; | 682 | index = a->index; |
682 | esas2r_kill_adapter(index); | 683 | esas2r_kill_adapter(index); |
683 | return index; | 684 | return index; |
@@ -808,7 +809,7 @@ static void esas2r_init_pci_cfg_space(struct esas2r_adapter *a) | |||
808 | int pcie_cap_reg; | 809 | int pcie_cap_reg; |
809 | 810 | ||
810 | pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); | 811 | pcie_cap_reg = pci_find_capability(a->pcid, PCI_CAP_ID_EXP); |
811 | if (0xffff && pcie_cap_reg) { | 812 | if (0xffff & pcie_cap_reg) { |
812 | u16 devcontrol; | 813 | u16 devcontrol; |
813 | 814 | ||
814 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, | 815 | pci_read_config_word(a->pcid, pcie_cap_reg + PCI_EXP_DEVCTL, |
@@ -1550,8 +1551,7 @@ void esas2r_reset_chip(struct esas2r_adapter *a) | |||
1550 | * to not overwrite a previous crash that was saved. | 1551 | * to not overwrite a previous crash that was saved. |
1551 | */ | 1552 | */ |
1552 | if ((a->flags2 & AF2_COREDUMP_AVAIL) | 1553 | if ((a->flags2 & AF2_COREDUMP_AVAIL) |
1553 | && !(a->flags2 & AF2_COREDUMP_SAVED) | 1554 | && !(a->flags2 & AF2_COREDUMP_SAVED)) { |
1554 | && a->fw_coredump_buff) { | ||
1555 | esas2r_read_mem_block(a, | 1555 | esas2r_read_mem_block(a, |
1556 | a->fw_coredump_buff, | 1556 | a->fw_coredump_buff, |
1557 | MW_DATA_ADDR_SRAM + 0x80000, | 1557 | MW_DATA_ADDR_SRAM + 0x80000, |
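
Note: two separate hazards are closed in esas2r_init.c. First, esas2r_cleanup() dereferenced host->hostdata in its initializer before the host == NULL check ran, which is undefined behaviour and licenses the compiler to delete the check; the fix defers the assignment until after the check. Second, the PCIe-capability test used logical AND where bitwise AND was intended: "if (0xffff && pcie_cap_reg)" reduces to "if (pcie_cap_reg)", which only behaved correctly because pci_find_capability() returns 0 on failure. The deref-before-check shape to avoid:

    struct esas2r_adapter *a =
            (struct esas2r_adapter *)host->hostdata;   /* dereferences host */

    if (host == NULL)       /* too late: behaviour is already undefined */
            return -1;
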
diff --git a/drivers/scsi/esas2r/esas2r_ioctl.c b/drivers/scsi/esas2r/esas2r_ioctl.c index f3d0cb885972..e5b09027e066 100644 --- a/drivers/scsi/esas2r/esas2r_ioctl.c +++ b/drivers/scsi/esas2r/esas2r_ioctl.c | |||
@@ -415,7 +415,7 @@ static int csmi_ioctl_callback(struct esas2r_adapter *a, | |||
415 | lun = tm->lun; | 415 | lun = tm->lun; |
416 | } | 416 | } |
417 | 417 | ||
418 | if (path > 0 || tid > ESAS2R_MAX_ID) { | 418 | if (path > 0) { |
419 | rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( | 419 | rq->func_rsp.ioctl_rsp.csmi.csmi_status = cpu_to_le32( |
420 | CSMI_STS_INV_PARAM); | 420 | CSMI_STS_INV_PARAM); |
421 | return false; | 421 | return false; |
diff --git a/drivers/scsi/esas2r/esas2r_vda.c b/drivers/scsi/esas2r/esas2r_vda.c index f8ec6d636846..fd1392879647 100644 --- a/drivers/scsi/esas2r/esas2r_vda.c +++ b/drivers/scsi/esas2r/esas2r_vda.c | |||
@@ -302,6 +302,7 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, | |||
302 | if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { | 302 | if (vi->cmd.cfg.cfg_func == VDA_CFG_GET_INIT) { |
303 | struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; | 303 | struct atto_ioctl_vda_cfg_cmd *cfg = &vi->cmd.cfg; |
304 | struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; | 304 | struct atto_vda_cfg_rsp *rsp = &rq->func_rsp.cfg_rsp; |
305 | char buf[sizeof(cfg->data.init.fw_release) + 1]; | ||
305 | 306 | ||
306 | cfg->data_length = | 307 | cfg->data_length = |
307 | cpu_to_le32(sizeof(struct atto_vda_cfg_init)); | 308 | cpu_to_le32(sizeof(struct atto_vda_cfg_init)); |
@@ -309,11 +310,13 @@ static void esas2r_complete_vda_ioctl(struct esas2r_adapter *a, | |||
309 | le32_to_cpu(rsp->vda_version); | 310 | le32_to_cpu(rsp->vda_version); |
310 | cfg->data.init.fw_build = rsp->fw_build; | 311 | cfg->data.init.fw_build = rsp->fw_build; |
311 | 312 | ||
312 | sprintf((char *)&cfg->data.init.fw_release, | 313 | snprintf(buf, sizeof(buf), "%1d.%02d", |
313 | "%1d.%02d", | ||
314 | (int)LOBYTE(le16_to_cpu(rsp->fw_release)), | 314 | (int)LOBYTE(le16_to_cpu(rsp->fw_release)), |
315 | (int)HIBYTE(le16_to_cpu(rsp->fw_release))); | 315 | (int)HIBYTE(le16_to_cpu(rsp->fw_release))); |
316 | 316 | ||
317 | memcpy(&cfg->data.init.fw_release, buf, | ||
318 | sizeof(cfg->data.init.fw_release)); | ||
319 | |||
317 | if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') | 320 | if (LOWORD(LOBYTE(cfg->data.init.fw_build)) == 'A') |
318 | cfg->data.init.fw_version = | 321 | cfg->data.init.fw_version = |
319 | cfg->data.init.fw_build; | 322 | cfg->data.init.fw_build; |
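
Note: the esas2r_vda change replaces an unbounded sprintf() into the fixed-size fw_release field. Formatting now lands in a local buffer sized one byte larger, leaving room for the NUL that snprintf() always writes, and exactly sizeof(fw_release) bytes are then copied into the field, so the terminator can never overrun it. The shape, with dst, major and minor as hypothetical stand-ins:

    char buf[sizeof(dst->fw_release) + 1];  /* +1 for snprintf's NUL */

    snprintf(buf, sizeof(buf), "%1d.%02d", major, minor);
    memcpy(dst->fw_release, buf, sizeof(dst->fw_release));
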
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h index c18c68150e9f..e4dd3d7cd236 100644 --- a/drivers/scsi/fnic/fnic.h +++ b/drivers/scsi/fnic/fnic.h | |||
@@ -43,6 +43,8 @@ | |||
43 | #define DFX DRV_NAME "%d: " | 43 | #define DFX DRV_NAME "%d: " |
44 | 44 | ||
45 | #define DESC_CLEAN_LOW_WATERMARK 8 | 45 | #define DESC_CLEAN_LOW_WATERMARK 8 |
46 | #define FNIC_UCSM_DFLT_THROTTLE_CNT_BLD 16 /* UCSM default throttle count */ | ||
47 | #define FNIC_MIN_IO_REQ 256 /* Min IO throttle count */ | ||
46 | #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ | 48 | #define FNIC_MAX_IO_REQ 2048 /* scsi_cmnd tag map entries */ |
47 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ | 49 | #define FNIC_IO_LOCKS 64 /* IO locks: power of 2 */ |
48 | #define FNIC_DFLT_QUEUE_DEPTH 32 | 50 | #define FNIC_DFLT_QUEUE_DEPTH 32 |
@@ -154,6 +156,9 @@ do { \ | |||
154 | FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ | 156 | FNIC_CHECK_LOGGING(FNIC_ISR_LOGGING, \ |
155 | shost_printk(kern_level, host, fmt, ##args);) | 157 | shost_printk(kern_level, host, fmt, ##args);) |
156 | 158 | ||
159 | #define FNIC_MAIN_NOTE(kern_level, host, fmt, args...) \ | ||
160 | shost_printk(kern_level, host, fmt, ##args) | ||
161 | |||
157 | extern const char *fnic_state_str[]; | 162 | extern const char *fnic_state_str[]; |
158 | 163 | ||
159 | enum fnic_intx_intr_index { | 164 | enum fnic_intx_intr_index { |
@@ -215,10 +220,12 @@ struct fnic { | |||
215 | 220 | ||
216 | struct vnic_stats *stats; | 221 | struct vnic_stats *stats; |
217 | unsigned long stats_time; /* time of stats update */ | 222 | unsigned long stats_time; /* time of stats update */ |
223 | unsigned long stats_reset_time; /* time of stats reset */ | ||
218 | struct vnic_nic_cfg *nic_cfg; | 224 | struct vnic_nic_cfg *nic_cfg; |
219 | char name[IFNAMSIZ]; | 225 | char name[IFNAMSIZ]; |
220 | struct timer_list notify_timer; /* used for MSI interrupts */ | 226 | struct timer_list notify_timer; /* used for MSI interrupts */ |
221 | 227 | ||
228 | unsigned int fnic_max_tag_id; | ||
222 | unsigned int err_intr_offset; | 229 | unsigned int err_intr_offset; |
223 | unsigned int link_intr_offset; | 230 | unsigned int link_intr_offset; |
224 | 231 | ||
@@ -359,4 +366,5 @@ fnic_chk_state_flags_locked(struct fnic *fnic, unsigned long st_flags) | |||
359 | return ((fnic->state_flags & st_flags) == st_flags); | 366 | return ((fnic->state_flags & st_flags) == st_flags); |
360 | } | 367 | } |
361 | void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); | 368 | void __fnic_set_state_flags(struct fnic *, unsigned long, unsigned long); |
369 | void fnic_dump_fchost_stats(struct Scsi_Host *, struct fc_host_statistics *); | ||
362 | #endif /* _FNIC_H_ */ | 370 | #endif /* _FNIC_H_ */ |
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c index 42e15ee6e1bb..bbf81ea3a252 100644 --- a/drivers/scsi/fnic/fnic_main.c +++ b/drivers/scsi/fnic/fnic_main.c | |||
@@ -74,6 +74,10 @@ module_param(fnic_trace_max_pages, uint, S_IRUGO|S_IWUSR); | |||
74 | MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " | 74 | MODULE_PARM_DESC(fnic_trace_max_pages, "Total allocated memory pages " |
75 | "for fnic trace buffer"); | 75 | "for fnic trace buffer"); |
76 | 76 | ||
77 | static unsigned int fnic_max_qdepth = FNIC_DFLT_QUEUE_DEPTH; | ||
78 | module_param(fnic_max_qdepth, uint, S_IRUGO|S_IWUSR); | ||
79 | MODULE_PARM_DESC(fnic_max_qdepth, "Queue depth to report for each LUN"); | ||
80 | |||
77 | static struct libfc_function_template fnic_transport_template = { | 81 | static struct libfc_function_template fnic_transport_template = { |
78 | .frame_send = fnic_send, | 82 | .frame_send = fnic_send, |
79 | .lport_set_port_id = fnic_set_port_id, | 83 | .lport_set_port_id = fnic_set_port_id, |
@@ -91,7 +95,7 @@ static int fnic_slave_alloc(struct scsi_device *sdev) | |||
91 | if (!rport || fc_remote_port_chkready(rport)) | 95 | if (!rport || fc_remote_port_chkready(rport)) |
92 | return -ENXIO; | 96 | return -ENXIO; |
93 | 97 | ||
94 | scsi_activate_tcq(sdev, FNIC_DFLT_QUEUE_DEPTH); | 98 | scsi_activate_tcq(sdev, fnic_max_qdepth); |
95 | return 0; | 99 | return 0; |
96 | } | 100 | } |
97 | 101 | ||
@@ -126,6 +130,7 @@ fnic_set_rport_dev_loss_tmo(struct fc_rport *rport, u32 timeout) | |||
126 | static void fnic_get_host_speed(struct Scsi_Host *shost); | 130 | static void fnic_get_host_speed(struct Scsi_Host *shost); |
127 | static struct scsi_transport_template *fnic_fc_transport; | 131 | static struct scsi_transport_template *fnic_fc_transport; |
128 | static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); | 132 | static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *); |
133 | static void fnic_reset_host_stats(struct Scsi_Host *); | ||
129 | 134 | ||
130 | static struct fc_function_template fnic_fc_functions = { | 135 | static struct fc_function_template fnic_fc_functions = { |
131 | 136 | ||
@@ -153,6 +158,7 @@ static struct fc_function_template fnic_fc_functions = { | |||
153 | .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, | 158 | .set_rport_dev_loss_tmo = fnic_set_rport_dev_loss_tmo, |
154 | .issue_fc_host_lip = fnic_reset, | 159 | .issue_fc_host_lip = fnic_reset, |
155 | .get_fc_host_stats = fnic_get_stats, | 160 | .get_fc_host_stats = fnic_get_stats, |
161 | .reset_fc_host_stats = fnic_reset_host_stats, | ||
156 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), | 162 | .dd_fcrport_size = sizeof(struct fc_rport_libfc_priv), |
157 | .terminate_rport_io = fnic_terminate_rport_io, | 163 | .terminate_rport_io = fnic_terminate_rport_io, |
158 | .bsg_request = fc_lport_bsg_request, | 164 | .bsg_request = fc_lport_bsg_request, |
@@ -206,13 +212,116 @@ static struct fc_host_statistics *fnic_get_stats(struct Scsi_Host *host) | |||
206 | stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; | 212 | stats->error_frames = vs->tx.tx_errors + vs->rx.rx_errors; |
207 | stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; | 213 | stats->dumped_frames = vs->tx.tx_drops + vs->rx.rx_drop; |
208 | stats->invalid_crc_count = vs->rx.rx_crc_errors; | 214 | stats->invalid_crc_count = vs->rx.rx_crc_errors; |
209 | stats->seconds_since_last_reset = (jiffies - lp->boot_time) / HZ; | 215 | stats->seconds_since_last_reset = |
216 | (jiffies - fnic->stats_reset_time) / HZ; | ||
210 | stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); | 217 | stats->fcp_input_megabytes = div_u64(fnic->fcp_input_bytes, 1000000); |
211 | stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); | 218 | stats->fcp_output_megabytes = div_u64(fnic->fcp_output_bytes, 1000000); |
212 | 219 | ||
213 | return stats; | 220 | return stats; |
214 | } | 221 | } |
215 | 222 | ||
223 | /* | ||
224 | * fnic_dump_fchost_stats | ||
225 | * note : dumps fc_statistics into system logs | ||
226 | */ | ||
227 | void fnic_dump_fchost_stats(struct Scsi_Host *host, | ||
228 | struct fc_host_statistics *stats) | ||
229 | { | ||
230 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
231 | "fnic: seconds since last reset = %llu\n", | ||
232 | stats->seconds_since_last_reset); | ||
233 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
234 | "fnic: tx frames = %llu\n", | ||
235 | stats->tx_frames); | ||
236 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
237 | "fnic: tx words = %llu\n", | ||
238 | stats->tx_words); | ||
239 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
240 | "fnic: rx frames = %llu\n", | ||
241 | stats->rx_frames); | ||
242 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
243 | "fnic: rx words = %llu\n", | ||
244 | stats->rx_words); | ||
245 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
246 | "fnic: lip count = %llu\n", | ||
247 | stats->lip_count); | ||
248 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
249 | "fnic: nos count = %llu\n", | ||
250 | stats->nos_count); | ||
251 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
252 | "fnic: error frames = %llu\n", | ||
253 | stats->error_frames); | ||
254 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
255 | "fnic: dumped frames = %llu\n", | ||
256 | stats->dumped_frames); | ||
257 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
258 | "fnic: link failure count = %llu\n", | ||
259 | stats->link_failure_count); | ||
260 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
261 | "fnic: loss of sync count = %llu\n", | ||
262 | stats->loss_of_sync_count); | ||
263 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
264 | "fnic: loss of signal count = %llu\n", | ||
265 | stats->loss_of_signal_count); | ||
266 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
267 | "fnic: prim seq protocol err count = %llu\n", | ||
268 | stats->prim_seq_protocol_err_count); | ||
269 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
270 | "fnic: invalid tx word count= %llu\n", | ||
271 | stats->invalid_tx_word_count); | ||
272 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
273 | "fnic: invalid crc count = %llu\n", | ||
274 | stats->invalid_crc_count); | ||
275 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
276 | "fnic: fcp input requests = %llu\n", | ||
277 | stats->fcp_input_requests); | ||
278 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
279 | "fnic: fcp output requests = %llu\n", | ||
280 | stats->fcp_output_requests); | ||
281 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
282 | "fnic: fcp control requests = %llu\n", | ||
283 | stats->fcp_control_requests); | ||
284 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
285 | "fnic: fcp input megabytes = %llu\n", | ||
286 | stats->fcp_input_megabytes); | ||
287 | FNIC_MAIN_NOTE(KERN_NOTICE, host, | ||
288 | "fnic: fcp output megabytes = %llu\n", | ||
289 | stats->fcp_output_megabytes); | ||
290 | return; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * fnic_reset_host_stats : clears host stats | ||
295 | * note : called when reset_statistics set under sysfs dir | ||
296 | */ | ||
297 | static void fnic_reset_host_stats(struct Scsi_Host *host) | ||
298 | { | ||
299 | int ret; | ||
300 | struct fc_lport *lp = shost_priv(host); | ||
301 | struct fnic *fnic = lport_priv(lp); | ||
302 | struct fc_host_statistics *stats; | ||
303 | unsigned long flags; | ||
304 | |||
305 | /* dump current stats, before clearing them */ | ||
306 | stats = fnic_get_stats(host); | ||
307 | fnic_dump_fchost_stats(host, stats); | ||
308 | |||
309 | spin_lock_irqsave(&fnic->fnic_lock, flags); | ||
310 | ret = vnic_dev_stats_clear(fnic->vdev); | ||
311 | spin_unlock_irqrestore(&fnic->fnic_lock, flags); | ||
312 | |||
313 | if (ret) { | ||
314 | FNIC_MAIN_DBG(KERN_DEBUG, fnic->lport->host, | ||
315 | "fnic: Reset vnic stats failed" | ||
316 | " 0x%x", ret); | ||
317 | return; | ||
318 | } | ||
319 | fnic->stats_reset_time = jiffies; | ||
320 | memset(stats, 0, sizeof(*stats)); | ||
321 | |||
322 | return; | ||
323 | } | ||
324 | |||
216 | void fnic_log_q_error(struct fnic *fnic) | 325 | void fnic_log_q_error(struct fnic *fnic) |
217 | { | 326 | { |
218 | unsigned int i; | 327 | unsigned int i; |
@@ -447,13 +556,6 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
447 | 556 | ||
448 | host->transportt = fnic_fc_transport; | 557 | host->transportt = fnic_fc_transport; |
449 | 558 | ||
450 | err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ); | ||
451 | if (err) { | ||
452 | shost_printk(KERN_ERR, fnic->lport->host, | ||
453 | "Unable to alloc shared tag map\n"); | ||
454 | goto err_out_free_hba; | ||
455 | } | ||
456 | |||
457 | /* Setup PCI resources */ | 559 | /* Setup PCI resources */ |
458 | pci_set_drvdata(pdev, fnic); | 560 | pci_set_drvdata(pdev, fnic); |
459 | 561 | ||
@@ -476,10 +578,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
476 | pci_set_master(pdev); | 578 | pci_set_master(pdev); |
477 | 579 | ||
478 | /* Query PCI controller on system for DMA addressing | 580 | /* Query PCI controller on system for DMA addressing |
479 | * limitation for the device. Try 40-bit first, and | 581 | * limitation for the device. Try 64-bit first, and |
480 | * fail to 32-bit. | 582 | * fail to 32-bit. |
481 | */ | 583 | */ |
482 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40)); | 584 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
483 | if (err) { | 585 | if (err) { |
484 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); | 586 | err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
485 | if (err) { | 587 | if (err) { |
@@ -496,10 +598,10 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
496 | goto err_out_release_regions; | 598 | goto err_out_release_regions; |
497 | } | 599 | } |
498 | } else { | 600 | } else { |
499 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40)); | 601 | err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); |
500 | if (err) { | 602 | if (err) { |
501 | shost_printk(KERN_ERR, fnic->lport->host, | 603 | shost_printk(KERN_ERR, fnic->lport->host, |
502 | "Unable to obtain 40-bit DMA " | 604 | "Unable to obtain 64-bit DMA " |
503 | "for consistent allocations, aborting.\n"); | 605 | "for consistent allocations, aborting.\n"); |
504 | goto err_out_release_regions; | 606 | goto err_out_release_regions; |
505 | } | 607 | } |
@@ -566,6 +668,22 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
566 | "aborting.\n"); | 668 | "aborting.\n"); |
567 | goto err_out_dev_close; | 669 | goto err_out_dev_close; |
568 | } | 670 | } |
671 | |||
672 | /* Configure Maximum Outstanding IO reqs*/ | ||
673 | if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) { | ||
674 | host->can_queue = min_t(u32, FNIC_MAX_IO_REQ, | ||
675 | max_t(u32, FNIC_MIN_IO_REQ, | ||
676 | fnic->config.io_throttle_count)); | ||
677 | } | ||
678 | fnic->fnic_max_tag_id = host->can_queue; | ||
679 | |||
680 | err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id); | ||
681 | if (err) { | ||
682 | shost_printk(KERN_ERR, fnic->lport->host, | ||
683 | "Unable to alloc shared tag map\n"); | ||
684 | goto err_out_dev_close; | ||
685 | } | ||
686 | |||
569 | host->max_lun = fnic->config.luns_per_tgt; | 687 | host->max_lun = fnic->config.luns_per_tgt; |
570 | host->max_id = FNIC_MAX_FCP_TARGET; | 688 | host->max_id = FNIC_MAX_FCP_TARGET; |
571 | host->max_cmd_len = FCOE_MAX_CMD_LEN; | 689 | host->max_cmd_len = FCOE_MAX_CMD_LEN; |
@@ -719,6 +837,7 @@ static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) | |||
719 | } | 837 | } |
720 | 838 | ||
721 | fc_lport_init_stats(lp); | 839 | fc_lport_init_stats(lp); |
840 | fnic->stats_reset_time = jiffies; | ||
722 | 841 | ||
723 | fc_lport_config(lp); | 842 | fc_lport_config(lp); |
724 | 843 | ||
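
Note: the probe path now sizes the shared tag map from the firmware-provided io_throttle_count rather than the fixed FNIC_MAX_IO_REQ, clamped into [FNIC_MIN_IO_REQ, FNIC_MAX_IO_REQ]; a throttle equal to the UCSM build default of 16 is treated as "unconfigured" and leaves can_queue at its prior default. Worked examples of the clamp:

    host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,           /* 2048 */
                            max_t(u32, FNIC_MIN_IO_REQ,     /*  256 */
                                  fnic->config.io_throttle_count));

    /* io_throttle_count =   64 -> can_queue =  256 (raised to the floor)
     * io_throttle_count = 1024 -> can_queue = 1024 (passed through)
     * io_throttle_count = 4096 -> can_queue = 2048 (capped)             */
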
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index a97e6e584f8c..d014aae19134 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c | |||
@@ -111,6 +111,12 @@ static inline spinlock_t *fnic_io_lock_hash(struct fnic *fnic, | |||
111 | return &fnic->io_req_lock[hash]; | 111 | return &fnic->io_req_lock[hash]; |
112 | } | 112 | } |
113 | 113 | ||
114 | static inline spinlock_t *fnic_io_lock_tag(struct fnic *fnic, | ||
115 | int tag) | ||
116 | { | ||
117 | return &fnic->io_req_lock[tag & (FNIC_IO_LOCKS - 1)]; | ||
118 | } | ||
119 | |||
114 | /* | 120 | /* |
115 | * Unmap the data buffer and sense buffer for an io_req, | 121 | * Unmap the data buffer and sense buffer for an io_req, |
116 | * also unmap and free the device-private scatter/gather list. | 122 | * also unmap and free the device-private scatter/gather list. |
@@ -730,7 +736,7 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
730 | fcpio_tag_id_dec(&tag, &id); | 736 | fcpio_tag_id_dec(&tag, &id); |
731 | icmnd_cmpl = &desc->u.icmnd_cmpl; | 737 | icmnd_cmpl = &desc->u.icmnd_cmpl; |
732 | 738 | ||
733 | if (id >= FNIC_MAX_IO_REQ) { | 739 | if (id >= fnic->fnic_max_tag_id) { |
734 | shost_printk(KERN_ERR, fnic->lport->host, | 740 | shost_printk(KERN_ERR, fnic->lport->host, |
735 | "Tag out of range tag %x hdr status = %s\n", | 741 | "Tag out of range tag %x hdr status = %s\n", |
736 | id, fnic_fcpio_status_to_str(hdr_status)); | 742 | id, fnic_fcpio_status_to_str(hdr_status)); |
@@ -818,38 +824,6 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic, | |||
818 | if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) | 824 | if (icmnd_cmpl->flags & FCPIO_ICMND_CMPL_RESID_UNDER) |
819 | xfer_len -= icmnd_cmpl->residual; | 825 | xfer_len -= icmnd_cmpl->residual; |
820 | 826 | ||
821 | /* | ||
822 | * If queue_full, then try to reduce queue depth for all | ||
823 | * LUNS on the target. Todo: this should be accompanied | ||
824 | * by a periodic queue_depth rampup based on successful | ||
825 | * IO completion. | ||
826 | */ | ||
827 | if (icmnd_cmpl->scsi_status == QUEUE_FULL) { | ||
828 | struct scsi_device *t_sdev; | ||
829 | int qd = 0; | ||
830 | |||
831 | shost_for_each_device(t_sdev, sc->device->host) { | ||
832 | if (t_sdev->id != sc->device->id) | ||
833 | continue; | ||
834 | |||
835 | if (t_sdev->queue_depth > 1) { | ||
836 | qd = scsi_track_queue_full | ||
837 | (t_sdev, | ||
838 | t_sdev->queue_depth - 1); | ||
839 | if (qd == -1) | ||
840 | qd = t_sdev->host->cmd_per_lun; | ||
841 | shost_printk(KERN_INFO, | ||
842 | fnic->lport->host, | ||
843 | "scsi[%d:%d:%d:%d" | ||
844 | "] queue full detected," | ||
845 | "new depth = %d\n", | ||
846 | t_sdev->host->host_no, | ||
847 | t_sdev->channel, | ||
848 | t_sdev->id, t_sdev->lun, | ||
849 | t_sdev->queue_depth); | ||
850 | } | ||
851 | } | ||
852 | } | ||
853 | break; | 827 | break; |
854 | 828 | ||
855 | case FCPIO_TIMEOUT: /* request was timed out */ | 829 | case FCPIO_TIMEOUT: /* request was timed out */ |
@@ -939,7 +913,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
939 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); | 913 | fcpio_header_dec(&desc->hdr, &type, &hdr_status, &tag); |
940 | fcpio_tag_id_dec(&tag, &id); | 914 | fcpio_tag_id_dec(&tag, &id); |
941 | 915 | ||
942 | if ((id & FNIC_TAG_MASK) >= FNIC_MAX_IO_REQ) { | 916 | if ((id & FNIC_TAG_MASK) >= fnic->fnic_max_tag_id) { |
943 | shost_printk(KERN_ERR, fnic->lport->host, | 917 | shost_printk(KERN_ERR, fnic->lport->host, |
944 | "Tag out of range tag %x hdr status = %s\n", | 918 | "Tag out of range tag %x hdr status = %s\n", |
945 | id, fnic_fcpio_status_to_str(hdr_status)); | 919 | id, fnic_fcpio_status_to_str(hdr_status)); |
@@ -988,9 +962,7 @@ static void fnic_fcpio_itmf_cmpl_handler(struct fnic *fnic, | |||
988 | spin_unlock_irqrestore(io_lock, flags); | 962 | spin_unlock_irqrestore(io_lock, flags); |
989 | return; | 963 | return; |
990 | } | 964 | } |
991 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | ||
992 | CMD_ABTS_STATUS(sc) = hdr_status; | 965 | CMD_ABTS_STATUS(sc) = hdr_status; |
993 | |||
994 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; | 966 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; |
995 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, | 967 | FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, |
996 | "abts cmpl recd. id %d status %s\n", | 968 | "abts cmpl recd. id %d status %s\n", |
@@ -1148,23 +1120,25 @@ int fnic_wq_copy_cmpl_handler(struct fnic *fnic, int copy_work_to_do) | |||
1148 | 1120 | ||
1149 | static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) | 1121 | static void fnic_cleanup_io(struct fnic *fnic, int exclude_id) |
1150 | { | 1122 | { |
1151 | unsigned int i; | 1123 | int i; |
1152 | struct fnic_io_req *io_req; | 1124 | struct fnic_io_req *io_req; |
1153 | unsigned long flags = 0; | 1125 | unsigned long flags = 0; |
1154 | struct scsi_cmnd *sc; | 1126 | struct scsi_cmnd *sc; |
1155 | spinlock_t *io_lock; | 1127 | spinlock_t *io_lock; |
1156 | unsigned long start_time = 0; | 1128 | unsigned long start_time = 0; |
1157 | 1129 | ||
1158 | for (i = 0; i < FNIC_MAX_IO_REQ; i++) { | 1130 | for (i = 0; i < fnic->fnic_max_tag_id; i++) { |
1159 | if (i == exclude_id) | 1131 | if (i == exclude_id) |
1160 | continue; | 1132 | continue; |
1161 | 1133 | ||
1134 | io_lock = fnic_io_lock_tag(fnic, i); | ||
1135 | spin_lock_irqsave(io_lock, flags); | ||
1162 | sc = scsi_host_find_tag(fnic->lport->host, i); | 1136 | sc = scsi_host_find_tag(fnic->lport->host, i); |
1163 | if (!sc) | 1137 | if (!sc) { |
1138 | spin_unlock_irqrestore(io_lock, flags); | ||
1164 | continue; | 1139 | continue; |
1140 | } | ||
1165 | 1141 | ||
1166 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1167 | spin_lock_irqsave(io_lock, flags); | ||
1168 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1142 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1169 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && | 1143 | if ((CMD_FLAGS(sc) & FNIC_DEVICE_RESET) && |
1170 | !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { | 1144 | !(CMD_FLAGS(sc) & FNIC_DEV_RST_DONE)) { |
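
Note: a pattern repeated across the fnic_scsi.c hunks (here and in fnic_rport_exch_reset(), fnic_terminate_rport_io() and fnic_clean_pending_aborts() below): the per-IO spinlock is now taken before scsi_host_find_tag(). The old order left a window in which the command could complete and its io_req be freed between lookup and lock. For this to be safe, fnic_io_lock_tag() must select the same io_req_lock slot that fnic_io_lock_hash() picks for that command on the completion path, both hashing the tag onto tag & (FNIC_IO_LOCKS - 1). Before and after:

    /* Old, racy order: io_req may be freed between these two steps. */
    sc = scsi_host_find_tag(fnic->lport->host, tag);
    io_lock = fnic_io_lock_hash(fnic, sc);
    spin_lock_irqsave(io_lock, flags);

    /* New order: lock first, keyed by tag, then look the command up. */
    io_lock = fnic_io_lock_tag(fnic, tag);
    spin_lock_irqsave(io_lock, flags);
    sc = scsi_host_find_tag(fnic->lport->host, tag);
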
@@ -1236,7 +1210,7 @@ void fnic_wq_copy_cleanup_handler(struct vnic_wq_copy *wq, | |||
1236 | fcpio_tag_id_dec(&desc->hdr.tag, &id); | 1210 | fcpio_tag_id_dec(&desc->hdr.tag, &id); |
1237 | id &= FNIC_TAG_MASK; | 1211 | id &= FNIC_TAG_MASK; |
1238 | 1212 | ||
1239 | if (id >= FNIC_MAX_IO_REQ) | 1213 | if (id >= fnic->fnic_max_tag_id) |
1240 | return; | 1214 | return; |
1241 | 1215 | ||
1242 | sc = scsi_host_find_tag(fnic->lport->host, id); | 1216 | sc = scsi_host_find_tag(fnic->lport->host, id); |
@@ -1340,14 +1314,15 @@ static void fnic_rport_exch_reset(struct fnic *fnic, u32 port_id) | |||
1340 | if (fnic->in_remove) | 1314 | if (fnic->in_remove) |
1341 | return; | 1315 | return; |
1342 | 1316 | ||
1343 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1317 | for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { |
1344 | abt_tag = tag; | 1318 | abt_tag = tag; |
1319 | io_lock = fnic_io_lock_tag(fnic, tag); | ||
1320 | spin_lock_irqsave(io_lock, flags); | ||
1345 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1321 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
1346 | if (!sc) | 1322 | if (!sc) { |
1323 | spin_unlock_irqrestore(io_lock, flags); | ||
1347 | continue; | 1324 | continue; |
1348 | 1325 | } | |
1349 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1350 | spin_lock_irqsave(io_lock, flags); | ||
1351 | 1326 | ||
1352 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1327 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1353 | 1328 | ||
@@ -1441,12 +1416,29 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1441 | unsigned long flags; | 1416 | unsigned long flags; |
1442 | struct scsi_cmnd *sc; | 1417 | struct scsi_cmnd *sc; |
1443 | struct scsi_lun fc_lun; | 1418 | struct scsi_lun fc_lun; |
1444 | struct fc_rport_libfc_priv *rdata = rport->dd_data; | 1419 | struct fc_rport_libfc_priv *rdata; |
1445 | struct fc_lport *lport = rdata->local_port; | 1420 | struct fc_lport *lport; |
1446 | struct fnic *fnic = lport_priv(lport); | 1421 | struct fnic *fnic; |
1447 | struct fc_rport *cmd_rport; | 1422 | struct fc_rport *cmd_rport; |
1448 | enum fnic_ioreq_state old_ioreq_state; | 1423 | enum fnic_ioreq_state old_ioreq_state; |
1449 | 1424 | ||
1425 | if (!rport) { | ||
1426 | printk(KERN_ERR "fnic_terminate_rport_io: rport is NULL\n"); | ||
1427 | return; | ||
1428 | } | ||
1429 | rdata = rport->dd_data; | ||
1430 | |||
1431 | if (!rdata) { | ||
1432 | printk(KERN_ERR "fnic_terminate_rport_io: rdata is NULL\n"); | ||
1433 | return; | ||
1434 | } | ||
1435 | lport = rdata->local_port; | ||
1436 | |||
1437 | if (!lport) { | ||
1438 | printk(KERN_ERR "fnic_terminate_rport_io: lport is NULL\n"); | ||
1439 | return; | ||
1440 | } | ||
1441 | fnic = lport_priv(lport); | ||
1450 | FNIC_SCSI_DBG(KERN_DEBUG, | 1442 | FNIC_SCSI_DBG(KERN_DEBUG, |
1451 | fnic->lport->host, "fnic_terminate_rport_io called" | 1443 | fnic->lport->host, "fnic_terminate_rport_io called" |
1452 | " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", | 1444 | " wwpn 0x%llx, wwnn0x%llx, rport 0x%p, portid 0x%06x\n", |
@@ -1456,18 +1448,21 @@ void fnic_terminate_rport_io(struct fc_rport *rport) | |||
1456 | if (fnic->in_remove) | 1448 | if (fnic->in_remove) |
1457 | return; | 1449 | return; |
1458 | 1450 | ||
1459 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1451 | for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { |
1460 | abt_tag = tag; | 1452 | abt_tag = tag; |
1453 | io_lock = fnic_io_lock_tag(fnic, tag); | ||
1454 | spin_lock_irqsave(io_lock, flags); | ||
1461 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1455 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
1462 | if (!sc) | 1456 | if (!sc) { |
1457 | spin_unlock_irqrestore(io_lock, flags); | ||
1463 | continue; | 1458 | continue; |
1459 | } | ||
1464 | 1460 | ||
1465 | cmd_rport = starget_to_rport(scsi_target(sc->device)); | 1461 | cmd_rport = starget_to_rport(scsi_target(sc->device)); |
1466 | if (rport != cmd_rport) | 1462 | if (rport != cmd_rport) { |
1463 | spin_unlock_irqrestore(io_lock, flags); | ||
1467 | continue; | 1464 | continue; |
1468 | 1465 | } | |
1469 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1470 | spin_lock_irqsave(io_lock, flags); | ||
1471 | 1466 | ||
1472 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1467 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1473 | 1468 | ||
@@ -1680,13 +1675,15 @@ int fnic_abort_cmd(struct scsi_cmnd *sc) | |||
1680 | io_req->abts_done = NULL; | 1675 | io_req->abts_done = NULL; |
1681 | 1676 | ||
1682 | /* fw did not complete abort, timed out */ | 1677 | /* fw did not complete abort, timed out */ |
1683 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | 1678 | if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { |
1684 | spin_unlock_irqrestore(io_lock, flags); | 1679 | spin_unlock_irqrestore(io_lock, flags); |
1685 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; | 1680 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_TIMED_OUT; |
1686 | ret = FAILED; | 1681 | ret = FAILED; |
1687 | goto fnic_abort_cmd_end; | 1682 | goto fnic_abort_cmd_end; |
1688 | } | 1683 | } |
1689 | 1684 | ||
1685 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | ||
1686 | |||
1690 | /* | 1687 | /* |
1691 | * firmware completed the abort, check the status, | 1688 | * firmware completed the abort, check the status, |
1692 | * free the io_req irrespective of failure or success | 1689 | * free the io_req irrespective of failure or success |
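
Note: the abort-timeout test changes its witness. Rather than inferring a timeout from CMD_STATE(sc) still being ABTS_PENDING, the code checks whether CMD_ABTS_STATUS(sc) still holds FCPIO_INVALID_CODE, and only advances the state to ABTS_COMPLETE once a real completion status has arrived; fnic_clean_pending_aborts() below gets the same treatment. Assuming the issue path seeds the field with that sentinel, the shape is:

    CMD_ABTS_STATUS(sc) = FCPIO_INVALID_CODE;   /* seeded when the abort is issued */
    /* ... wait for the firmware's abort completion ... */
    if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE)
            return FAILED;                      /* sentinel intact: timed out */
    CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;   /* real status arrived */
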
@@ -1784,17 +1781,18 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1784 | DECLARE_COMPLETION_ONSTACK(tm_done); | 1781 | DECLARE_COMPLETION_ONSTACK(tm_done); |
1785 | enum fnic_ioreq_state old_ioreq_state; | 1782 | enum fnic_ioreq_state old_ioreq_state; |
1786 | 1783 | ||
1787 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 1784 | for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { |
1785 | io_lock = fnic_io_lock_tag(fnic, tag); | ||
1786 | spin_lock_irqsave(io_lock, flags); | ||
1788 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 1787 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
1789 | /* | 1788 | /* |
1790 | * ignore this lun reset cmd or cmds that do not belong to | 1789 | * ignore this lun reset cmd or cmds that do not belong to |
1791 | * this lun | 1790 | * this lun |
1792 | */ | 1791 | */ |
1793 | if (!sc || sc == lr_sc || sc->device != lun_dev) | 1792 | if (!sc || sc == lr_sc || sc->device != lun_dev) { |
1793 | spin_unlock_irqrestore(io_lock, flags); | ||
1794 | continue; | 1794 | continue; |
1795 | 1795 | } | |
1796 | io_lock = fnic_io_lock_hash(fnic, sc); | ||
1797 | spin_lock_irqsave(io_lock, flags); | ||
1798 | 1796 | ||
1799 | io_req = (struct fnic_io_req *)CMD_SP(sc); | 1797 | io_req = (struct fnic_io_req *)CMD_SP(sc); |
1800 | 1798 | ||
@@ -1823,6 +1821,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1823 | spin_unlock_irqrestore(io_lock, flags); | 1821 | spin_unlock_irqrestore(io_lock, flags); |
1824 | continue; | 1822 | continue; |
1825 | } | 1823 | } |
1824 | |||
1825 | if (io_req->abts_done) | ||
1826 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1827 | "%s: io_req->abts_done is set state is %s\n", | ||
1828 | __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1826 | old_ioreq_state = CMD_STATE(sc); | 1829 | old_ioreq_state = CMD_STATE(sc); |
1827 | /* | 1830 | /* |
1828 | * Any pending IO issued prior to reset is expected to be | 1831 | * Any pending IO issued prior to reset is expected to be |
@@ -1833,11 +1836,6 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1833 | */ | 1836 | */ |
1834 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; | 1837 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_PENDING; |
1835 | 1838 | ||
1836 | if (io_req->abts_done) | ||
1837 | shost_printk(KERN_ERR, fnic->lport->host, | ||
1838 | "%s: io_req->abts_done is set state is %s\n", | ||
1839 | __func__, fnic_ioreq_state_to_str(CMD_STATE(sc))); | ||
1840 | |||
1841 | BUG_ON(io_req->abts_done); | 1839 | BUG_ON(io_req->abts_done); |
1842 | 1840 | ||
1843 | abt_tag = tag; | 1841 | abt_tag = tag; |
@@ -1890,12 +1888,13 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, | |||
1890 | io_req->abts_done = NULL; | 1888 | io_req->abts_done = NULL; |
1891 | 1889 | ||
1892 | /* if abort is still pending with fw, fail */ | 1890 | /* if abort is still pending with fw, fail */ |
1893 | if (CMD_STATE(sc) == FNIC_IOREQ_ABTS_PENDING) { | 1891 | if (CMD_ABTS_STATUS(sc) == FCPIO_INVALID_CODE) { |
1894 | spin_unlock_irqrestore(io_lock, flags); | 1892 | spin_unlock_irqrestore(io_lock, flags); |
1895 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; | 1893 | CMD_FLAGS(sc) |= FNIC_IO_ABT_TERM_DONE; |
1896 | ret = 1; | 1894 | ret = 1; |
1897 | goto clean_pending_aborts_end; | 1895 | goto clean_pending_aborts_end; |
1898 | } | 1896 | } |
1897 | CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE; | ||
1899 | CMD_SP(sc) = NULL; | 1898 | CMD_SP(sc) = NULL; |
1900 | spin_unlock_irqrestore(io_lock, flags); | 1899 | spin_unlock_irqrestore(io_lock, flags); |
1901 | 1900 | ||
@@ -2093,8 +2092,8 @@ int fnic_device_reset(struct scsi_cmnd *sc) | |||
2093 | spin_unlock_irqrestore(io_lock, flags); | 2092 | spin_unlock_irqrestore(io_lock, flags); |
2094 | int_to_scsilun(sc->device->lun, &fc_lun); | 2093 | int_to_scsilun(sc->device->lun, &fc_lun); |
2095 | /* | 2094 | /* |
2096 | * Issue abort and terminate on the device reset request. | 2095 | * Issue abort and terminate on device reset request. |
2097 | * If q'ing of the abort fails, retry issue it after a delay. | 2096 | * If q'ing of terminate fails, retry it after a delay. |
2098 | */ | 2097 | */ |
2099 | while (1) { | 2098 | while (1) { |
2100 | spin_lock_irqsave(io_lock, flags); | 2099 | spin_lock_irqsave(io_lock, flags); |
@@ -2405,7 +2404,7 @@ int fnic_is_abts_pending(struct fnic *fnic, struct scsi_cmnd *lr_sc) | |||
2405 | lun_dev = lr_sc->device; | 2404 | lun_dev = lr_sc->device; |
2406 | 2405 | ||
2407 | /* walk again to check if IOs are still pending in fw */ | 2406 | /* walk again to check if IOs are still pending in fw */ |
2408 | for (tag = 0; tag < FNIC_MAX_IO_REQ; tag++) { | 2407 | for (tag = 0; tag < fnic->fnic_max_tag_id; tag++) { |
2409 | sc = scsi_host_find_tag(fnic->lport->host, tag); | 2408 | sc = scsi_host_find_tag(fnic->lport->host, tag); |
2410 | /* | 2409 | /* |
2411 | * ignore this lun reset cmd or cmds that do not belong to | 2410 | * ignore this lun reset cmd or cmds that do not belong to |
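
The fnic_terminate_rport_io() and fnic_clean_pending_aborts() hunks also reorder locking: the per-tag io_lock (now obtained with fnic_io_lock_tag() instead of hashing the scsi_cmnd) is taken before scsi_host_find_tag(), so a command can no longer complete and be recycled between the lookup and the lock. A compilable userspace sketch of that ordering, with a pthread mutex array standing in for the driver's lock table and a stub in place of scsi_host_find_tag():

    #include <pthread.h>
    #include <stddef.h>

    #define IO_LOCKS 64   /* illustrative table size */

    /* one lock per bucket, indexed by tag (GCC range-initializer extension) */
    static pthread_mutex_t io_lock_tbl[IO_LOCKS] = {
        [0 ... IO_LOCKS - 1] = PTHREAD_MUTEX_INITIALIZER
    };

    static pthread_mutex_t *io_lock_tag(unsigned int tag)
    {
        return &io_lock_tbl[tag % IO_LOCKS];
    }

    /* stub for scsi_host_find_tag(): NULL means the tag is free */
    static void *find_cmd_by_tag(unsigned int tag)
    {
        (void)tag;
        return NULL;
    }

    static void terminate_one_tag(unsigned int tag)
    {
        pthread_mutex_t *lock = io_lock_tag(tag);   /* lock first ... */
        void *sc;

        pthread_mutex_lock(lock);
        sc = find_cmd_by_tag(tag);                  /* ... then look up */
        if (!sc) {
            pthread_mutex_unlock(lock);             /* tag already freed */
            return;
        }
        /* sc is pinned: completion must take the same lock to free it */
        pthread_mutex_unlock(lock);
    }

    int main(void)
    {
        terminate_one_tag(7);
        return 0;
    }
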
diff --git a/drivers/scsi/fnic/vnic_scsi.h b/drivers/scsi/fnic/vnic_scsi.h index fbb55364e272..e343e1d0f801 100644 --- a/drivers/scsi/fnic/vnic_scsi.h +++ b/drivers/scsi/fnic/vnic_scsi.h | |||
@@ -54,8 +54,8 @@ | |||
54 | #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 | 54 | #define VNIC_FNIC_PLOGI_TIMEOUT_MIN 1000 |
55 | #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 | 55 | #define VNIC_FNIC_PLOGI_TIMEOUT_MAX 255000 |
56 | 56 | ||
57 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 256 | 57 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MIN 1 |
58 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 4096 | 58 | #define VNIC_FNIC_IO_THROTTLE_COUNT_MAX 2048 |
59 | 59 | ||
60 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 | 60 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MIN 0 |
61 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 | 61 | #define VNIC_FNIC_LINK_DOWN_TIMEOUT_MAX 240000 |
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index fac8cf5832dd..891c86b66253 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #include "hpsa.h" | 54 | #include "hpsa.h" |
55 | 55 | ||
56 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ | 56 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ |
57 | #define HPSA_DRIVER_VERSION "2.0.2-1" | 57 | #define HPSA_DRIVER_VERSION "3.4.0-1" |
58 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 58 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
59 | #define HPSA "hpsa" | 59 | #define HPSA "hpsa" |
60 | 60 | ||
@@ -89,13 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = { | |||
89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, | 89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
90 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, | 90 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, |
91 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, | 91 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
92 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, | 92 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, |
93 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, | 93 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, |
94 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, | 94 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, |
95 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, | 95 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350}, |
96 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, | 96 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351}, |
97 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, | 97 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352}, |
98 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, | 98 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353}, |
99 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334D}, | ||
99 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, | 100 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354}, |
100 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, | 101 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355}, |
101 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, | 102 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356}, |
@@ -107,7 +108,19 @@ static const struct pci_device_id hpsa_pci_device_id[] = { | |||
107 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, | 108 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1925}, |
108 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, | 109 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1926}, |
109 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, | 110 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1928}, |
110 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x334d}, | 111 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSH, 0x103C, 0x1929}, |
112 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BD}, | ||
113 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BE}, | ||
114 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21BF}, | ||
115 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C0}, | ||
116 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C1}, | ||
117 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C2}, | ||
118 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C3}, | ||
119 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C4}, | ||
120 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C5}, | ||
121 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C7}, | ||
122 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C8}, | ||
123 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSI, 0x103C, 0x21C9}, | ||
111 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 124 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
112 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, | 125 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
113 | {0,} | 126 | {0,} |
@@ -125,24 +138,35 @@ static struct board_type products[] = { | |||
125 | {0x3245103C, "Smart Array P410i", &SA5_access}, | 138 | {0x3245103C, "Smart Array P410i", &SA5_access}, |
126 | {0x3247103C, "Smart Array P411", &SA5_access}, | 139 | {0x3247103C, "Smart Array P411", &SA5_access}, |
127 | {0x3249103C, "Smart Array P812", &SA5_access}, | 140 | {0x3249103C, "Smart Array P812", &SA5_access}, |
128 | {0x324a103C, "Smart Array P712m", &SA5_access}, | 141 | {0x324A103C, "Smart Array P712m", &SA5_access}, |
129 | {0x324b103C, "Smart Array P711m", &SA5_access}, | 142 | {0x324B103C, "Smart Array P711m", &SA5_access}, |
130 | {0x3350103C, "Smart Array P222", &SA5_access}, | 143 | {0x3350103C, "Smart Array P222", &SA5_access}, |
131 | {0x3351103C, "Smart Array P420", &SA5_access}, | 144 | {0x3351103C, "Smart Array P420", &SA5_access}, |
132 | {0x3352103C, "Smart Array P421", &SA5_access}, | 145 | {0x3352103C, "Smart Array P421", &SA5_access}, |
133 | {0x3353103C, "Smart Array P822", &SA5_access}, | 146 | {0x3353103C, "Smart Array P822", &SA5_access}, |
147 | {0x334D103C, "Smart Array P822se", &SA5_access}, | ||
134 | {0x3354103C, "Smart Array P420i", &SA5_access}, | 148 | {0x3354103C, "Smart Array P420i", &SA5_access}, |
135 | {0x3355103C, "Smart Array P220i", &SA5_access}, | 149 | {0x3355103C, "Smart Array P220i", &SA5_access}, |
136 | {0x3356103C, "Smart Array P721m", &SA5_access}, | 150 | {0x3356103C, "Smart Array P721m", &SA5_access}, |
137 | {0x1920103C, "Smart Array", &SA5_access}, | 151 | {0x1921103C, "Smart Array P830i", &SA5_access}, |
138 | {0x1921103C, "Smart Array", &SA5_access}, | 152 | {0x1922103C, "Smart Array P430", &SA5_access}, |
139 | {0x1922103C, "Smart Array", &SA5_access}, | 153 | {0x1923103C, "Smart Array P431", &SA5_access}, |
140 | {0x1923103C, "Smart Array", &SA5_access}, | 154 | {0x1924103C, "Smart Array P830", &SA5_access}, |
141 | {0x1924103C, "Smart Array", &SA5_access}, | 155 | {0x1926103C, "Smart Array P731m", &SA5_access}, |
142 | {0x1925103C, "Smart Array", &SA5_access}, | 156 | {0x1928103C, "Smart Array P230i", &SA5_access}, |
143 | {0x1926103C, "Smart Array", &SA5_access}, | 157 | {0x1929103C, "Smart Array P530", &SA5_access}, |
144 | {0x1928103C, "Smart Array", &SA5_access}, | 158 | {0x21BD103C, "Smart Array", &SA5_access}, |
145 | {0x334d103C, "Smart Array P822se", &SA5_access}, | 159 | {0x21BE103C, "Smart Array", &SA5_access}, |
160 | {0x21BF103C, "Smart Array", &SA5_access}, | ||
161 | {0x21C0103C, "Smart Array", &SA5_access}, | ||
162 | {0x21C1103C, "Smart Array", &SA5_access}, | ||
163 | {0x21C2103C, "Smart Array", &SA5_access}, | ||
164 | {0x21C3103C, "Smart Array", &SA5_access}, | ||
165 | {0x21C4103C, "Smart Array", &SA5_access}, | ||
166 | {0x21C5103C, "Smart Array", &SA5_access}, | ||
167 | {0x21C7103C, "Smart Array", &SA5_access}, | ||
168 | {0x21C8103C, "Smart Array", &SA5_access}, | ||
169 | {0x21C9103C, "Smart Array", &SA5_access}, | ||
146 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, | 170 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
147 | }; | 171 | }; |
148 | 172 | ||
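
The hpsa PCI-id and products[] hunks grow in lockstep: each new subsystem id gains a table entry keyed by a 32-bit board id that, as the paired entries suggest, packs the PCI subsystem device id into the high 16 bits over the HP subsystem vendor id 0x103C in the low 16. A self-contained sketch of the usual lookup against such a table, falling through to the final catch-all entry; this is model code, not hpsa's helper:

    #include <stdio.h>
    #include <stdint.h>

    struct board_type_model {
        uint32_t board_id;
        const char *product_name;
    };

    static const struct board_type_model products_model[] = {
        {0x3351103C, "Smart Array P420"},
        {0x334D103C, "Smart Array P822se"},
        {0xFFFF103C, "Unknown Smart Array"},   /* catch-all, must stay last */
    };

    static const char *product_name(uint16_t subsys_device, uint16_t subsys_vendor)
    {
        uint32_t board_id = ((uint32_t)subsys_device << 16) | subsys_vendor;
        size_t i;

        for (i = 0; i < sizeof(products_model)/sizeof(products_model[0]) - 1; i++)
            if (products_model[i].board_id == board_id)
                break;
        return products_model[i].product_name; /* falls back to last entry */
    }

    int main(void)
    {
        printf("%s\n", product_name(0x3351, 0x103C)); /* Smart Array P420 */
        printf("%s\n", product_name(0x21C9, 0x103C)); /* not in this model
                                                         table -> catch-all */
        return 0;
    }
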
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 4e31caa21ddf..23f5ba5e6472 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
@@ -2208,7 +2208,10 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) | |||
2208 | 2208 | ||
2209 | if (rsp_rc != 0) { | 2209 | if (rsp_rc != 0) { |
2210 | sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); | 2210 | sdev_printk(KERN_ERR, sdev, "Failed to send cancel event. rc=%d\n", rsp_rc); |
2211 | return -EIO; | 2211 | /* If a failure is received, the host adapter is most likely going
2212 | through reset; return success so the caller will wait for the ||
2213 | command being cancelled to be returned */ ||
2214 | return 0; | ||
2212 | } | 2215 | } |
2213 | 2216 | ||
2214 | sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); | 2217 | sdev_printk(KERN_INFO, sdev, "Cancelling outstanding commands.\n"); |
@@ -2221,7 +2224,15 @@ static int ibmvfc_cancel_all(struct scsi_device *sdev, int type) | |||
2221 | 2224 | ||
2222 | if (status != IBMVFC_MAD_SUCCESS) { | 2225 | if (status != IBMVFC_MAD_SUCCESS) { |
2223 | sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); | 2226 | sdev_printk(KERN_WARNING, sdev, "Cancel failed with rc=%x\n", status); |
2224 | return -EIO; | 2227 | switch (status) { |
2228 | case IBMVFC_MAD_DRIVER_FAILED: | ||
2229 | case IBMVFC_MAD_CRQ_ERROR: | ||
2230 | /* Host adapter most likely going through reset; return success so | ||
2231 | the caller will wait for the command being cancelled to be returned */ | ||
2232 | return 0; | ||
2233 | default: | ||
2234 | return -EIO; | ||
2235 | } ||
2225 | } | 2236 | } |
2226 | 2237 | ||
2227 | sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); | 2238 | sdev_printk(KERN_INFO, sdev, "Successfully cancelled outstanding commands\n"); |
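
Both return paths above now encode the same policy: a cancel that fails because the host adapter is resetting is reported as success, since the reset itself will flush the outstanding command back to the midlayer, and only genuinely unexpected statuses keep returning -EIO. A compilable distillation of that switch, with stand-in values for the IBMVFC_MAD_* codes (the real ones live in ibmvfc.h):

    #include <errno.h>
    #include <stdio.h>

    /* stand-ins for the IBMVFC_MAD_* status codes */
    enum mad_status_model {
        MAD_SUCCESS_M,
        MAD_DRIVER_FAILED_M,
        MAD_CRQ_ERROR_M,
        MAD_OTHER_FAILURE_M,
    };

    static int cancel_rc(enum mad_status_model status)
    {
        switch (status) {
        case MAD_SUCCESS_M:
            return 0;
        case MAD_DRIVER_FAILED_M:
        case MAD_CRQ_ERROR_M:
            /* adapter reset in flight: report success, the caller waits
             * for the cancelled command to return via the reset path */
            return 0;
        default:
            return -EIO;    /* real failure */
        }
    }

    int main(void)
    {
        printf("crq error -> %d\n", cancel_rc(MAD_CRQ_ERROR_M));     /* 0 */
        printf("other     -> %d\n", cancel_rc(MAD_OTHER_FAILURE_M)); /* -5 */
        return 0;
    }
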
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index d0fa4b6c551f..fa764406df68 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c | |||
@@ -241,7 +241,7 @@ static void gather_partition_info(void) | |||
241 | struct device_node *rootdn; | 241 | struct device_node *rootdn; |
242 | 242 | ||
243 | const char *ppartition_name; | 243 | const char *ppartition_name; |
244 | const unsigned int *p_number_ptr; | 244 | const __be32 *p_number_ptr; |
245 | 245 | ||
246 | /* Retrieve information about this partition */ | 246 | /* Retrieve information about this partition */ |
247 | rootdn = of_find_node_by_path("/"); | 247 | rootdn = of_find_node_by_path("/"); |
@@ -255,7 +255,7 @@ static void gather_partition_info(void) | |||
255 | sizeof(partition_name)); | 255 | sizeof(partition_name)); |
256 | p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); | 256 | p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL); |
257 | if (p_number_ptr) | 257 | if (p_number_ptr) |
258 | partition_number = *p_number_ptr; | 258 | partition_number = of_read_number(p_number_ptr, 1); |
259 | of_node_put(rootdn); | 259 | of_node_put(rootdn); |
260 | } | 260 | } |
261 | 261 | ||
@@ -270,10 +270,11 @@ static void set_adapter_info(struct ibmvscsi_host_data *hostdata) | |||
270 | strncpy(hostdata->madapter_info.partition_name, partition_name, | 270 | strncpy(hostdata->madapter_info.partition_name, partition_name, |
271 | sizeof(hostdata->madapter_info.partition_name)); | 271 | sizeof(hostdata->madapter_info.partition_name)); |
272 | 272 | ||
273 | hostdata->madapter_info.partition_number = partition_number; | 273 | hostdata->madapter_info.partition_number = |
274 | cpu_to_be32(partition_number); | ||
274 | 275 | ||
275 | hostdata->madapter_info.mad_version = 1; | 276 | hostdata->madapter_info.mad_version = cpu_to_be32(1); |
276 | hostdata->madapter_info.os_type = 2; | 277 | hostdata->madapter_info.os_type = cpu_to_be32(2); |
277 | } | 278 | } |
278 | 279 | ||
279 | /** | 280 | /** |
@@ -464,9 +465,9 @@ static int initialize_event_pool(struct event_pool *pool, | |||
464 | memset(&evt->crq, 0x00, sizeof(evt->crq)); | 465 | memset(&evt->crq, 0x00, sizeof(evt->crq)); |
465 | atomic_set(&evt->free, 1); | 466 | atomic_set(&evt->free, 1); |
466 | evt->crq.valid = 0x80; | 467 | evt->crq.valid = 0x80; |
467 | evt->crq.IU_length = sizeof(*evt->xfer_iu); | 468 | evt->crq.IU_length = cpu_to_be16(sizeof(*evt->xfer_iu)); |
468 | evt->crq.IU_data_ptr = pool->iu_token + | 469 | evt->crq.IU_data_ptr = cpu_to_be64(pool->iu_token + |
469 | sizeof(*evt->xfer_iu) * i; | 470 | sizeof(*evt->xfer_iu) * i); |
470 | evt->xfer_iu = pool->iu_storage + i; | 471 | evt->xfer_iu = pool->iu_storage + i; |
471 | evt->hostdata = hostdata; | 472 | evt->hostdata = hostdata; |
472 | evt->ext_list = NULL; | 473 | evt->ext_list = NULL; |
@@ -588,7 +589,7 @@ static void init_event_struct(struct srp_event_struct *evt_struct, | |||
588 | evt_struct->cmnd_done = NULL; | 589 | evt_struct->cmnd_done = NULL; |
589 | evt_struct->sync_srp = NULL; | 590 | evt_struct->sync_srp = NULL; |
590 | evt_struct->crq.format = format; | 591 | evt_struct->crq.format = format; |
591 | evt_struct->crq.timeout = timeout; | 592 | evt_struct->crq.timeout = cpu_to_be16(timeout); |
592 | evt_struct->done = done; | 593 | evt_struct->done = done; |
593 | } | 594 | } |
594 | 595 | ||
@@ -659,8 +660,8 @@ static int map_sg_list(struct scsi_cmnd *cmd, int nseg, | |||
659 | 660 | ||
660 | scsi_for_each_sg(cmd, sg, nseg, i) { | 661 | scsi_for_each_sg(cmd, sg, nseg, i) { |
661 | struct srp_direct_buf *descr = md + i; | 662 | struct srp_direct_buf *descr = md + i; |
662 | descr->va = sg_dma_address(sg); | 663 | descr->va = cpu_to_be64(sg_dma_address(sg)); |
663 | descr->len = sg_dma_len(sg); | 664 | descr->len = cpu_to_be32(sg_dma_len(sg)); |
664 | descr->key = 0; | 665 | descr->key = 0; |
665 | total_length += sg_dma_len(sg); | 666 | total_length += sg_dma_len(sg); |
666 | } | 667 | } |
@@ -703,13 +704,14 @@ static int map_sg_data(struct scsi_cmnd *cmd, | |||
703 | } | 704 | } |
704 | 705 | ||
705 | indirect->table_desc.va = 0; | 706 | indirect->table_desc.va = 0; |
706 | indirect->table_desc.len = sg_mapped * sizeof(struct srp_direct_buf); | 707 | indirect->table_desc.len = cpu_to_be32(sg_mapped * |
708 | sizeof(struct srp_direct_buf)); | ||
707 | indirect->table_desc.key = 0; | 709 | indirect->table_desc.key = 0; |
708 | 710 | ||
709 | if (sg_mapped <= MAX_INDIRECT_BUFS) { | 711 | if (sg_mapped <= MAX_INDIRECT_BUFS) { |
710 | total_length = map_sg_list(cmd, sg_mapped, | 712 | total_length = map_sg_list(cmd, sg_mapped, |
711 | &indirect->desc_list[0]); | 713 | &indirect->desc_list[0]); |
712 | indirect->len = total_length; | 714 | indirect->len = cpu_to_be32(total_length); |
713 | return 1; | 715 | return 1; |
714 | } | 716 | } |
715 | 717 | ||
@@ -731,9 +733,10 @@ static int map_sg_data(struct scsi_cmnd *cmd, | |||
731 | 733 | ||
732 | total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); | 734 | total_length = map_sg_list(cmd, sg_mapped, evt_struct->ext_list); |
733 | 735 | ||
734 | indirect->len = total_length; | 736 | indirect->len = cpu_to_be32(total_length); |
735 | indirect->table_desc.va = evt_struct->ext_list_token; | 737 | indirect->table_desc.va = cpu_to_be64(evt_struct->ext_list_token); |
736 | indirect->table_desc.len = sg_mapped * sizeof(indirect->desc_list[0]); | 738 | indirect->table_desc.len = cpu_to_be32(sg_mapped * |
739 | sizeof(indirect->desc_list[0])); | ||
737 | memcpy(indirect->desc_list, evt_struct->ext_list, | 740 | memcpy(indirect->desc_list, evt_struct->ext_list, |
738 | MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); | 741 | MAX_INDIRECT_BUFS * sizeof(struct srp_direct_buf)); |
739 | return 1; | 742 | return 1; |
@@ -849,7 +852,7 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, | |||
849 | struct ibmvscsi_host_data *hostdata, | 852 | struct ibmvscsi_host_data *hostdata, |
850 | unsigned long timeout) | 853 | unsigned long timeout) |
851 | { | 854 | { |
852 | u64 *crq_as_u64 = (u64 *) &evt_struct->crq; | 855 | __be64 *crq_as_u64 = (__be64 *)&evt_struct->crq; |
853 | int request_status = 0; | 856 | int request_status = 0; |
854 | int rc; | 857 | int rc; |
855 | int srp_req = 0; | 858 | int srp_req = 0; |
@@ -920,8 +923,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, | |||
920 | add_timer(&evt_struct->timer); | 923 | add_timer(&evt_struct->timer); |
921 | } | 924 | } |
922 | 925 | ||
923 | if ((rc = | 926 | rc = ibmvscsi_send_crq(hostdata, be64_to_cpu(crq_as_u64[0]), |
924 | ibmvscsi_send_crq(hostdata, crq_as_u64[0], crq_as_u64[1])) != 0) { | 927 | be64_to_cpu(crq_as_u64[1])); |
928 | if (rc != 0) { | ||
925 | list_del(&evt_struct->list); | 929 | list_del(&evt_struct->list); |
926 | del_timer(&evt_struct->timer); | 930 | del_timer(&evt_struct->timer); |
927 | 931 | ||
@@ -987,15 +991,16 @@ static void handle_cmd_rsp(struct srp_event_struct *evt_struct) | |||
987 | if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) | 991 | if (((cmnd->result >> 1) & 0x1f) == CHECK_CONDITION) |
988 | memcpy(cmnd->sense_buffer, | 992 | memcpy(cmnd->sense_buffer, |
989 | rsp->data, | 993 | rsp->data, |
990 | rsp->sense_data_len); | 994 | be32_to_cpu(rsp->sense_data_len)); |
991 | unmap_cmd_data(&evt_struct->iu.srp.cmd, | 995 | unmap_cmd_data(&evt_struct->iu.srp.cmd, |
992 | evt_struct, | 996 | evt_struct, |
993 | evt_struct->hostdata->dev); | 997 | evt_struct->hostdata->dev); |
994 | 998 | ||
995 | if (rsp->flags & SRP_RSP_FLAG_DOOVER) | 999 | if (rsp->flags & SRP_RSP_FLAG_DOOVER) |
996 | scsi_set_resid(cmnd, rsp->data_out_res_cnt); | 1000 | scsi_set_resid(cmnd, |
1001 | be32_to_cpu(rsp->data_out_res_cnt)); | ||
997 | else if (rsp->flags & SRP_RSP_FLAG_DIOVER) | 1002 | else if (rsp->flags & SRP_RSP_FLAG_DIOVER) |
998 | scsi_set_resid(cmnd, rsp->data_in_res_cnt); | 1003 | scsi_set_resid(cmnd, be32_to_cpu(rsp->data_in_res_cnt)); |
999 | } | 1004 | } |
1000 | 1005 | ||
1001 | if (evt_struct->cmnd_done) | 1006 | if (evt_struct->cmnd_done) |
@@ -1037,7 +1042,7 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, | |||
1037 | memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); | 1042 | memset(srp_cmd, 0x00, SRP_MAX_IU_LEN); |
1038 | srp_cmd->opcode = SRP_CMD; | 1043 | srp_cmd->opcode = SRP_CMD; |
1039 | memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); | 1044 | memcpy(srp_cmd->cdb, cmnd->cmnd, sizeof(srp_cmd->cdb)); |
1040 | srp_cmd->lun = ((u64) lun) << 48; | 1045 | srp_cmd->lun = cpu_to_be64(((u64)lun) << 48); |
1041 | 1046 | ||
1042 | if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { | 1047 | if (!map_data_for_srp_cmd(cmnd, evt_struct, srp_cmd, hostdata->dev)) { |
1043 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 1048 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
@@ -1062,9 +1067,10 @@ static int ibmvscsi_queuecommand_lck(struct scsi_cmnd *cmnd, | |||
1062 | if ((in_fmt == SRP_DATA_DESC_INDIRECT || | 1067 | if ((in_fmt == SRP_DATA_DESC_INDIRECT || |
1063 | out_fmt == SRP_DATA_DESC_INDIRECT) && | 1068 | out_fmt == SRP_DATA_DESC_INDIRECT) && |
1064 | indirect->table_desc.va == 0) { | 1069 | indirect->table_desc.va == 0) { |
1065 | indirect->table_desc.va = evt_struct->crq.IU_data_ptr + | 1070 | indirect->table_desc.va = |
1071 | cpu_to_be64(be64_to_cpu(evt_struct->crq.IU_data_ptr) + | ||
1066 | offsetof(struct srp_cmd, add_data) + | 1072 | offsetof(struct srp_cmd, add_data) + |
1067 | offsetof(struct srp_indirect_buf, desc_list); | 1073 | offsetof(struct srp_indirect_buf, desc_list)); |
1068 | } | 1074 | } |
1069 | 1075 | ||
1070 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); | 1076 | return ibmvscsi_send_srp_event(evt_struct, hostdata, 0); |
@@ -1158,7 +1164,7 @@ static void login_rsp(struct srp_event_struct *evt_struct) | |||
1158 | * request_limit could have been set to -1 by this client. | 1164 | * request_limit could have been set to -1 by this client. |
1159 | */ | 1165 | */ |
1160 | atomic_set(&hostdata->request_limit, | 1166 | atomic_set(&hostdata->request_limit, |
1161 | evt_struct->xfer_iu->srp.login_rsp.req_lim_delta); | 1167 | be32_to_cpu(evt_struct->xfer_iu->srp.login_rsp.req_lim_delta)); |
1162 | 1168 | ||
1163 | /* If we had any pending I/Os, kick them */ | 1169 | /* If we had any pending I/Os, kick them */ |
1164 | scsi_unblock_requests(hostdata->host); | 1170 | scsi_unblock_requests(hostdata->host); |
@@ -1184,8 +1190,9 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata) | |||
1184 | login = &evt_struct->iu.srp.login_req; | 1190 | login = &evt_struct->iu.srp.login_req; |
1185 | memset(login, 0, sizeof(*login)); | 1191 | memset(login, 0, sizeof(*login)); |
1186 | login->opcode = SRP_LOGIN_REQ; | 1192 | login->opcode = SRP_LOGIN_REQ; |
1187 | login->req_it_iu_len = sizeof(union srp_iu); | 1193 | login->req_it_iu_len = cpu_to_be32(sizeof(union srp_iu)); |
1188 | login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT; | 1194 | login->req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | |
1195 | SRP_BUF_FORMAT_INDIRECT); | ||
1189 | 1196 | ||
1190 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1197 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1191 | /* Start out with a request limit of 0, since this is negotiated in | 1198 | /* Start out with a request limit of 0, since this is negotiated in |
@@ -1214,12 +1221,13 @@ static void capabilities_rsp(struct srp_event_struct *evt_struct) | |||
1214 | dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", | 1221 | dev_err(hostdata->dev, "error 0x%X getting capabilities info\n", |
1215 | evt_struct->xfer_iu->mad.capabilities.common.status); | 1222 | evt_struct->xfer_iu->mad.capabilities.common.status); |
1216 | } else { | 1223 | } else { |
1217 | if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP) | 1224 | if (hostdata->caps.migration.common.server_support != |
1225 | cpu_to_be16(SERVER_SUPPORTS_CAP)) | ||
1218 | dev_info(hostdata->dev, "Partition migration not supported\n"); | 1226 | dev_info(hostdata->dev, "Partition migration not supported\n"); |
1219 | 1227 | ||
1220 | if (client_reserve) { | 1228 | if (client_reserve) { |
1221 | if (hostdata->caps.reserve.common.server_support == | 1229 | if (hostdata->caps.reserve.common.server_support == |
1222 | SERVER_SUPPORTS_CAP) | 1230 | cpu_to_be16(SERVER_SUPPORTS_CAP)) |
1223 | dev_info(hostdata->dev, "Client reserve enabled\n"); | 1231 | dev_info(hostdata->dev, "Client reserve enabled\n"); |
1224 | else | 1232 | else |
1225 | dev_info(hostdata->dev, "Client reserve not supported\n"); | 1233 | dev_info(hostdata->dev, "Client reserve not supported\n"); |
@@ -1251,9 +1259,9 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | |||
1251 | req = &evt_struct->iu.mad.capabilities; | 1259 | req = &evt_struct->iu.mad.capabilities; |
1252 | memset(req, 0, sizeof(*req)); | 1260 | memset(req, 0, sizeof(*req)); |
1253 | 1261 | ||
1254 | hostdata->caps.flags = CAP_LIST_SUPPORTED; | 1262 | hostdata->caps.flags = cpu_to_be32(CAP_LIST_SUPPORTED); |
1255 | if (hostdata->client_migrated) | 1263 | if (hostdata->client_migrated) |
1256 | hostdata->caps.flags |= CLIENT_MIGRATED; | 1264 | hostdata->caps.flags |= cpu_to_be32(CLIENT_MIGRATED); |
1257 | 1265 | ||
1258 | strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), | 1266 | strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev), |
1259 | sizeof(hostdata->caps.name)); | 1267 | sizeof(hostdata->caps.name)); |
@@ -1264,22 +1272,31 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | |||
1264 | strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); | 1272 | strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc)); |
1265 | hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; | 1273 | hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0'; |
1266 | 1274 | ||
1267 | req->common.type = VIOSRP_CAPABILITIES_TYPE; | 1275 | req->common.type = cpu_to_be32(VIOSRP_CAPABILITIES_TYPE); |
1268 | req->buffer = hostdata->caps_addr; | 1276 | req->buffer = cpu_to_be64(hostdata->caps_addr); |
1269 | 1277 | ||
1270 | hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES; | 1278 | hostdata->caps.migration.common.cap_type = |
1271 | hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration); | 1279 | cpu_to_be32(MIGRATION_CAPABILITIES); |
1272 | hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP; | 1280 | hostdata->caps.migration.common.length = |
1273 | hostdata->caps.migration.ecl = 1; | 1281 | cpu_to_be16(sizeof(hostdata->caps.migration)); |
1282 | hostdata->caps.migration.common.server_support = | ||
1283 | cpu_to_be16(SERVER_SUPPORTS_CAP); | ||
1284 | hostdata->caps.migration.ecl = cpu_to_be32(1); | ||
1274 | 1285 | ||
1275 | if (client_reserve) { | 1286 | if (client_reserve) { |
1276 | hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES; | 1287 | hostdata->caps.reserve.common.cap_type = |
1277 | hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve); | 1288 | cpu_to_be32(RESERVATION_CAPABILITIES); |
1278 | hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP; | 1289 | hostdata->caps.reserve.common.length = |
1279 | hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2; | 1290 | cpu_to_be16(sizeof(hostdata->caps.reserve)); |
1280 | req->common.length = sizeof(hostdata->caps); | 1291 | hostdata->caps.reserve.common.server_support = |
1292 | cpu_to_be16(SERVER_SUPPORTS_CAP); | ||
1293 | hostdata->caps.reserve.type = | ||
1294 | cpu_to_be32(CLIENT_RESERVE_SCSI_2); | ||
1295 | req->common.length = | ||
1296 | cpu_to_be16(sizeof(hostdata->caps)); | ||
1281 | } else | 1297 | } else |
1282 | req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve); | 1298 | req->common.length = cpu_to_be16(sizeof(hostdata->caps) - |
1299 | sizeof(hostdata->caps.reserve)); | ||
1283 | 1300 | ||
1284 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1301 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1285 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) | 1302 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) |
@@ -1297,7 +1314,7 @@ static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata) | |||
1297 | static void fast_fail_rsp(struct srp_event_struct *evt_struct) | 1314 | static void fast_fail_rsp(struct srp_event_struct *evt_struct) |
1298 | { | 1315 | { |
1299 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; | 1316 | struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; |
1300 | u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status; | 1317 | u16 status = be16_to_cpu(evt_struct->xfer_iu->mad.fast_fail.common.status); |
1301 | 1318 | ||
1302 | if (status == VIOSRP_MAD_NOT_SUPPORTED) | 1319 | if (status == VIOSRP_MAD_NOT_SUPPORTED) |
1303 | dev_err(hostdata->dev, "fast_fail not supported in server\n"); | 1320 | dev_err(hostdata->dev, "fast_fail not supported in server\n"); |
@@ -1334,8 +1351,8 @@ static int enable_fast_fail(struct ibmvscsi_host_data *hostdata) | |||
1334 | 1351 | ||
1335 | fast_fail_mad = &evt_struct->iu.mad.fast_fail; | 1352 | fast_fail_mad = &evt_struct->iu.mad.fast_fail; |
1336 | memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); | 1353 | memset(fast_fail_mad, 0, sizeof(*fast_fail_mad)); |
1337 | fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL; | 1354 | fast_fail_mad->common.type = cpu_to_be32(VIOSRP_ENABLE_FAST_FAIL); |
1338 | fast_fail_mad->common.length = sizeof(*fast_fail_mad); | 1355 | fast_fail_mad->common.length = cpu_to_be16(sizeof(*fast_fail_mad)); |
1339 | 1356 | ||
1340 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1357 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1341 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); | 1358 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); |
@@ -1362,15 +1379,15 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) | |||
1362 | "host partition %s (%d), OS %d, max io %u\n", | 1379 | "host partition %s (%d), OS %d, max io %u\n", |
1363 | hostdata->madapter_info.srp_version, | 1380 | hostdata->madapter_info.srp_version, |
1364 | hostdata->madapter_info.partition_name, | 1381 | hostdata->madapter_info.partition_name, |
1365 | hostdata->madapter_info.partition_number, | 1382 | be32_to_cpu(hostdata->madapter_info.partition_number), |
1366 | hostdata->madapter_info.os_type, | 1383 | be32_to_cpu(hostdata->madapter_info.os_type), |
1367 | hostdata->madapter_info.port_max_txu[0]); | 1384 | be32_to_cpu(hostdata->madapter_info.port_max_txu[0])); |
1368 | 1385 | ||
1369 | if (hostdata->madapter_info.port_max_txu[0]) | 1386 | if (hostdata->madapter_info.port_max_txu[0]) |
1370 | hostdata->host->max_sectors = | 1387 | hostdata->host->max_sectors = |
1371 | hostdata->madapter_info.port_max_txu[0] >> 9; | 1388 | be32_to_cpu(hostdata->madapter_info.port_max_txu[0]) >> 9; |
1372 | 1389 | ||
1373 | if (hostdata->madapter_info.os_type == 3 && | 1390 | if (be32_to_cpu(hostdata->madapter_info.os_type) == 3 && |
1374 | strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { | 1391 | strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) { |
1375 | dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", | 1392 | dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n", |
1376 | hostdata->madapter_info.srp_version); | 1393 | hostdata->madapter_info.srp_version); |
@@ -1379,7 +1396,7 @@ static void adapter_info_rsp(struct srp_event_struct *evt_struct) | |||
1379 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; | 1396 | hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS; |
1380 | } | 1397 | } |
1381 | 1398 | ||
1382 | if (hostdata->madapter_info.os_type == 3) { | 1399 | if (be32_to_cpu(hostdata->madapter_info.os_type) == 3) { |
1383 | enable_fast_fail(hostdata); | 1400 | enable_fast_fail(hostdata); |
1384 | return; | 1401 | return; |
1385 | } | 1402 | } |
@@ -1414,9 +1431,9 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata) | |||
1414 | req = &evt_struct->iu.mad.adapter_info; | 1431 | req = &evt_struct->iu.mad.adapter_info; |
1415 | memset(req, 0x00, sizeof(*req)); | 1432 | memset(req, 0x00, sizeof(*req)); |
1416 | 1433 | ||
1417 | req->common.type = VIOSRP_ADAPTER_INFO_TYPE; | 1434 | req->common.type = cpu_to_be32(VIOSRP_ADAPTER_INFO_TYPE); |
1418 | req->common.length = sizeof(hostdata->madapter_info); | 1435 | req->common.length = cpu_to_be16(sizeof(hostdata->madapter_info)); |
1419 | req->buffer = hostdata->adapter_info_addr; | 1436 | req->buffer = cpu_to_be64(hostdata->adapter_info_addr); |
1420 | 1437 | ||
1421 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1438 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1422 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) | 1439 | if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2)) |
@@ -1501,7 +1518,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd) | |||
1501 | /* Set up an abort SRP command */ | 1518 | /* Set up an abort SRP command */ |
1502 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | 1519 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); |
1503 | tsk_mgmt->opcode = SRP_TSK_MGMT; | 1520 | tsk_mgmt->opcode = SRP_TSK_MGMT; |
1504 | tsk_mgmt->lun = ((u64) lun) << 48; | 1521 | tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); |
1505 | tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; | 1522 | tsk_mgmt->tsk_mgmt_func = SRP_TSK_ABORT_TASK; |
1506 | tsk_mgmt->task_tag = (u64) found_evt; | 1523 | tsk_mgmt->task_tag = (u64) found_evt; |
1507 | 1524 | ||
@@ -1624,7 +1641,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd) | |||
1624 | /* Set up a lun reset SRP command */ | 1641 | /* Set up a lun reset SRP command */ |
1625 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); | 1642 | memset(tsk_mgmt, 0x00, sizeof(*tsk_mgmt)); |
1626 | tsk_mgmt->opcode = SRP_TSK_MGMT; | 1643 | tsk_mgmt->opcode = SRP_TSK_MGMT; |
1627 | tsk_mgmt->lun = ((u64) lun) << 48; | 1644 | tsk_mgmt->lun = cpu_to_be64(((u64) lun) << 48); |
1628 | tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; | 1645 | tsk_mgmt->tsk_mgmt_func = SRP_TSK_LUN_RESET; |
1629 | 1646 | ||
1630 | evt->sync_srp = &srp_rsp; | 1647 | evt->sync_srp = &srp_rsp; |
@@ -1735,8 +1752,9 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |||
1735 | { | 1752 | { |
1736 | long rc; | 1753 | long rc; |
1737 | unsigned long flags; | 1754 | unsigned long flags; |
1755 | /* The hypervisor copies our tag value here so no byteswapping */ | ||
1738 | struct srp_event_struct *evt_struct = | 1756 | struct srp_event_struct *evt_struct = |
1739 | (struct srp_event_struct *)crq->IU_data_ptr; | 1757 | (__force struct srp_event_struct *)crq->IU_data_ptr; |
1740 | switch (crq->valid) { | 1758 | switch (crq->valid) { |
1741 | case 0xC0: /* initialization */ | 1759 | case 0xC0: /* initialization */ |
1742 | switch (crq->format) { | 1760 | switch (crq->format) { |
@@ -1792,18 +1810,18 @@ static void ibmvscsi_handle_crq(struct viosrp_crq *crq, | |||
1792 | */ | 1810 | */ |
1793 | if (!valid_event_struct(&hostdata->pool, evt_struct)) { | 1811 | if (!valid_event_struct(&hostdata->pool, evt_struct)) { |
1794 | dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", | 1812 | dev_err(hostdata->dev, "returned correlation_token 0x%p is invalid!\n", |
1795 | (void *)crq->IU_data_ptr); | 1813 | evt_struct); |
1796 | return; | 1814 | return; |
1797 | } | 1815 | } |
1798 | 1816 | ||
1799 | if (atomic_read(&evt_struct->free)) { | 1817 | if (atomic_read(&evt_struct->free)) { |
1800 | dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", | 1818 | dev_err(hostdata->dev, "received duplicate correlation_token 0x%p!\n", |
1801 | (void *)crq->IU_data_ptr); | 1819 | evt_struct); |
1802 | return; | 1820 | return; |
1803 | } | 1821 | } |
1804 | 1822 | ||
1805 | if (crq->format == VIOSRP_SRP_FORMAT) | 1823 | if (crq->format == VIOSRP_SRP_FORMAT) |
1806 | atomic_add(evt_struct->xfer_iu->srp.rsp.req_lim_delta, | 1824 | atomic_add(be32_to_cpu(evt_struct->xfer_iu->srp.rsp.req_lim_delta), |
1807 | &hostdata->request_limit); | 1825 | &hostdata->request_limit); |
1808 | 1826 | ||
1809 | del_timer(&evt_struct->timer); | 1827 | del_timer(&evt_struct->timer); |
@@ -1856,13 +1874,11 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1856 | 1874 | ||
1857 | /* Set up a host config request */ | 1875 | /* Set up a host config request */ |
1858 | memset(host_config, 0x00, sizeof(*host_config)); | 1876 | memset(host_config, 0x00, sizeof(*host_config)); |
1859 | host_config->common.type = VIOSRP_HOST_CONFIG_TYPE; | 1877 | host_config->common.type = cpu_to_be32(VIOSRP_HOST_CONFIG_TYPE); |
1860 | host_config->common.length = length; | 1878 | host_config->common.length = cpu_to_be16(length); |
1861 | host_config->buffer = addr = dma_map_single(hostdata->dev, buffer, | 1879 | addr = dma_map_single(hostdata->dev, buffer, length, DMA_BIDIRECTIONAL); |
1862 | length, | ||
1863 | DMA_BIDIRECTIONAL); | ||
1864 | 1880 | ||
1865 | if (dma_mapping_error(hostdata->dev, host_config->buffer)) { | 1881 | if (dma_mapping_error(hostdata->dev, addr)) { |
1866 | if (!firmware_has_feature(FW_FEATURE_CMO)) | 1882 | if (!firmware_has_feature(FW_FEATURE_CMO)) |
1867 | dev_err(hostdata->dev, | 1883 | dev_err(hostdata->dev, |
1868 | "dma_mapping error getting host config\n"); | 1884 | "dma_mapping error getting host config\n"); |
@@ -1870,6 +1886,8 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata, | |||
1870 | return -1; | 1886 | return -1; |
1871 | } | 1887 | } |
1872 | 1888 | ||
1889 | host_config->buffer = cpu_to_be64(addr); | ||
1890 | |||
1873 | init_completion(&evt_struct->comp); | 1891 | init_completion(&evt_struct->comp); |
1874 | spin_lock_irqsave(hostdata->host->host_lock, flags); | 1892 | spin_lock_irqsave(hostdata->host->host_lock, flags); |
1875 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); | 1893 | rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2); |
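
Every hunk in this file applies one rule: anything the hypervisor reads is stored with cpu_to_be*() and anything it wrote is read back with be*_to_cpu(), so the driver works on both big- and little-endian hosts. The send path then hands the 16-byte CRQ over as two register-sized values, swapped back to native order (the be64_to_cpu() calls in the diff) so the big-endian byte image survives the register interface. A userspace sketch of the same discipline, with glibc's htobe*/be*toh standing in for the kernel converters and a cut-down viosrp_crq:

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* cut-down stand-in for struct viosrp_crq: 16 bytes, wire-endian fields */
    struct crq_model {
        uint8_t  valid;
        uint8_t  format;
        uint8_t  reserved;
        uint8_t  status;
        uint16_t timeout;       /* __be16 on the wire */
        uint16_t iu_length;     /* __be16 on the wire */
        uint64_t iu_data_ptr;   /* __be64 on the wire */
    };

    int main(void)
    {
        struct crq_model crq = { .valid = 0x80 };
        uint64_t words[2];

        crq.timeout     = htobe16(60);      /* cpu_to_be16() stand-in */
        crq.iu_length   = htobe16(128);
        crq.iu_data_ptr = htobe64(0x1000);

        /* the firmware interface takes the CRQ as two 64-bit register
         * values carrying the big-endian byte image, hence the swap
         * back to native order before the call */
        memcpy(words, &crq, sizeof(words));
        printf("word0=%#llx word1=%#llx\n",
               (unsigned long long)be64toh(words[0]),
               (unsigned long long)be64toh(words[1]));
        return 0;
    }
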
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h index 2cd735d1d196..116243087622 100644 --- a/drivers/scsi/ibmvscsi/viosrp.h +++ b/drivers/scsi/ibmvscsi/viosrp.h | |||
@@ -75,9 +75,9 @@ struct viosrp_crq { | |||
75 | u8 format; /* SCSI vs out-of-band */ | 75 | u8 format; /* SCSI vs out-of-band */ |
76 | u8 reserved; | 76 | u8 reserved; |
77 | u8 status; /* non-scsi failure? (e.g. DMA failure) */ | 77 | u8 status; /* non-scsi failure? (e.g. DMA failure) */ |
78 | u16 timeout; /* in seconds */ | 78 | __be16 timeout; /* in seconds */ |
79 | u16 IU_length; /* in bytes */ | 79 | __be16 IU_length; /* in bytes */ |
80 | u64 IU_data_ptr; /* the TCE for transferring data */ | 80 | __be64 IU_data_ptr; /* the TCE for transferring data */ |
81 | }; | 81 | }; |
82 | 82 | ||
83 | /* MADs are Management requests above and beyond the IUs defined in the SRP | 83 | /* MADs are Management requests above and beyond the IUs defined in the SRP |
@@ -124,10 +124,10 @@ enum viosrp_capability_flag { | |||
124 | * Common MAD header | 124 | * Common MAD header |
125 | */ | 125 | */ |
126 | struct mad_common { | 126 | struct mad_common { |
127 | u32 type; | 127 | __be32 type; |
128 | u16 status; | 128 | __be16 status; |
129 | u16 length; | 129 | __be16 length; |
130 | u64 tag; | 130 | __be64 tag; |
131 | }; | 131 | }; |
132 | 132 | ||
133 | /* | 133 | /* |
@@ -139,23 +139,23 @@ struct mad_common { | |||
139 | */ | 139 | */ |
140 | struct viosrp_empty_iu { | 140 | struct viosrp_empty_iu { |
141 | struct mad_common common; | 141 | struct mad_common common; |
142 | u64 buffer; | 142 | __be64 buffer; |
143 | u32 port; | 143 | __be32 port; |
144 | }; | 144 | }; |
145 | 145 | ||
146 | struct viosrp_error_log { | 146 | struct viosrp_error_log { |
147 | struct mad_common common; | 147 | struct mad_common common; |
148 | u64 buffer; | 148 | __be64 buffer; |
149 | }; | 149 | }; |
150 | 150 | ||
151 | struct viosrp_adapter_info { | 151 | struct viosrp_adapter_info { |
152 | struct mad_common common; | 152 | struct mad_common common; |
153 | u64 buffer; | 153 | __be64 buffer; |
154 | }; | 154 | }; |
155 | 155 | ||
156 | struct viosrp_host_config { | 156 | struct viosrp_host_config { |
157 | struct mad_common common; | 157 | struct mad_common common; |
158 | u64 buffer; | 158 | __be64 buffer; |
159 | }; | 159 | }; |
160 | 160 | ||
161 | struct viosrp_fast_fail { | 161 | struct viosrp_fast_fail { |
@@ -164,27 +164,27 @@ struct viosrp_fast_fail { | |||
164 | 164 | ||
165 | struct viosrp_capabilities { | 165 | struct viosrp_capabilities { |
166 | struct mad_common common; | 166 | struct mad_common common; |
167 | u64 buffer; | 167 | __be64 buffer; |
168 | }; | 168 | }; |
169 | 169 | ||
170 | struct mad_capability_common { | 170 | struct mad_capability_common { |
171 | u32 cap_type; | 171 | __be32 cap_type; |
172 | u16 length; | 172 | __be16 length; |
173 | u16 server_support; | 173 | __be16 server_support; |
174 | }; | 174 | }; |
175 | 175 | ||
176 | struct mad_reserve_cap { | 176 | struct mad_reserve_cap { |
177 | struct mad_capability_common common; | 177 | struct mad_capability_common common; |
178 | u32 type; | 178 | __be32 type; |
179 | }; | 179 | }; |
180 | 180 | ||
181 | struct mad_migration_cap { | 181 | struct mad_migration_cap { |
182 | struct mad_capability_common common; | 182 | struct mad_capability_common common; |
183 | u32 ecl; | 183 | __be32 ecl; |
184 | }; | 184 | }; |
185 | 185 | ||
186 | struct capabilities{ | 186 | struct capabilities{ |
187 | u32 flags; | 187 | __be32 flags; |
188 | char name[SRP_MAX_LOC_LEN]; | 188 | char name[SRP_MAX_LOC_LEN]; |
189 | char loc[SRP_MAX_LOC_LEN]; | 189 | char loc[SRP_MAX_LOC_LEN]; |
190 | struct mad_migration_cap migration; | 190 | struct mad_migration_cap migration; |
@@ -208,10 +208,10 @@ union viosrp_iu { | |||
208 | struct mad_adapter_info_data { | 208 | struct mad_adapter_info_data { |
209 | char srp_version[8]; | 209 | char srp_version[8]; |
210 | char partition_name[96]; | 210 | char partition_name[96]; |
211 | u32 partition_number; | 211 | __be32 partition_number; |
212 | u32 mad_version; | 212 | __be32 mad_version; |
213 | u32 os_type; | 213 | __be32 os_type; |
214 | u32 port_max_txu[8]; /* per-port maximum transfer */ | 214 | __be32 port_max_txu[8]; /* per-port maximum transfer */ |
215 | }; | 215 | }; |
216 | 216 | ||
217 | #endif | 217 | #endif |
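
The point of switching these fields from u16/u32/u64 to __be16/__be32/__be64 is static checking: under sparse (make C=1) the __be* types are distinct "bitwise" types, so assigning a native integer without cpu_to_be*(), or comparing across the two, triggers a warning; that is what keeps the conversions added in ibmvscsi.c honest. A minimal reproduction of the mechanism outside the kernel, assuming sparse's __CHECKER__ convention; the _m names are stand-ins, not kernel definitions:

    #ifdef __CHECKER__                       /* defined when sparse runs */
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else                                    /* plain compilers ignore both */
    #define __bitwise
    #define __force
    #endif

    typedef unsigned int u32_m;
    typedef u32_m __bitwise be32_m;          /* like the kernel's __be32 */

    static inline be32_m cpu_to_be32_m(u32_m x)
    {
        /* unconditional swap for brevity; the kernel swaps only on LE hosts */
        return (__force be32_m)__builtin_bswap32(x);
    }

    struct mad_common_model {                /* mirrors mad_common above */
        be32_m type;
    };

    int main(void)
    {
        struct mad_common_model m;

        m.type = cpu_to_be32_m(1);           /* clean under sparse */
        /* m.type = 1;  <- sparse: "incorrect type in assignment" */
        (void)m;
        return 0;
    }
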
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h index df43bfe6d573..4e1b75ca7451 100644 --- a/drivers/scsi/lpfc/lpfc.h +++ b/drivers/scsi/lpfc/lpfc.h | |||
@@ -708,6 +708,7 @@ struct lpfc_hba { | |||
708 | uint32_t cfg_multi_ring_type; | 708 | uint32_t cfg_multi_ring_type; |
709 | uint32_t cfg_poll; | 709 | uint32_t cfg_poll; |
710 | uint32_t cfg_poll_tmo; | 710 | uint32_t cfg_poll_tmo; |
711 | uint32_t cfg_task_mgmt_tmo; | ||
711 | uint32_t cfg_use_msi; | 712 | uint32_t cfg_use_msi; |
712 | uint32_t cfg_fcp_imax; | 713 | uint32_t cfg_fcp_imax; |
713 | uint32_t cfg_fcp_cpu_map; | 714 | uint32_t cfg_fcp_cpu_map; |
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c index 16498e030c70..00656fc92b93 100644 --- a/drivers/scsi/lpfc/lpfc_attr.c +++ b/drivers/scsi/lpfc/lpfc_attr.c | |||
@@ -1865,8 +1865,10 @@ lpfc_##attr##_set(struct lpfc_vport *vport, uint val) \ | |||
1865 | { \ | 1865 | { \ |
1866 | if (val >= minval && val <= maxval) {\ | 1866 | if (val >= minval && val <= maxval) {\ |
1867 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ | 1867 | lpfc_printf_vlog(vport, KERN_ERR, LOG_INIT, \ |
1868 | "3053 lpfc_" #attr " changed from %d to %d\n", \ | 1868 | "3053 lpfc_" #attr \ |
1869 | vport->cfg_##attr, val); \ | 1869 | " changed from %d (x%x) to %d (x%x)\n", \ |
1870 | vport->cfg_##attr, vport->cfg_##attr, \ | ||
1871 | val, val); \ | ||
1870 | vport->cfg_##attr = val;\ | 1872 | vport->cfg_##attr = val;\ |
1871 | return 0;\ | 1873 | return 0;\ |
1872 | }\ | 1874 | }\ |
@@ -4011,8 +4013,11 @@ LPFC_ATTR_R(ack0, 0, 0, 1, "Enable ACK0 support"); | |||
4011 | # For [0], FCP commands are issued to Work Queues in a round robin fashion. | 4013 | # For [0], FCP commands are issued to Work Queues in a round robin fashion. |
4012 | # For [1], FCP commands are issued to a Work Queue associated with the | 4014 | # For [1], FCP commands are issued to a Work Queue associated with the |
4013 | # current CPU. | 4015 | # current CPU. |
4016 | # The driver sets this to 1 if it is able to set up CPU affinity ||
4017 | # for FCP I/Os through the Work Queue associated with the current CPU; ||
4018 | # otherwise, round-robin scheduling of FCP I/Os through WQs is used. ||
4014 | */ | 4019 | */ |
4015 | LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algrithmn for " | 4020 | LPFC_ATTR_RW(fcp_io_sched, 0, 0, 1, "Determine scheduling algorithm for " |
4016 | "issuing commands [0] - Round Robin, [1] - Current CPU"); | 4021 | "issuing commands [0] - Round Robin, [1] - Current CPU"); |
4017 | 4022 | ||
4018 | /* | 4023 | /* |
@@ -4110,6 +4115,12 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255, | |||
4110 | "Milliseconds driver will wait between polling FCP ring"); | 4115 | "Milliseconds driver will wait between polling FCP ring"); |
4111 | 4116 | ||
4112 | /* | 4117 | /* |
4118 | # lpfc_task_mgmt_tmo: Maximum time to wait for task management commands | ||
4119 | # to complete in seconds. Value range is [5,180], default value is 60. | ||
4120 | */ | ||
4121 | LPFC_ATTR_RW(task_mgmt_tmo, 60, 5, 180, | ||
4122 | "Maximum time to wait for task management commands to complete"); | ||
4123 | /* | ||
4113 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that | 4124 | # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that |
4114 | # support this feature | 4125 | # support this feature |
4115 | # 0 = MSI disabled | 4126 | # 0 = MSI disabled |
@@ -4295,6 +4306,7 @@ struct device_attribute *lpfc_hba_attrs[] = { | |||
4295 | &dev_attr_issue_reset, | 4306 | &dev_attr_issue_reset, |
4296 | &dev_attr_lpfc_poll, | 4307 | &dev_attr_lpfc_poll, |
4297 | &dev_attr_lpfc_poll_tmo, | 4308 | &dev_attr_lpfc_poll_tmo, |
4309 | &dev_attr_lpfc_task_mgmt_tmo, | ||
4298 | &dev_attr_lpfc_use_msi, | 4310 | &dev_attr_lpfc_use_msi, |
4299 | &dev_attr_lpfc_fcp_imax, | 4311 | &dev_attr_lpfc_fcp_imax, |
4300 | &dev_attr_lpfc_fcp_cpu_map, | 4312 | &dev_attr_lpfc_fcp_cpu_map, |
@@ -5274,6 +5286,7 @@ lpfc_get_cfgparam(struct lpfc_hba *phba) | |||
5274 | lpfc_topology_init(phba, lpfc_topology); | 5286 | lpfc_topology_init(phba, lpfc_topology); |
5275 | lpfc_link_speed_init(phba, lpfc_link_speed); | 5287 | lpfc_link_speed_init(phba, lpfc_link_speed); |
5276 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); | 5288 | lpfc_poll_tmo_init(phba, lpfc_poll_tmo); |
5289 | lpfc_task_mgmt_tmo_init(phba, lpfc_task_mgmt_tmo); | ||
5277 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); | 5290 | lpfc_enable_npiv_init(phba, lpfc_enable_npiv); |
5278 | lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); | 5291 | lpfc_fcf_failover_policy_init(phba, lpfc_fcf_failover_policy); |
5279 | lpfc_enable_rrq_init(phba, lpfc_enable_rrq); | 5292 | lpfc_enable_rrq_init(phba, lpfc_enable_rrq); |
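
The reworked 3053 message above logs the old and new values twice, in decimal and hex, and the new lpfc_task_mgmt_tmo attribute reuses the same generated range check from the LPFC_ATTR_RW machinery. Boiled down to a compilable stand-in (not the actual macro expansion), the validation pattern looks like this:

    #include <stdio.h>

    struct cfg_param {
        const char *name;
        unsigned int val, min, max;
    };

    static int cfg_set(struct cfg_param *p, unsigned int val)
    {
        if (val < p->min || val > p->max) {
            fprintf(stderr, "%s: %u out of range [%u,%u]\n",
                    p->name, val, p->min, p->max);
            return -1;
        }
        /* old and new value in both decimal and hex, as in message 3053 */
        printf("%s changed from %u (0x%x) to %u (0x%x)\n",
               p->name, p->val, p->val, val, val);
        p->val = val;
        return 0;
    }

    int main(void)
    {
        struct cfg_param tmo = { "lpfc_task_mgmt_tmo", 60, 5, 180 };

        cfg_set(&tmo, 90);    /* accepted and logged */
        cfg_set(&tmo, 300);   /* rejected: above max */
        return 0;
    }
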
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 79c13c3263f1..b92aec989d60 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c | |||
@@ -317,6 +317,11 @@ lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba, | |||
317 | } | 317 | } |
318 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | 318 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
319 | 319 | ||
320 | /* Close the timeout handler abort window */ | ||
321 | spin_lock_irqsave(&phba->hbalock, flags); | ||
322 | cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; | ||
323 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
324 | |||
320 | iocb = &dd_data->context_un.iocb; | 325 | iocb = &dd_data->context_un.iocb; |
321 | ndlp = iocb->ndlp; | 326 | ndlp = iocb->ndlp; |
322 | rmp = iocb->rmp; | 327 | rmp = iocb->rmp; |
@@ -387,6 +392,7 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
387 | int request_nseg; | 392 | int request_nseg; |
388 | int reply_nseg; | 393 | int reply_nseg; |
389 | struct bsg_job_data *dd_data; | 394 | struct bsg_job_data *dd_data; |
395 | unsigned long flags; | ||
390 | uint32_t creg_val; | 396 | uint32_t creg_val; |
391 | int rc = 0; | 397 | int rc = 0; |
392 | int iocb_stat; | 398 | int iocb_stat; |
@@ -501,14 +507,24 @@ lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job) | |||
501 | } | 507 | } |
502 | 508 | ||
503 | iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | 509 | iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); |
504 | if (iocb_stat == IOCB_SUCCESS) | 510 | |
511 | if (iocb_stat == IOCB_SUCCESS) { | ||
512 | spin_lock_irqsave(&phba->hbalock, flags); | ||
513 | /* make sure the I/O had not been completed yet */ | ||
514 | if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { | ||
515 | /* open up abort window to timeout handler */ | ||
516 | cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; | ||
517 | } | ||
518 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
505 | return 0; /* done for now */ | 519 | return 0; /* done for now */ |
506 | else if (iocb_stat == IOCB_BUSY) | 520 | } else if (iocb_stat == IOCB_BUSY) { |
507 | rc = -EAGAIN; | 521 | rc = -EAGAIN; |
508 | else | 522 | } else { |
509 | rc = -EIO; | 523 | rc = -EIO; |
524 | } | ||
510 | 525 | ||
511 | /* iocb failed so cleanup */ | 526 | /* iocb failed so cleanup */ |
527 | job->dd_data = NULL; | ||
512 | 528 | ||
513 | free_rmp: | 529 | free_rmp: |
514 | lpfc_free_bsg_buffers(phba, rmp); | 530 | lpfc_free_bsg_buffers(phba, rmp); |
@@ -577,6 +593,11 @@ lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba, | |||
577 | } | 593 | } |
578 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | 594 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
579 | 595 | ||
596 | /* Close the timeout handler abort window */ | ||
597 | spin_lock_irqsave(&phba->hbalock, flags); | ||
598 | cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; | ||
599 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
600 | |||
580 | rsp = &rspiocbq->iocb; | 601 | rsp = &rspiocbq->iocb; |
581 | pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; | 602 | pcmd = (struct lpfc_dmabuf *)cmdiocbq->context2; |
582 | prsp = (struct lpfc_dmabuf *)pcmd->list.next; | 603 | prsp = (struct lpfc_dmabuf *)pcmd->list.next; |
@@ -639,6 +660,7 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
639 | struct lpfc_iocbq *cmdiocbq; | 660 | struct lpfc_iocbq *cmdiocbq; |
640 | uint16_t rpi = 0; | 661 | uint16_t rpi = 0; |
641 | struct bsg_job_data *dd_data; | 662 | struct bsg_job_data *dd_data; |
663 | unsigned long flags; | ||
642 | uint32_t creg_val; | 664 | uint32_t creg_val; |
643 | int rc = 0; | 665 | int rc = 0; |
644 | 666 | ||
@@ -721,15 +743,25 @@ lpfc_bsg_rport_els(struct fc_bsg_job *job) | |||
721 | 743 | ||
722 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); | 744 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0); |
723 | 745 | ||
724 | if (rc == IOCB_SUCCESS) | 746 | if (rc == IOCB_SUCCESS) { |
747 | spin_lock_irqsave(&phba->hbalock, flags); | ||
748 | /* make sure the I/O had not been completed/released */ | ||
749 | if (cmdiocbq->iocb_flag & LPFC_IO_LIBDFC) { | ||
750 | /* open up abort window to timeout handler */ | ||
751 | cmdiocbq->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; | ||
752 | } | ||
753 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
725 | return 0; /* done for now */ | 754 | return 0; /* done for now */ |
726 | else if (rc == IOCB_BUSY) | 755 | } else if (rc == IOCB_BUSY) { |
727 | rc = -EAGAIN; | 756 | rc = -EAGAIN; |
728 | else | 757 | } else { |
729 | rc = -EIO; | 758 | rc = -EIO; |
759 | } | ||
730 | 760 | ||
731 | linkdown_err: | 761 | /* iocb failed so cleanup */ |
762 | job->dd_data = NULL; | ||
732 | 763 | ||
764 | linkdown_err: | ||
733 | cmdiocbq->context1 = ndlp; | 765 | cmdiocbq->context1 = ndlp; |
734 | lpfc_els_free_iocb(phba, cmdiocbq); | 766 | lpfc_els_free_iocb(phba, cmdiocbq); |
735 | 767 | ||
@@ -1249,7 +1281,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) | |||
1249 | struct lpfc_hba *phba = vport->phba; | 1281 | struct lpfc_hba *phba = vport->phba; |
1250 | struct get_ct_event *event_req; | 1282 | struct get_ct_event *event_req; |
1251 | struct get_ct_event_reply *event_reply; | 1283 | struct get_ct_event_reply *event_reply; |
1252 | struct lpfc_bsg_event *evt; | 1284 | struct lpfc_bsg_event *evt, *evt_next; |
1253 | struct event_data *evt_dat = NULL; | 1285 | struct event_data *evt_dat = NULL; |
1254 | unsigned long flags; | 1286 | unsigned long flags; |
1255 | uint32_t rc = 0; | 1287 | uint32_t rc = 0; |
@@ -1269,7 +1301,7 @@ lpfc_bsg_hba_get_event(struct fc_bsg_job *job) | |||
1269 | event_reply = (struct get_ct_event_reply *) | 1301 | event_reply = (struct get_ct_event_reply *) |
1270 | job->reply->reply_data.vendor_reply.vendor_rsp; | 1302 | job->reply->reply_data.vendor_reply.vendor_rsp; |
1271 | spin_lock_irqsave(&phba->ct_ev_lock, flags); | 1303 | spin_lock_irqsave(&phba->ct_ev_lock, flags); |
1272 | list_for_each_entry(evt, &phba->ct_ev_waiters, node) { | 1304 | list_for_each_entry_safe(evt, evt_next, &phba->ct_ev_waiters, node) { |
1273 | if (evt->reg_id == event_req->ev_reg_id) { | 1305 | if (evt->reg_id == event_req->ev_reg_id) { |
1274 | if (list_empty(&evt->events_to_get)) | 1306 | if (list_empty(&evt->events_to_get)) |
1275 | break; | 1307 | break; |
@@ -1370,6 +1402,11 @@ lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba, | |||
1370 | } | 1402 | } |
1371 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | 1403 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
1372 | 1404 | ||
1405 | /* Close the timeout handler abort window */ | ||
1406 | spin_lock_irqsave(&phba->hbalock, flags); | ||
1407 | cmdiocbq->iocb_flag &= ~LPFC_IO_CMD_OUTSTANDING; | ||
1408 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1409 | |||
1373 | ndlp = dd_data->context_un.iocb.ndlp; | 1410 | ndlp = dd_data->context_un.iocb.ndlp; |
1374 | cmp = cmdiocbq->context2; | 1411 | cmp = cmdiocbq->context2; |
1375 | bmp = cmdiocbq->context3; | 1412 | bmp = cmdiocbq->context3; |
@@ -1433,6 +1470,7 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1433 | int rc = 0; | 1470 | int rc = 0; |
1434 | struct lpfc_nodelist *ndlp = NULL; | 1471 | struct lpfc_nodelist *ndlp = NULL; |
1435 | struct bsg_job_data *dd_data; | 1472 | struct bsg_job_data *dd_data; |
1473 | unsigned long flags; | ||
1436 | uint32_t creg_val; | 1474 | uint32_t creg_val; |
1437 | 1475 | ||
1438 | /* allocate our bsg tracking structure */ | 1476 | /* allocate our bsg tracking structure */ |
@@ -1542,8 +1580,19 @@ lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag, | |||
1542 | 1580 | ||
1543 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); | 1581 | rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0); |
1544 | 1582 | ||
1545 | if (rc == IOCB_SUCCESS) | 1583 | if (rc == IOCB_SUCCESS) { |
1584 | spin_lock_irqsave(&phba->hbalock, flags); | ||
1585 | /* make sure the I/O had not been completed/released */ | ||
1586 | if (ctiocb->iocb_flag & LPFC_IO_LIBDFC) { | ||
1587 | /* open up abort window to timeout handler */ | ||
1588 | ctiocb->iocb_flag |= LPFC_IO_CMD_OUTSTANDING; | ||
1589 | } | ||
1590 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
1546 | return 0; /* done for now */ | 1591 | return 0; /* done for now */ |
1592 | } | ||
1593 | |||
1594 | /* iocb failed so cleanup */ | ||
1595 | job->dd_data = NULL; | ||
1547 | 1596 | ||
1548 | issue_ct_rsp_exit: | 1597 | issue_ct_rsp_exit: |
1549 | lpfc_sli_release_iocbq(phba, ctiocb); | 1598 | lpfc_sli_release_iocbq(phba, ctiocb); |
@@ -5284,9 +5333,15 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) | |||
5284 | * remove it from the txq queue and call cancel iocbs. | 5333 | * remove it from the txq queue and call cancel iocbs. |
5285 | * Otherwise, call abort iotag | 5334 | * Otherwise, call abort iotag |
5286 | */ | 5335 | */ |
5287 | |||
5288 | cmdiocb = dd_data->context_un.iocb.cmdiocbq; | 5336 | cmdiocb = dd_data->context_un.iocb.cmdiocbq; |
5289 | spin_lock_irq(&phba->hbalock); | 5337 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
5338 | |||
5339 | spin_lock_irqsave(&phba->hbalock, flags); | ||
5340 | /* make sure the I/O abort window is still open */ | ||
5341 | if (!(cmdiocb->iocb_flag & LPFC_IO_CMD_OUTSTANDING)) { | ||
5342 | spin_unlock_irqrestore(&phba->hbalock, flags); | ||
5343 | return -EAGAIN; | ||
5344 | } | ||
5290 | list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, | 5345 | list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, |
5291 | list) { | 5346 | list) { |
5292 | if (check_iocb == cmdiocb) { | 5347 | if (check_iocb == cmdiocb) { |
@@ -5296,8 +5351,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) | |||
5296 | } | 5351 | } |
5297 | if (list_empty(&completions)) | 5352 | if (list_empty(&completions)) |
5298 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); | 5353 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); |
5299 | spin_unlock_irq(&phba->hbalock); | 5354 | spin_unlock_irqrestore(&phba->hbalock, flags); |
5300 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
5301 | if (!list_empty(&completions)) { | 5355 | if (!list_empty(&completions)) { |
5302 | lpfc_sli_cancel_iocbs(phba, &completions, | 5356 | lpfc_sli_cancel_iocbs(phba, &completions, |
5303 | IOSTAT_LOCAL_REJECT, | 5357 | IOSTAT_LOCAL_REJECT, |
@@ -5321,9 +5375,10 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) | |||
5321 | * remove it from the txq queue and call cancel iocbs. | 5375 | * remove it from the txq queue and call cancel iocbs. |
5322 | * Otherwise, call abort iotag. | 5376 | * Otherwise, call abort iotag. |
5323 | */ | 5377 | */ |
5324 | |||
5325 | cmdiocb = dd_data->context_un.menlo.cmdiocbq; | 5378 | cmdiocb = dd_data->context_un.menlo.cmdiocbq; |
5326 | spin_lock_irq(&phba->hbalock); | 5379 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); |
5380 | |||
5381 | spin_lock_irqsave(&phba->hbalock, flags); | ||
5327 | list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, | 5382 | list_for_each_entry_safe(check_iocb, next_iocb, &pring->txq, |
5328 | list) { | 5383 | list) { |
5329 | if (check_iocb == cmdiocb) { | 5384 | if (check_iocb == cmdiocb) { |
@@ -5333,8 +5388,7 @@ lpfc_bsg_timeout(struct fc_bsg_job *job) | |||
5333 | } | 5388 | } |
5334 | if (list_empty(&completions)) | 5389 | if (list_empty(&completions)) |
5335 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); | 5390 | lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb); |
5336 | spin_unlock_irq(&phba->hbalock); | 5391 | spin_unlock_irqrestore(&phba->hbalock, flags); |
5337 | spin_unlock_irqrestore(&phba->ct_ev_lock, flags); | ||
5338 | if (!list_empty(&completions)) { | 5392 | if (!list_empty(&completions)) { |
5339 | lpfc_sli_cancel_iocbs(phba, &completions, | 5393 | lpfc_sli_cancel_iocbs(phba, &completions, |
5340 | IOSTAT_LOCAL_REJECT, | 5394 | IOSTAT_LOCAL_REJECT, |
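
Both lpfc_bsg_timeout() hunks above make the same two changes: ct_ev_lock is now released before hbalock is taken, so the two locks are never held nested and are always acquired in a single order, and for the iocb case the handler first verifies the abort window is still open, backing off with -EAGAIN when the completion path already ran. The skeleton of that flow, with hypothetical names standing in for the driver's locks and flag:

    #include <linux/spinlock.h>
    #include <linux/errno.h>

    #define IO_ABORTABLE    0x2000          /* illustrative window bit */

    static DEFINE_SPINLOCK(ev_lock);        /* stands in for ct_ev_lock */
    static DEFINE_SPINLOCK(hw_lock);        /* stands in for hbalock */

    static int timeout_abort(unsigned int *io_flags)
    {
            unsigned long flags;

            spin_lock_irqsave(&ev_lock, flags);
            /* ... look up the tracking data for the job ... */
            spin_unlock_irqrestore(&ev_lock, flags);   /* drop before hw_lock */

            spin_lock_irqsave(&hw_lock, flags);
            if (!(*io_flags & IO_ABORTABLE)) {         /* window closed */
                    spin_unlock_irqrestore(&hw_lock, flags);
                    return -EAGAIN;                    /* completion beat us */
            }
            /* ... remove from txq, or issue an abort iotag ... */
            spin_unlock_irqrestore(&hw_lock, flags);
            return 0;
    }
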
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 60d6ca2f68c2..7801601aa5d9 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c | |||
@@ -4437,6 +4437,7 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) | |||
4437 | if (!ndlp) | 4437 | if (!ndlp) |
4438 | return; | 4438 | return; |
4439 | lpfc_issue_els_logo(vport, ndlp, 0); | 4439 | lpfc_issue_els_logo(vport, ndlp, 0); |
4440 | mempool_free(pmb, phba->mbox_mem_pool); | ||
4440 | } | 4441 | } |
4441 | 4442 | ||
4442 | /* | 4443 | /* |
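
The one-liner above looks like a leak fix: lpfc_nlp_logo_unreg() is a mailbox completion handler, and on this path the LPFC_MBOXQ_t it was handed was apparently never returned to phba->mbox_mem_pool, so each unreg-with-LOGO cycle lost a pool element. The rule it restores, sketched with the mempool primitives (hypothetical callback shape):

    #include <linux/mempool.h>

    /* A completion callback owns the request buffer it is passed and
     * must return it to the pool on every exit path. */
    static void logo_unreg_done(mempool_t *pool, void *mboxq)
    {
            /* ... use mboxq: look up the node, issue the LOGO ... */

            mempool_free(mboxq, pool);      /* the free this hunk adds */
    }
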
@@ -4456,7 +4457,15 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) | |||
4456 | int rc; | 4457 | int rc; |
4457 | uint16_t rpi; | 4458 | uint16_t rpi; |
4458 | 4459 | ||
4459 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { | 4460 | if (ndlp->nlp_flag & NLP_RPI_REGISTERED || |
4461 | ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { | ||
4462 | if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) | ||
4463 | lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI, | ||
4464 | "3366 RPI x%x needs to be " | ||
4465 | "unregistered nlp_flag x%x " | ||
4466 | "did x%x\n", | ||
4467 | ndlp->nlp_rpi, ndlp->nlp_flag, | ||
4468 | ndlp->nlp_DID); | ||
4460 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); | 4469 | mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); |
4461 | if (mbox) { | 4470 | if (mbox) { |
4462 | /* SLI4 ports require the physical rpi value. */ | 4471 | /* SLI4 ports require the physical rpi value. */ |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 501147c4a147..647f5bfb3bd3 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -3031,10 +3031,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) | |||
3031 | phba->sli4_hba.scsi_xri_max); | 3031 | phba->sli4_hba.scsi_xri_max); |
3032 | 3032 | ||
3033 | spin_lock_irq(&phba->scsi_buf_list_get_lock); | 3033 | spin_lock_irq(&phba->scsi_buf_list_get_lock); |
3034 | spin_lock_irq(&phba->scsi_buf_list_put_lock); | 3034 | spin_lock(&phba->scsi_buf_list_put_lock); |
3035 | list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); | 3035 | list_splice_init(&phba->lpfc_scsi_buf_list_get, &scsi_sgl_list); |
3036 | list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); | 3036 | list_splice(&phba->lpfc_scsi_buf_list_put, &scsi_sgl_list); |
3037 | spin_unlock_irq(&phba->scsi_buf_list_put_lock); | 3037 | spin_unlock(&phba->scsi_buf_list_put_lock); |
3038 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); | 3038 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
3039 | 3039 | ||
3040 | if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { | 3040 | if (phba->sli4_hba.scsi_xri_cnt > phba->sli4_hba.scsi_xri_max) { |
@@ -3070,10 +3070,10 @@ lpfc_sli4_xri_sgl_update(struct lpfc_hba *phba) | |||
3070 | psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; | 3070 | psb->cur_iocbq.sli4_xritag = phba->sli4_hba.xri_ids[lxri]; |
3071 | } | 3071 | } |
3072 | spin_lock_irq(&phba->scsi_buf_list_get_lock); | 3072 | spin_lock_irq(&phba->scsi_buf_list_get_lock); |
3073 | spin_lock_irq(&phba->scsi_buf_list_put_lock); | 3073 | spin_lock(&phba->scsi_buf_list_put_lock); |
3074 | list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); | 3074 | list_splice_init(&scsi_sgl_list, &phba->lpfc_scsi_buf_list_get); |
3075 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); | 3075 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
3076 | spin_unlock_irq(&phba->scsi_buf_list_put_lock); | 3076 | spin_unlock(&phba->scsi_buf_list_put_lock); |
3077 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); | 3077 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
3078 | 3078 | ||
3079 | return 0; | 3079 | return 0; |
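
The two hunks above (and the matching ones later in lpfc_scsi.c) fix the same anti-pattern: taking the inner put-list lock with spin_lock_irq() while the outer get-list lock already has interrupts disabled. spin_lock_irq()/spin_unlock_irq() toggle interrupts unconditionally, so the inner unlock would re-enable interrupts while the outer lock was still held. With the outer lock owning the irq state, the nested lock only needs plain spin_lock()/spin_unlock(). The shape of the fix:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(outer_lock);    /* e.g. scsi_buf_list_get_lock */
    static DEFINE_SPINLOCK(inner_lock);    /* e.g. scsi_buf_list_put_lock */

    static void splice_both_lists(void)
    {
            spin_lock_irq(&outer_lock);     /* disables local interrupts */
            spin_lock(&inner_lock);         /* irqs already off: plain lock */

            /* ... splice the put list onto the get list ... */

            spin_unlock(&inner_lock);       /* must NOT re-enable irqs here */
            spin_unlock_irq(&outer_lock);   /* re-enables interrupts once */
    }
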
@@ -4859,6 +4859,9 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4859 | struct lpfc_mqe *mqe; | 4859 | struct lpfc_mqe *mqe; |
4860 | int longs; | 4860 | int longs; |
4861 | 4861 | ||
4862 | /* Get all the module params for configuring this host */ | ||
4863 | lpfc_get_cfgparam(phba); | ||
4864 | |||
4862 | /* Before proceed, wait for POST done and device ready */ | 4865 | /* Before proceed, wait for POST done and device ready */ |
4863 | rc = lpfc_sli4_post_status_check(phba); | 4866 | rc = lpfc_sli4_post_status_check(phba); |
4864 | if (rc) | 4867 | if (rc) |
@@ -4902,15 +4905,6 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) | |||
4902 | sizeof(struct lpfc_mbox_ext_buf_ctx)); | 4905 | sizeof(struct lpfc_mbox_ext_buf_ctx)); |
4903 | INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); | 4906 | INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list); |
4904 | 4907 | ||
4905 | /* | ||
4906 | * We need to do a READ_CONFIG mailbox command here before | ||
4907 | * calling lpfc_get_cfgparam. For VFs this will report the | ||
4908 | * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings. | ||
4909 | * All of the resources allocated | ||
4910 | * for this Port are tied to these values. | ||
4911 | */ | ||
4912 | /* Get all the module params for configuring this host */ | ||
4913 | lpfc_get_cfgparam(phba); | ||
4914 | phba->max_vpi = LPFC_MAX_VPI; | 4908 | phba->max_vpi = LPFC_MAX_VPI; |
4915 | 4909 | ||
4916 | /* This will be set to correct value after the read_config mbox */ | 4910 | /* This will be set to correct value after the read_config mbox */ |
@@ -7141,19 +7135,6 @@ lpfc_sli4_queue_destroy(struct lpfc_hba *phba) | |||
7141 | phba->sli4_hba.fcp_wq = NULL; | 7135 | phba->sli4_hba.fcp_wq = NULL; |
7142 | } | 7136 | } |
7143 | 7137 | ||
7144 | if (phba->pci_bar0_memmap_p) { | ||
7145 | iounmap(phba->pci_bar0_memmap_p); | ||
7146 | phba->pci_bar0_memmap_p = NULL; | ||
7147 | } | ||
7148 | if (phba->pci_bar2_memmap_p) { | ||
7149 | iounmap(phba->pci_bar2_memmap_p); | ||
7150 | phba->pci_bar2_memmap_p = NULL; | ||
7151 | } | ||
7152 | if (phba->pci_bar4_memmap_p) { | ||
7153 | iounmap(phba->pci_bar4_memmap_p); | ||
7154 | phba->pci_bar4_memmap_p = NULL; | ||
7155 | } | ||
7156 | |||
7157 | /* Release FCP CQ mapping array */ | 7138 | /* Release FCP CQ mapping array */ |
7158 | if (phba->sli4_hba.fcp_cq_map != NULL) { | 7139 | if (phba->sli4_hba.fcp_cq_map != NULL) { |
7159 | kfree(phba->sli4_hba.fcp_cq_map); | 7140 | kfree(phba->sli4_hba.fcp_cq_map); |
@@ -7942,9 +7923,9 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
7942 | * particular PCI BARs regions is dependent on the type of | 7923 | * particular PCI BARs regions is dependent on the type of |
7943 | * SLI4 device. | 7924 | * SLI4 device. |
7944 | */ | 7925 | */ |
7945 | if (pci_resource_start(pdev, 0)) { | 7926 | if (pci_resource_start(pdev, PCI_64BIT_BAR0)) { |
7946 | phba->pci_bar0_map = pci_resource_start(pdev, 0); | 7927 | phba->pci_bar0_map = pci_resource_start(pdev, PCI_64BIT_BAR0); |
7947 | bar0map_len = pci_resource_len(pdev, 0); | 7928 | bar0map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); |
7948 | 7929 | ||
7949 | /* | 7930 | /* |
7950 | * Map SLI4 PCI Config Space Register base to a kernel virtual | 7931 | * Map SLI4 PCI Config Space Register base to a kernel virtual |
@@ -7958,6 +7939,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
7958 | "registers.\n"); | 7939 | "registers.\n"); |
7959 | goto out; | 7940 | goto out; |
7960 | } | 7941 | } |
7942 | phba->pci_bar0_memmap_p = phba->sli4_hba.conf_regs_memmap_p; | ||
7961 | /* Set up BAR0 PCI config space register memory map */ | 7943 | /* Set up BAR0 PCI config space register memory map */ |
7962 | lpfc_sli4_bar0_register_memmap(phba, if_type); | 7944 | lpfc_sli4_bar0_register_memmap(phba, if_type); |
7963 | } else { | 7945 | } else { |
@@ -7980,13 +7962,13 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
7980 | } | 7962 | } |
7981 | 7963 | ||
7982 | if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && | 7964 | if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && |
7983 | (pci_resource_start(pdev, 2))) { | 7965 | (pci_resource_start(pdev, PCI_64BIT_BAR2))) { |
7984 | /* | 7966 | /* |
7985 | * Map SLI4 if type 0 HBA Control Register base to a kernel | 7967 | * Map SLI4 if type 0 HBA Control Register base to a kernel |
7986 | * virtual address and setup the registers. | 7968 | * virtual address and setup the registers. |
7987 | */ | 7969 | */ |
7988 | phba->pci_bar1_map = pci_resource_start(pdev, 2); | 7970 | phba->pci_bar1_map = pci_resource_start(pdev, PCI_64BIT_BAR2); |
7989 | bar1map_len = pci_resource_len(pdev, 2); | 7971 | bar1map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); |
7990 | phba->sli4_hba.ctrl_regs_memmap_p = | 7972 | phba->sli4_hba.ctrl_regs_memmap_p = |
7991 | ioremap(phba->pci_bar1_map, bar1map_len); | 7973 | ioremap(phba->pci_bar1_map, bar1map_len); |
7992 | if (!phba->sli4_hba.ctrl_regs_memmap_p) { | 7974 | if (!phba->sli4_hba.ctrl_regs_memmap_p) { |
@@ -7994,17 +7976,18 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
7994 | "ioremap failed for SLI4 HBA control registers.\n"); | 7976 | "ioremap failed for SLI4 HBA control registers.\n"); |
7995 | goto out_iounmap_conf; | 7977 | goto out_iounmap_conf; |
7996 | } | 7978 | } |
7979 | phba->pci_bar2_memmap_p = phba->sli4_hba.ctrl_regs_memmap_p; | ||
7997 | lpfc_sli4_bar1_register_memmap(phba); | 7980 | lpfc_sli4_bar1_register_memmap(phba); |
7998 | } | 7981 | } |
7999 | 7982 | ||
8000 | if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && | 7983 | if ((if_type == LPFC_SLI_INTF_IF_TYPE_0) && |
8001 | (pci_resource_start(pdev, 4))) { | 7984 | (pci_resource_start(pdev, PCI_64BIT_BAR4))) { |
8002 | /* | 7985 | /* |
8003 | * Map SLI4 if type 0 HBA Doorbell Register base to a kernel | 7986 | * Map SLI4 if type 0 HBA Doorbell Register base to a kernel |
8004 | * virtual address and setup the registers. | 7987 | * virtual address and setup the registers. |
8005 | */ | 7988 | */ |
8006 | phba->pci_bar2_map = pci_resource_start(pdev, 4); | 7989 | phba->pci_bar2_map = pci_resource_start(pdev, PCI_64BIT_BAR4); |
8007 | bar2map_len = pci_resource_len(pdev, 4); | 7990 | bar2map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); |
8008 | phba->sli4_hba.drbl_regs_memmap_p = | 7991 | phba->sli4_hba.drbl_regs_memmap_p = |
8009 | ioremap(phba->pci_bar2_map, bar2map_len); | 7992 | ioremap(phba->pci_bar2_map, bar2map_len); |
8010 | if (!phba->sli4_hba.drbl_regs_memmap_p) { | 7993 | if (!phba->sli4_hba.drbl_regs_memmap_p) { |
@@ -8012,6 +7995,7 @@ lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba) | |||
8012 | "ioremap failed for SLI4 HBA doorbell registers.\n"); | 7995 | "ioremap failed for SLI4 HBA doorbell registers.\n"); |
8013 | goto out_iounmap_ctrl; | 7996 | goto out_iounmap_ctrl; |
8014 | } | 7997 | } |
7998 | phba->pci_bar4_memmap_p = phba->sli4_hba.drbl_regs_memmap_p; | ||
8015 | error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); | 7999 | error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0); |
8016 | if (error) | 8000 | if (error) |
8017 | goto out_iounmap_all; | 8001 | goto out_iounmap_all; |
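
Two related cleanups in the BAR handling above: the queue-teardown path no longer iounmap()s pci_bar{0,2,4}_memmap_p, because those pointers are now plain aliases of the conf/ctrl/drbl mappings that lpfc_sli4_pci_mem_setup() creates and owns (each alias is stored right after the successful ioremap()), and the raw BAR indices 0/2/4 become PCI_64BIT_BAR{0,2,4} constants, reflecting that a 64-bit BAR consumes a register pair, so usable bases sit at even indices. A hedged sketch of the mapping side, with the constants assumed to be 0/2/4 as defined elsewhere in the driver and error handling trimmed:

    #include <linux/pci.h>
    #include <linux/io.h>

    /* 64-bit BARs occupy register pairs; values assumed per the driver. */
    #define PCI_64BIT_BAR0   0

    static void __iomem *map_conf_bar(struct pci_dev *pdev)
    {
            resource_size_t start = pci_resource_start(pdev, PCI_64BIT_BAR0);
            resource_size_t len   = pci_resource_len(pdev, PCI_64BIT_BAR0);

            if (!start)
                    return NULL;
            /* One mapping, one owner: callers that need this BAR later
             * (e.g. the dual-chute lookup) reuse the stored alias, so
             * teardown unmaps the region exactly once. */
            return ioremap(start, len);
    }
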
@@ -8405,7 +8389,8 @@ static int | |||
8405 | lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) | 8389 | lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) |
8406 | { | 8390 | { |
8407 | int i, idx, saved_chann, used_chann, cpu, phys_id; | 8391 | int i, idx, saved_chann, used_chann, cpu, phys_id; |
8408 | int max_phys_id, num_io_channel, first_cpu; | 8392 | int max_phys_id, min_phys_id; |
8393 | int num_io_channel, first_cpu, chan; | ||
8409 | struct lpfc_vector_map_info *cpup; | 8394 | struct lpfc_vector_map_info *cpup; |
8410 | #ifdef CONFIG_X86 | 8395 | #ifdef CONFIG_X86 |
8411 | struct cpuinfo_x86 *cpuinfo; | 8396 | struct cpuinfo_x86 *cpuinfo; |
@@ -8423,6 +8408,7 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) | |||
8423 | phba->sli4_hba.num_present_cpu)); | 8408 | phba->sli4_hba.num_present_cpu)); |
8424 | 8409 | ||
8425 | max_phys_id = 0; | 8410 | max_phys_id = 0; |
8411 | min_phys_id = 0xff; | ||
8426 | phys_id = 0; | 8412 | phys_id = 0; |
8427 | num_io_channel = 0; | 8413 | num_io_channel = 0; |
8428 | first_cpu = LPFC_VECTOR_MAP_EMPTY; | 8414 | first_cpu = LPFC_VECTOR_MAP_EMPTY; |
@@ -8446,9 +8432,12 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) | |||
8446 | 8432 | ||
8447 | if (cpup->phys_id > max_phys_id) | 8433 | if (cpup->phys_id > max_phys_id) |
8448 | max_phys_id = cpup->phys_id; | 8434 | max_phys_id = cpup->phys_id; |
8435 | if (cpup->phys_id < min_phys_id) | ||
8436 | min_phys_id = cpup->phys_id; | ||
8449 | cpup++; | 8437 | cpup++; |
8450 | } | 8438 | } |
8451 | 8439 | ||
8440 | phys_id = min_phys_id; | ||
8452 | /* Now associate the HBA vectors with specific CPUs */ | 8441 | /* Now associate the HBA vectors with specific CPUs */ |
8453 | for (idx = 0; idx < vectors; idx++) { | 8442 | for (idx = 0; idx < vectors; idx++) { |
8454 | cpup = phba->sli4_hba.cpu_map; | 8443 | cpup = phba->sli4_hba.cpu_map; |
@@ -8459,13 +8448,25 @@ lpfc_sli4_set_affinity(struct lpfc_hba *phba, int vectors) | |||
8459 | for (i = 1; i < max_phys_id; i++) { | 8448 | for (i = 1; i < max_phys_id; i++) { |
8460 | phys_id++; | 8449 | phys_id++; |
8461 | if (phys_id > max_phys_id) | 8450 | if (phys_id > max_phys_id) |
8462 | phys_id = 0; | 8451 | phys_id = min_phys_id; |
8463 | cpu = lpfc_find_next_cpu(phba, phys_id); | 8452 | cpu = lpfc_find_next_cpu(phba, phys_id); |
8464 | if (cpu == LPFC_VECTOR_MAP_EMPTY) | 8453 | if (cpu == LPFC_VECTOR_MAP_EMPTY) |
8465 | continue; | 8454 | continue; |
8466 | goto found; | 8455 | goto found; |
8467 | } | 8456 | } |
8468 | 8457 | ||
8458 | /* Use round robin for scheduling */ | ||
8459 | phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_ROUND_ROBIN; | ||
8460 | chan = 0; | ||
8461 | cpup = phba->sli4_hba.cpu_map; | ||
8462 | for (i = 0; i < phba->sli4_hba.num_present_cpu; i++) { | ||
8463 | cpup->channel_id = chan; | ||
8464 | cpup++; | ||
8465 | chan++; | ||
8466 | if (chan >= phba->cfg_fcp_io_channel) | ||
8467 | chan = 0; | ||
8468 | } | ||
8469 | |||
8469 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8470 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8470 | "3329 Cannot set affinity:" | 8471 | "3329 Cannot set affinity:" |
8471 | "Error mapping vector %d (%d)\n", | 8472 | "Error mapping vector %d (%d)\n", |
@@ -8503,7 +8504,7 @@ found: | |||
8503 | /* Spread vector mapping across multple physical CPU nodes */ | 8504 | /* Spread vector mapping across multple physical CPU nodes */ |
8504 | phys_id++; | 8505 | phys_id++; |
8505 | if (phys_id > max_phys_id) | 8506 | if (phys_id > max_phys_id) |
8506 | phys_id = 0; | 8507 | phys_id = min_phys_id; |
8507 | } | 8508 | } |
8508 | 8509 | ||
8509 | /* | 8510 | /* |
@@ -8513,7 +8514,7 @@ found: | |||
8513 | * Base the remaining IO channel assigned, to IO channels already | 8514 | * Base the remaining IO channel assigned, to IO channels already |
8514 | * assigned to other CPUs on the same phys_id. | 8515 | * assigned to other CPUs on the same phys_id. |
8515 | */ | 8516 | */ |
8516 | for (i = 0; i <= max_phys_id; i++) { | 8517 | for (i = min_phys_id; i <= max_phys_id; i++) { |
8517 | /* | 8518 | /* |
8518 | * If there are no io channels already mapped to | 8519 | * If there are no io channels already mapped to |
8519 | * this phys_id, just round robin thru the io_channels. | 8520 | * this phys_id, just round robin thru the io_channels. |
@@ -8595,10 +8596,11 @@ out: | |||
8595 | if (num_io_channel != phba->sli4_hba.num_present_cpu) | 8596 | if (num_io_channel != phba->sli4_hba.num_present_cpu) |
8596 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, | 8597 | lpfc_printf_log(phba, KERN_ERR, LOG_INIT, |
8597 | "3333 Set affinity mismatch:" | 8598 | "3333 Set affinity mismatch:" |
8598 | "%d chann != %d cpus: %d vactors\n", | 8599 | "%d chann != %d cpus: %d vectors\n", |
8599 | num_io_channel, phba->sli4_hba.num_present_cpu, | 8600 | num_io_channel, phba->sli4_hba.num_present_cpu, |
8600 | vectors); | 8601 | vectors); |
8601 | 8602 | ||
8603 | /* Enable using cpu affinity for scheduling */ | ||
8602 | phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; | 8604 | phba->cfg_fcp_io_sched = LPFC_FCP_SCHED_BY_CPU; |
8603 | return 1; | 8605 | return 1; |
8604 | } | 8606 | } |
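
When a vector cannot be mapped to any CPU, the new fallback above abandons CPU-affine scheduling instead of failing outright: it flips cfg_fcp_io_sched to round-robin and hands out channel IDs 0..N-1 cyclically across all present CPUs, so every CPU still ends up with a valid channel_id even though the affinity map is incomplete (the function also now walks physical package IDs from min_phys_id rather than assuming they start at 0). The assignment loop in isolation, with illustrative types:

    /* Cyclically assign 'num_chan' I/O channels across 'num_cpu' CPUs,
     * mirroring the fallback above. */
    static void round_robin_channels(unsigned int *channel_id,
                                     int num_cpu, int num_chan)
    {
            int cpu, chan = 0;

            for (cpu = 0; cpu < num_cpu; cpu++) {
                    channel_id[cpu] = chan;
                    if (++chan >= num_chan)
                            chan = 0;
            }
    }
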
@@ -8689,9 +8691,12 @@ enable_msix_vectors: | |||
8689 | 8691 | ||
8690 | cfg_fail_out: | 8692 | cfg_fail_out: |
8691 | /* free the irq already requested */ | 8693 | /* free the irq already requested */ |
8692 | for (--index; index >= 0; index--) | 8694 | for (--index; index >= 0; index--) { |
8695 | irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. | ||
8696 | vector, NULL); | ||
8693 | free_irq(phba->sli4_hba.msix_entries[index].vector, | 8697 | free_irq(phba->sli4_hba.msix_entries[index].vector, |
8694 | &phba->sli4_hba.fcp_eq_hdl[index]); | 8698 | &phba->sli4_hba.fcp_eq_hdl[index]); |
8699 | } | ||
8695 | 8700 | ||
8696 | msi_fail_out: | 8701 | msi_fail_out: |
8697 | /* Unconfigure MSI-X capability structure */ | 8702 | /* Unconfigure MSI-X capability structure */ |
@@ -8712,9 +8717,12 @@ lpfc_sli4_disable_msix(struct lpfc_hba *phba) | |||
8712 | int index; | 8717 | int index; |
8713 | 8718 | ||
8714 | /* Free up MSI-X multi-message vectors */ | 8719 | /* Free up MSI-X multi-message vectors */ |
8715 | for (index = 0; index < phba->cfg_fcp_io_channel; index++) | 8720 | for (index = 0; index < phba->cfg_fcp_io_channel; index++) { |
8721 | irq_set_affinity_hint(phba->sli4_hba.msix_entries[index]. | ||
8722 | vector, NULL); | ||
8716 | free_irq(phba->sli4_hba.msix_entries[index].vector, | 8723 | free_irq(phba->sli4_hba.msix_entries[index].vector, |
8717 | &phba->sli4_hba.fcp_eq_hdl[index]); | 8724 | &phba->sli4_hba.fcp_eq_hdl[index]); |
8725 | } | ||
8718 | 8726 | ||
8719 | /* Disable MSI-X */ | 8727 | /* Disable MSI-X */ |
8720 | pci_disable_msix(phba->pcidev); | 8728 | pci_disable_msix(phba->pcidev); |
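
Both MSI-X teardown paths above now clear each vector's affinity hint before free_irq(). irq_set_affinity_hint() registers a cpumask pointer that /proc/irq/*/affinity_hint readers can follow, and the IRQ core warns if an IRQ is freed while a stale hint is still registered, so the hint must be reset to NULL first. The canonical order:

    #include <linux/interrupt.h>

    static void teardown_vector(unsigned int irq, void *dev_id)
    {
            irq_set_affinity_hint(irq, NULL);  /* drop the hint first */
            free_irq(irq, dev_id);             /* then release the IRQ */
    }
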
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1242b6c4308b..c913e8cc3b26 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c | |||
@@ -926,10 +926,10 @@ lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba) | |||
926 | 926 | ||
927 | /* get all SCSI buffers need to repost to a local list */ | 927 | /* get all SCSI buffers need to repost to a local list */ |
928 | spin_lock_irq(&phba->scsi_buf_list_get_lock); | 928 | spin_lock_irq(&phba->scsi_buf_list_get_lock); |
929 | spin_lock_irq(&phba->scsi_buf_list_put_lock); | 929 | spin_lock(&phba->scsi_buf_list_put_lock); |
930 | list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); | 930 | list_splice_init(&phba->lpfc_scsi_buf_list_get, &post_sblist); |
931 | list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); | 931 | list_splice(&phba->lpfc_scsi_buf_list_put, &post_sblist); |
932 | spin_unlock_irq(&phba->scsi_buf_list_put_lock); | 932 | spin_unlock(&phba->scsi_buf_list_put_lock); |
933 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); | 933 | spin_unlock_irq(&phba->scsi_buf_list_get_lock); |
934 | 934 | ||
935 | /* post the list of scsi buffer sgls to port if available */ | 935 | /* post the list of scsi buffer sgls to port if available */ |
@@ -1000,9 +1000,12 @@ lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc) | |||
1000 | } | 1000 | } |
1001 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); | 1001 | memset(psb->data, 0, phba->cfg_sg_dma_buf_size); |
1002 | 1002 | ||
1003 | /* Page alignment is CRITICAL, double check to be sure */ | 1003 | /* |
1004 | if (((unsigned long)(psb->data) & | 1004 | * 4K Page alignment is CRITICAL to BlockGuard, double check |
1005 | (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0) { | 1005 | * to be sure. |
1006 | */ | ||
1007 | if (phba->cfg_enable_bg && (((unsigned long)(psb->data) & | ||
1008 | (unsigned long)(SLI4_PAGE_SIZE - 1)) != 0)) { | ||
1006 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, | 1009 | pci_pool_free(phba->lpfc_scsi_dma_buf_pool, |
1007 | psb->data, psb->dma_handle); | 1010 | psb->data, psb->dma_handle); |
1008 | kfree(psb); | 1011 | kfree(psb); |
@@ -1134,22 +1137,21 @@ lpfc_get_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1134 | { | 1137 | { |
1135 | struct lpfc_scsi_buf * lpfc_cmd = NULL; | 1138 | struct lpfc_scsi_buf * lpfc_cmd = NULL; |
1136 | struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; | 1139 | struct list_head *scsi_buf_list_get = &phba->lpfc_scsi_buf_list_get; |
1137 | unsigned long gflag = 0; | 1140 | unsigned long iflag = 0; |
1138 | unsigned long pflag = 0; | ||
1139 | 1141 | ||
1140 | spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); | 1142 | spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); |
1141 | list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, | 1143 | list_remove_head(scsi_buf_list_get, lpfc_cmd, struct lpfc_scsi_buf, |
1142 | list); | 1144 | list); |
1143 | if (!lpfc_cmd) { | 1145 | if (!lpfc_cmd) { |
1144 | spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); | 1146 | spin_lock(&phba->scsi_buf_list_put_lock); |
1145 | list_splice(&phba->lpfc_scsi_buf_list_put, | 1147 | list_splice(&phba->lpfc_scsi_buf_list_put, |
1146 | &phba->lpfc_scsi_buf_list_get); | 1148 | &phba->lpfc_scsi_buf_list_get); |
1147 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); | 1149 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
1148 | list_remove_head(scsi_buf_list_get, lpfc_cmd, | 1150 | list_remove_head(scsi_buf_list_get, lpfc_cmd, |
1149 | struct lpfc_scsi_buf, list); | 1151 | struct lpfc_scsi_buf, list); |
1150 | spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); | 1152 | spin_unlock(&phba->scsi_buf_list_put_lock); |
1151 | } | 1153 | } |
1152 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); | 1154 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); |
1153 | return lpfc_cmd; | 1155 | return lpfc_cmd; |
1154 | } | 1156 | } |
1155 | /** | 1157 | /** |
@@ -1167,11 +1169,10 @@ static struct lpfc_scsi_buf* | |||
1167 | lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | 1169 | lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) |
1168 | { | 1170 | { |
1169 | struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; | 1171 | struct lpfc_scsi_buf *lpfc_cmd, *lpfc_cmd_next; |
1170 | unsigned long gflag = 0; | 1172 | unsigned long iflag = 0; |
1171 | unsigned long pflag = 0; | ||
1172 | int found = 0; | 1173 | int found = 0; |
1173 | 1174 | ||
1174 | spin_lock_irqsave(&phba->scsi_buf_list_get_lock, gflag); | 1175 | spin_lock_irqsave(&phba->scsi_buf_list_get_lock, iflag); |
1175 | list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, | 1176 | list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, |
1176 | &phba->lpfc_scsi_buf_list_get, list) { | 1177 | &phba->lpfc_scsi_buf_list_get, list) { |
1177 | if (lpfc_test_rrq_active(phba, ndlp, | 1178 | if (lpfc_test_rrq_active(phba, ndlp, |
@@ -1182,11 +1183,11 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1182 | break; | 1183 | break; |
1183 | } | 1184 | } |
1184 | if (!found) { | 1185 | if (!found) { |
1185 | spin_lock_irqsave(&phba->scsi_buf_list_put_lock, pflag); | 1186 | spin_lock(&phba->scsi_buf_list_put_lock); |
1186 | list_splice(&phba->lpfc_scsi_buf_list_put, | 1187 | list_splice(&phba->lpfc_scsi_buf_list_put, |
1187 | &phba->lpfc_scsi_buf_list_get); | 1188 | &phba->lpfc_scsi_buf_list_get); |
1188 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); | 1189 | INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list_put); |
1189 | spin_unlock_irqrestore(&phba->scsi_buf_list_put_lock, pflag); | 1190 | spin_unlock(&phba->scsi_buf_list_put_lock); |
1190 | list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, | 1191 | list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next, |
1191 | &phba->lpfc_scsi_buf_list_get, list) { | 1192 | &phba->lpfc_scsi_buf_list_get, list) { |
1192 | if (lpfc_test_rrq_active( | 1193 | if (lpfc_test_rrq_active( |
@@ -1197,7 +1198,7 @@ lpfc_get_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) | |||
1197 | break; | 1198 | break; |
1198 | } | 1199 | } |
1199 | } | 1200 | } |
1200 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, gflag); | 1201 | spin_unlock_irqrestore(&phba->scsi_buf_list_get_lock, iflag); |
1201 | if (!found) | 1202 | if (!found) |
1202 | return NULL; | 1203 | return NULL; |
1203 | return lpfc_cmd; | 1204 | return lpfc_cmd; |
@@ -3966,11 +3967,11 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
3966 | 3967 | ||
3967 | /* | 3968 | /* |
3968 | * Check SLI validation that all the transfer was actually done | 3969 | * Check SLI validation that all the transfer was actually done |
3969 | * (fcpi_parm should be zero). | 3970 | * (fcpi_parm should be zero). Apply check only to reads. |
3970 | */ | 3971 | */ |
3971 | } else if (fcpi_parm) { | 3972 | } else if (fcpi_parm && (cmnd->sc_data_direction == DMA_FROM_DEVICE)) { |
3972 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, | 3973 | lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP | LOG_FCP_ERROR, |
3973 | "9029 FCP Data Transfer Check Error: " | 3974 | "9029 FCP Read Check Error Data: " |
3974 | "x%x x%x x%x x%x x%x\n", | 3975 | "x%x x%x x%x x%x x%x\n", |
3975 | be32_to_cpu(fcpcmd->fcpDl), | 3976 | be32_to_cpu(fcpcmd->fcpDl), |
3976 | be32_to_cpu(fcprsp->rspResId), | 3977 | be32_to_cpu(fcprsp->rspResId), |
@@ -4342,6 +4343,7 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
4342 | char tag[2]; | 4343 | char tag[2]; |
4343 | uint8_t *ptr; | 4344 | uint8_t *ptr; |
4344 | bool sli4; | 4345 | bool sli4; |
4346 | uint32_t fcpdl; | ||
4345 | 4347 | ||
4346 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) | 4348 | if (!pnode || !NLP_CHK_NODE_ACT(pnode)) |
4347 | return; | 4349 | return; |
@@ -4389,8 +4391,12 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd, | |||
4389 | iocb_cmd->ulpPU = PARM_READ_CHECK; | 4391 | iocb_cmd->ulpPU = PARM_READ_CHECK; |
4390 | if (vport->cfg_first_burst_size && | 4392 | if (vport->cfg_first_burst_size && |
4391 | (pnode->nlp_flag & NLP_FIRSTBURST)) { | 4393 | (pnode->nlp_flag & NLP_FIRSTBURST)) { |
4392 | piocbq->iocb.un.fcpi.fcpi_XRdy = | 4394 | fcpdl = scsi_bufflen(scsi_cmnd); |
4393 | vport->cfg_first_burst_size; | 4395 | if (fcpdl < vport->cfg_first_burst_size) |
4396 | piocbq->iocb.un.fcpi.fcpi_XRdy = fcpdl; | ||
4397 | else | ||
4398 | piocbq->iocb.un.fcpi.fcpi_XRdy = | ||
4399 | vport->cfg_first_burst_size; | ||
4394 | } | 4400 | } |
4395 | fcp_cmnd->fcpCntl3 = WRITE_DATA; | 4401 | fcp_cmnd->fcpCntl3 = WRITE_DATA; |
4396 | phba->fc4OutputRequests++; | 4402 | phba->fc4OutputRequests++; |
@@ -4878,6 +4884,9 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd) | |||
4878 | goto out_unlock; | 4884 | goto out_unlock; |
4879 | } | 4885 | } |
4880 | 4886 | ||
4887 | /* Indicate the IO is being aborted by the driver. */ | ||
4888 | iocb->iocb_flag |= LPFC_DRIVER_ABORTED; | ||
4889 | |||
4881 | /* | 4890 | /* |
4882 | * The scsi command can not be in txq and it is in flight because the | 4891 | * The scsi command can not be in txq and it is in flight because the |
4883 | * pCmd is still pointig at the SCSI command we have to abort. There | 4892 | * pCmd is still pointig at the SCSI command we have to abort. There |
@@ -5006,7 +5015,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct lpfc_rport_data *rdata, | |||
5006 | lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); | 5015 | lpfc_cmd = lpfc_get_scsi_buf(phba, rdata->pnode); |
5007 | if (lpfc_cmd == NULL) | 5016 | if (lpfc_cmd == NULL) |
5008 | return FAILED; | 5017 | return FAILED; |
5009 | lpfc_cmd->timeout = 60; | 5018 | lpfc_cmd->timeout = phba->cfg_task_mgmt_tmo; |
5010 | lpfc_cmd->rdata = rdata; | 5019 | lpfc_cmd->rdata = rdata; |
5011 | 5020 | ||
5012 | status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, | 5021 | status = lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun_id, |
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 0392e114531c..612f48973ff2 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c | |||
@@ -9831,6 +9831,13 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
9831 | abort_cmd) != 0) | 9831 | abort_cmd) != 0) |
9832 | continue; | 9832 | continue; |
9833 | 9833 | ||
9834 | /* | ||
9835 | * If the iocbq is already being aborted, don't take a second | ||
9836 | * action, but do count it. | ||
9837 | */ | ||
9838 | if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED) | ||
9839 | continue; | ||
9840 | |||
9834 | /* issue ABTS for this IOCB based on iotag */ | 9841 | /* issue ABTS for this IOCB based on iotag */ |
9835 | abtsiocb = lpfc_sli_get_iocbq(phba); | 9842 | abtsiocb = lpfc_sli_get_iocbq(phba); |
9836 | if (abtsiocb == NULL) { | 9843 | if (abtsiocb == NULL) { |
@@ -9838,6 +9845,9 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
9838 | continue; | 9845 | continue; |
9839 | } | 9846 | } |
9840 | 9847 | ||
9848 | /* indicate the IO is being aborted by the driver. */ | ||
9849 | iocbq->iocb_flag |= LPFC_DRIVER_ABORTED; | ||
9850 | |||
9841 | cmd = &iocbq->iocb; | 9851 | cmd = &iocbq->iocb; |
9842 | abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; | 9852 | abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS; |
9843 | abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; | 9853 | abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext; |
@@ -9847,7 +9857,7 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring, | |||
9847 | abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; | 9857 | abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag; |
9848 | abtsiocb->iocb.ulpLe = 1; | 9858 | abtsiocb->iocb.ulpLe = 1; |
9849 | abtsiocb->iocb.ulpClass = cmd->ulpClass; | 9859 | abtsiocb->iocb.ulpClass = cmd->ulpClass; |
9850 | abtsiocb->vport = phba->pport; | 9860 | abtsiocb->vport = vport; |
9851 | 9861 | ||
9852 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ | 9862 | /* ABTS WQE must go to the same WQ as the WQE to be aborted */ |
9853 | abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; | 9863 | abtsiocb->fcp_wqidx = iocbq->fcp_wqidx; |
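
lpfc_sli_abort_iocb() above adopts the same convention the SCSI abort handler gains earlier in this patch: LPFC_DRIVER_ABORTED marks an iocb whose abort is already in flight, so a later pass skips it rather than issuing a second ABTS, and the flag is set before the ABTS is built so marking and action stay together with respect to other scanners. As a generic mark-once loop (illustrative flag value and types):

    #include <linux/list.h>

    #define DRIVER_ABORTED  0x8000          /* illustrative flag bit */

    struct io_req {
            unsigned int flags;
            struct list_head list;
    };

    /* Abort every request on the ring exactly once: already-marked
     * requests are skipped, new ones are claimed before the abort. */
    static void abort_all(struct list_head *ring)
    {
            struct io_req *req;

            list_for_each_entry(req, ring, list) {
                    if (req->flags & DRIVER_ABORTED)
                            continue;               /* abort in flight */
                    req->flags |= DRIVER_ABORTED;   /* claim it first */
                    /* ... build and queue the ABTS for req ... */
            }
    }
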
@@ -12233,7 +12243,6 @@ static void __iomem * | |||
12233 | lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) | 12243 | lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) |
12234 | { | 12244 | { |
12235 | struct pci_dev *pdev; | 12245 | struct pci_dev *pdev; |
12236 | unsigned long bar_map, bar_map_len; | ||
12237 | 12246 | ||
12238 | if (!phba->pcidev) | 12247 | if (!phba->pcidev) |
12239 | return NULL; | 12248 | return NULL; |
@@ -12242,25 +12251,10 @@ lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset) | |||
12242 | 12251 | ||
12243 | switch (pci_barset) { | 12252 | switch (pci_barset) { |
12244 | case WQ_PCI_BAR_0_AND_1: | 12253 | case WQ_PCI_BAR_0_AND_1: |
12245 | if (!phba->pci_bar0_memmap_p) { | ||
12246 | bar_map = pci_resource_start(pdev, PCI_64BIT_BAR0); | ||
12247 | bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR0); | ||
12248 | phba->pci_bar0_memmap_p = ioremap(bar_map, bar_map_len); | ||
12249 | } | ||
12250 | return phba->pci_bar0_memmap_p; | 12254 | return phba->pci_bar0_memmap_p; |
12251 | case WQ_PCI_BAR_2_AND_3: | 12255 | case WQ_PCI_BAR_2_AND_3: |
12252 | if (!phba->pci_bar2_memmap_p) { | ||
12253 | bar_map = pci_resource_start(pdev, PCI_64BIT_BAR2); | ||
12254 | bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR2); | ||
12255 | phba->pci_bar2_memmap_p = ioremap(bar_map, bar_map_len); | ||
12256 | } | ||
12257 | return phba->pci_bar2_memmap_p; | 12256 | return phba->pci_bar2_memmap_p; |
12258 | case WQ_PCI_BAR_4_AND_5: | 12257 | case WQ_PCI_BAR_4_AND_5: |
12259 | if (!phba->pci_bar4_memmap_p) { | ||
12260 | bar_map = pci_resource_start(pdev, PCI_64BIT_BAR4); | ||
12261 | bar_map_len = pci_resource_len(pdev, PCI_64BIT_BAR4); | ||
12262 | phba->pci_bar4_memmap_p = ioremap(bar_map, bar_map_len); | ||
12263 | } | ||
12264 | return phba->pci_bar4_memmap_p; | 12258 | return phba->pci_bar4_memmap_p; |
12265 | default: | 12259 | default: |
12266 | break; | 12260 | break; |
@@ -15808,7 +15802,7 @@ lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index) | |||
15808 | void | 15802 | void |
15809 | lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | 15803 | lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) |
15810 | { | 15804 | { |
15811 | struct lpfc_fcf_pri *fcf_pri; | 15805 | struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next; |
15812 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { | 15806 | if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) { |
15813 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, | 15807 | lpfc_printf_log(phba, KERN_ERR, LOG_FIP, |
15814 | "2762 FCF (x%x) reached driver's book " | 15808 | "2762 FCF (x%x) reached driver's book " |
@@ -15818,7 +15812,8 @@ lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index) | |||
15818 | } | 15812 | } |
15819 | /* Clear the eligible FCF record index bmask */ | 15813 | /* Clear the eligible FCF record index bmask */ |
15820 | spin_lock_irq(&phba->hbalock); | 15814 | spin_lock_irq(&phba->hbalock); |
15821 | list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) { | 15815 | list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list, |
15816 | list) { | ||
15822 | if (fcf_pri->fcf_rec.fcf_index == fcf_index) { | 15817 | if (fcf_pri->fcf_rec.fcf_index == fcf_index) { |
15823 | list_del_init(&fcf_pri->list); | 15818 | list_del_init(&fcf_pri->list); |
15824 | break; | 15819 | break; |
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h index 97617996206d..6b0f2478706e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.h +++ b/drivers/scsi/lpfc/lpfc_sli.h | |||
@@ -58,7 +58,7 @@ struct lpfc_iocbq { | |||
58 | 58 | ||
59 | IOCB_t iocb; /* IOCB cmd */ | 59 | IOCB_t iocb; /* IOCB cmd */ |
60 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ | 60 | uint8_t retry; /* retry counter for IOCB cmd - if needed */ |
61 | uint16_t iocb_flag; | 61 | uint32_t iocb_flag; |
62 | #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ | 62 | #define LPFC_IO_LIBDFC 1 /* libdfc iocb */ |
63 | #define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ | 63 | #define LPFC_IO_WAKE 2 /* Synchronous I/O completed */ |
64 | #define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ | 64 | #define LPFC_IO_WAKE_TMO LPFC_IO_WAKE /* Synchronous I/O timed out */ |
@@ -73,11 +73,11 @@ struct lpfc_iocbq { | |||
73 | #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ | 73 | #define LPFC_IO_DIF_PASS 0x400 /* T10 DIF IO pass-thru prot */ |
74 | #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ | 74 | #define LPFC_IO_DIF_STRIP 0x800 /* T10 DIF IO strip prot */ |
75 | #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ | 75 | #define LPFC_IO_DIF_INSERT 0x1000 /* T10 DIF IO insert prot */ |
76 | #define LPFC_IO_CMD_OUTSTANDING 0x2000 /* timeout handler abort window */ | ||
76 | 77 | ||
77 | #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ | 78 | #define LPFC_FIP_ELS_ID_MASK 0xc000 /* ELS_ID range 0-3, non-shifted mask */ |
78 | #define LPFC_FIP_ELS_ID_SHIFT 14 | 79 | #define LPFC_FIP_ELS_ID_SHIFT 14 |
79 | 80 | ||
80 | uint8_t rsvd2; | ||
81 | uint32_t drvrTimeout; /* driver timeout in seconds */ | 81 | uint32_t drvrTimeout; /* driver timeout in seconds */ |
82 | uint32_t fcp_wqidx; /* index to FCP work queue */ | 82 | uint32_t fcp_wqidx; /* index to FCP work queue */ |
83 | struct lpfc_vport *vport;/* virtual port pointer */ | 83 | struct lpfc_vport *vport;/* virtual port pointer */ |
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h index 5bcc38223ac9..85120b77aa0e 100644 --- a/drivers/scsi/lpfc/lpfc_sli4.h +++ b/drivers/scsi/lpfc/lpfc_sli4.h | |||
@@ -523,7 +523,7 @@ struct lpfc_sli4_hba { | |||
523 | struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ | 523 | struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */ |
524 | struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ | 524 | struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */ |
525 | 525 | ||
526 | uint8_t fw_func_mode; /* FW function protocol mode */ | 526 | uint32_t fw_func_mode; /* FW function protocol mode */ |
527 | uint32_t ulp0_mode; /* ULP0 protocol mode */ | 527 | uint32_t ulp0_mode; /* ULP0 protocol mode */ |
528 | uint32_t ulp1_mode; /* ULP1 protocol mode */ | 528 | uint32_t ulp1_mode; /* ULP1 protocol mode */ |
529 | 529 | ||
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 21859d2006ce..f58f18342bc3 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h | |||
@@ -18,7 +18,7 @@ | |||
18 | * included with this package. * | 18 | * included with this package. * |
19 | *******************************************************************/ | 19 | *******************************************************************/ |
20 | 20 | ||
21 | #define LPFC_DRIVER_VERSION "8.3.41" | 21 | #define LPFC_DRIVER_VERSION "8.3.42" |
22 | #define LPFC_DRIVER_NAME "lpfc" | 22 | #define LPFC_DRIVER_NAME "lpfc" |
23 | 23 | ||
24 | /* Used for SLI 2/3 */ | 24 | /* Used for SLI 2/3 */ |
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h index 04a42a505852..0c73ba4bf451 100644 --- a/drivers/scsi/megaraid/megaraid_sas.h +++ b/drivers/scsi/megaraid/megaraid_sas.h | |||
@@ -33,9 +33,9 @@ | |||
33 | /* | 33 | /* |
34 | * MegaRAID SAS Driver meta data | 34 | * MegaRAID SAS Driver meta data |
35 | */ | 35 | */ |
36 | #define MEGASAS_VERSION "06.600.18.00-rc1" | 36 | #define MEGASAS_VERSION "06.700.06.00-rc1" |
37 | #define MEGASAS_RELDATE "May. 15, 2013" | 37 | #define MEGASAS_RELDATE "Aug. 31, 2013" |
38 | #define MEGASAS_EXT_VERSION "Wed. May. 15 17:00:00 PDT 2013" | 38 | #define MEGASAS_EXT_VERSION "Sat. Aug. 31 17:00:00 PDT 2013" |
39 | 39 | ||
40 | /* | 40 | /* |
41 | * Device IDs | 41 | * Device IDs |
@@ -170,6 +170,7 @@ | |||
170 | 170 | ||
171 | #define MR_DCMD_CTRL_GET_INFO 0x01010000 | 171 | #define MR_DCMD_CTRL_GET_INFO 0x01010000 |
172 | #define MR_DCMD_LD_GET_LIST 0x03010000 | 172 | #define MR_DCMD_LD_GET_LIST 0x03010000 |
173 | #define MR_DCMD_LD_LIST_QUERY 0x03010100 | ||
173 | 174 | ||
174 | #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 | 175 | #define MR_DCMD_CTRL_CACHE_FLUSH 0x01101000 |
175 | #define MR_FLUSH_CTRL_CACHE 0x01 | 176 | #define MR_FLUSH_CTRL_CACHE 0x01 |
@@ -345,6 +346,15 @@ enum MR_PD_QUERY_TYPE { | |||
345 | MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, | 346 | MR_PD_QUERY_TYPE_EXPOSED_TO_HOST = 5, |
346 | }; | 347 | }; |
347 | 348 | ||
349 | enum MR_LD_QUERY_TYPE { | ||
350 | MR_LD_QUERY_TYPE_ALL = 0, | ||
351 | MR_LD_QUERY_TYPE_EXPOSED_TO_HOST = 1, | ||
352 | MR_LD_QUERY_TYPE_USED_TGT_IDS = 2, | ||
353 | MR_LD_QUERY_TYPE_CLUSTER_ACCESS = 3, | ||
354 | MR_LD_QUERY_TYPE_CLUSTER_LOCALE = 4, | ||
355 | }; | ||
356 | |||
357 | |||
348 | #define MR_EVT_CFG_CLEARED 0x0004 | 358 | #define MR_EVT_CFG_CLEARED 0x0004 |
349 | #define MR_EVT_LD_STATE_CHANGE 0x0051 | 359 | #define MR_EVT_LD_STATE_CHANGE 0x0051 |
350 | #define MR_EVT_PD_INSERTED 0x005b | 360 | #define MR_EVT_PD_INSERTED 0x005b |
@@ -435,6 +445,14 @@ struct MR_LD_LIST { | |||
435 | } ldList[MAX_LOGICAL_DRIVES]; | 445 | } ldList[MAX_LOGICAL_DRIVES]; |
436 | } __packed; | 446 | } __packed; |
437 | 447 | ||
448 | struct MR_LD_TARGETID_LIST { | ||
449 | u32 size; | ||
450 | u32 count; | ||
451 | u8 pad[3]; | ||
452 | u8 targetId[MAX_LOGICAL_DRIVES]; | ||
453 | }; | ||
454 | |||
455 | |||
438 | /* | 456 | /* |
439 | * SAS controller properties | 457 | * SAS controller properties |
440 | */ | 458 | */ |
@@ -474,21 +492,39 @@ struct megasas_ctrl_prop { | |||
474 | * a bit in the following structure. | 492 | * a bit in the following structure. |
475 | */ | 493 | */ |
476 | struct { | 494 | struct { |
477 | u32 copyBackDisabled : 1; | 495 | #if defined(__BIG_ENDIAN_BITFIELD) |
478 | u32 SMARTerEnabled : 1; | 496 | u32 reserved:18; |
479 | u32 prCorrectUnconfiguredAreas : 1; | 497 | u32 enableJBOD:1; |
480 | u32 useFdeOnly : 1; | 498 | u32 disableSpinDownHS:1; |
481 | u32 disableNCQ : 1; | 499 | u32 allowBootWithPinnedCache:1; |
482 | u32 SSDSMARTerEnabled : 1; | 500 | u32 disableOnlineCtrlReset:1; |
483 | u32 SSDPatrolReadEnabled : 1; | 501 | u32 enableSecretKeyControl:1; |
484 | u32 enableSpinDownUnconfigured : 1; | 502 | u32 autoEnhancedImport:1; |
485 | u32 autoEnhancedImport : 1; | 503 | u32 enableSpinDownUnconfigured:1; |
486 | u32 enableSecretKeyControl : 1; | 504 | u32 SSDPatrolReadEnabled:1; |
487 | u32 disableOnlineCtrlReset : 1; | 505 | u32 SSDSMARTerEnabled:1; |
488 | u32 allowBootWithPinnedCache : 1; | 506 | u32 disableNCQ:1; |
489 | u32 disableSpinDownHS : 1; | 507 | u32 useFdeOnly:1; |
490 | u32 enableJBOD : 1; | 508 | u32 prCorrectUnconfiguredAreas:1; |
491 | u32 reserved :18; | 509 | u32 SMARTerEnabled:1; |
510 | u32 copyBackDisabled:1; | ||
511 | #else | ||
512 | u32 copyBackDisabled:1; | ||
513 | u32 SMARTerEnabled:1; | ||
514 | u32 prCorrectUnconfiguredAreas:1; | ||
515 | u32 useFdeOnly:1; | ||
516 | u32 disableNCQ:1; | ||
517 | u32 SSDSMARTerEnabled:1; | ||
518 | u32 SSDPatrolReadEnabled:1; | ||
519 | u32 enableSpinDownUnconfigured:1; | ||
520 | u32 autoEnhancedImport:1; | ||
521 | u32 enableSecretKeyControl:1; | ||
522 | u32 disableOnlineCtrlReset:1; | ||
523 | u32 allowBootWithPinnedCache:1; | ||
524 | u32 disableSpinDownHS:1; | ||
525 | u32 enableJBOD:1; | ||
526 | u32 reserved:18; | ||
527 | #endif | ||
492 | } OnOffProperties; | 528 | } OnOffProperties; |
493 | u8 autoSnapVDSpace; | 529 | u8 autoSnapVDSpace; |
494 | u8 viewSpace; | 530 | u8 viewSpace; |
@@ -802,6 +838,30 @@ struct megasas_ctrl_info { | |||
802 | u16 cacheMemorySize; /*7A2h */ | 838 | u16 cacheMemorySize; /*7A2h */ |
803 | 839 | ||
804 | struct { /*7A4h */ | 840 | struct { /*7A4h */ |
841 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
842 | u32 reserved:11; | ||
843 | u32 supportUnevenSpans:1; | ||
844 | u32 dedicatedHotSparesLimited:1; | ||
845 | u32 headlessMode:1; | ||
846 | u32 supportEmulatedDrives:1; | ||
847 | u32 supportResetNow:1; | ||
848 | u32 realTimeScheduler:1; | ||
849 | u32 supportSSDPatrolRead:1; | ||
850 | u32 supportPerfTuning:1; | ||
851 | u32 disableOnlinePFKChange:1; | ||
852 | u32 supportJBOD:1; | ||
853 | u32 supportBootTimePFKChange:1; | ||
854 | u32 supportSetLinkSpeed:1; | ||
855 | u32 supportEmergencySpares:1; | ||
856 | u32 supportSuspendResumeBGops:1; | ||
857 | u32 blockSSDWriteCacheChange:1; | ||
858 | u32 supportShieldState:1; | ||
859 | u32 supportLdBBMInfo:1; | ||
860 | u32 supportLdPIType3:1; | ||
861 | u32 supportLdPIType2:1; | ||
862 | u32 supportLdPIType1:1; | ||
863 | u32 supportPIcontroller:1; | ||
864 | #else | ||
805 | u32 supportPIcontroller:1; | 865 | u32 supportPIcontroller:1; |
806 | u32 supportLdPIType1:1; | 866 | u32 supportLdPIType1:1; |
807 | u32 supportLdPIType2:1; | 867 | u32 supportLdPIType2:1; |
@@ -827,6 +887,7 @@ struct megasas_ctrl_info { | |||
827 | 887 | ||
828 | u32 supportUnevenSpans:1; | 888 | u32 supportUnevenSpans:1; |
829 | u32 reserved:11; | 889 | u32 reserved:11; |
890 | #endif | ||
830 | } adapterOperations2; | 891 | } adapterOperations2; |
831 | 892 | ||
832 | u8 driverVersion[32]; /*7A8h */ | 893 | u8 driverVersion[32]; /*7A8h */ |
@@ -863,7 +924,7 @@ struct megasas_ctrl_info { | |||
863 | * =============================== | 924 | * =============================== |
864 | */ | 925 | */ |
865 | #define MEGASAS_MAX_PD_CHANNELS 2 | 926 | #define MEGASAS_MAX_PD_CHANNELS 2 |
866 | #define MEGASAS_MAX_LD_CHANNELS 2 | 927 | #define MEGASAS_MAX_LD_CHANNELS 1 |
867 | #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ | 928 | #define MEGASAS_MAX_CHANNELS (MEGASAS_MAX_PD_CHANNELS + \ |
868 | MEGASAS_MAX_LD_CHANNELS) | 929 | MEGASAS_MAX_LD_CHANNELS) |
869 | #define MEGASAS_MAX_DEV_PER_CHANNEL 128 | 930 | #define MEGASAS_MAX_DEV_PER_CHANNEL 128 |
@@ -1051,9 +1112,15 @@ union megasas_sgl_frame { | |||
1051 | 1112 | ||
1052 | typedef union _MFI_CAPABILITIES { | 1113 | typedef union _MFI_CAPABILITIES { |
1053 | struct { | 1114 | struct { |
1115 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
1116 | u32 reserved:30; | ||
1117 | u32 support_additional_msix:1; | ||
1118 | u32 support_fp_remote_lun:1; | ||
1119 | #else | ||
1054 | u32 support_fp_remote_lun:1; | 1120 | u32 support_fp_remote_lun:1; |
1055 | u32 support_additional_msix:1; | 1121 | u32 support_additional_msix:1; |
1056 | u32 reserved:30; | 1122 | u32 reserved:30; |
1123 | #endif | ||
1057 | } mfi_capabilities; | 1124 | } mfi_capabilities; |
1058 | u32 reg; | 1125 | u32 reg; |
1059 | } MFI_CAPABILITIES; | 1126 | } MFI_CAPABILITIES; |
@@ -1656,4 +1723,16 @@ struct megasas_mgmt_info { | |||
1656 | int max_index; | 1723 | int max_index; |
1657 | }; | 1724 | }; |
1658 | 1725 | ||
1726 | u8 | ||
1727 | MR_BuildRaidContext(struct megasas_instance *instance, | ||
1728 | struct IO_REQUEST_INFO *io_info, | ||
1729 | struct RAID_CONTEXT *pRAID_Context, | ||
1730 | struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN); | ||
1731 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); | ||
1732 | struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
1733 | u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map); | ||
1734 | u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map); | ||
1735 | u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map); | ||
1736 | u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
1737 | |||
1659 | #endif /*LSI_MEGARAID_SAS_H */ | 1738 | #endif /*LSI_MEGARAID_SAS_H */ |
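
The bitfield rework that dominates this header is the standard portability recipe for structures shared with firmware: C leaves bitfield allocation order to the ABI, and big-endian targets fill a storage unit from the most significant bit down, so every firmware-defined bitfield struct gets a mirrored declaration under __BIG_ENDIAN_BITFIELD with the members in reverse order. The pattern in miniature, with made-up field names:

    #include <asm/byteorder.h>      /* __BIG_ENDIAN_BITFIELD et al. */
    #include <linux/types.h>

    /* Firmware defines bit 0 = featureA, bit 1 = featureB (little-endian
     * layout); mirror the order so both ABIs land on the same wire bits. */
    struct fw_caps {
    #if defined(__BIG_ENDIAN_BITFIELD)
            u32 reserved:30;
            u32 featureB:1;
            u32 featureA:1;
    #else
            u32 featureA:1;
            u32 featureB:1;
            u32 reserved:30;
    #endif
    };
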
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 1f0ca68409d4..3020921a4746 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c | |||
@@ -18,7 +18,7 @@ | |||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
19 | * | 19 | * |
20 | * FILE: megaraid_sas_base.c | 20 | * FILE: megaraid_sas_base.c |
21 | * Version : 06.600.18.00-rc1 | 21 | * Version : 06.700.06.00-rc1 |
22 | * | 22 | * |
23 | * Authors: LSI Corporation | 23 | * Authors: LSI Corporation |
24 | * Sreenivas Bagalkote | 24 | * Sreenivas Bagalkote |
@@ -92,6 +92,8 @@ MODULE_DESCRIPTION("LSI MegaRAID SAS Driver"); | |||
92 | 92 | ||
93 | int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); | 93 | int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); |
94 | static int megasas_get_pd_list(struct megasas_instance *instance); | 94 | static int megasas_get_pd_list(struct megasas_instance *instance); |
95 | static int megasas_ld_list_query(struct megasas_instance *instance, | ||
96 | u8 query_type); | ||
95 | static int megasas_issue_init_mfi(struct megasas_instance *instance); | 97 | static int megasas_issue_init_mfi(struct megasas_instance *instance); |
96 | static int megasas_register_aen(struct megasas_instance *instance, | 98 | static int megasas_register_aen(struct megasas_instance *instance, |
97 | u32 seq_num, u32 class_locale_word); | 99 | u32 seq_num, u32 class_locale_word); |
@@ -374,13 +376,11 @@ static int | |||
374 | megasas_check_reset_xscale(struct megasas_instance *instance, | 376 | megasas_check_reset_xscale(struct megasas_instance *instance, |
375 | struct megasas_register_set __iomem *regs) | 377 | struct megasas_register_set __iomem *regs) |
376 | { | 378 | { |
377 | u32 consumer; | ||
378 | consumer = *instance->consumer; | ||
379 | 379 | ||
380 | if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && | 380 | if ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) && |
381 | (*instance->consumer == MEGASAS_ADPRESET_INPROG_SIGN)) { | 381 | (le32_to_cpu(*instance->consumer) == |
382 | MEGASAS_ADPRESET_INPROG_SIGN)) | ||
382 | return 1; | 383 | return 1; |
383 | } | ||
384 | return 0; | 384 | return 0; |
385 | } | 385 | } |
386 | 386 | ||
@@ -629,9 +629,10 @@ megasas_fire_cmd_skinny(struct megasas_instance *instance, | |||
629 | { | 629 | { |
630 | unsigned long flags; | 630 | unsigned long flags; |
631 | spin_lock_irqsave(&instance->hba_lock, flags); | 631 | spin_lock_irqsave(&instance->hba_lock, flags); |
632 | writel(0, &(regs)->inbound_high_queue_port); | 632 | writel(upper_32_bits(frame_phys_addr), |
633 | writel((frame_phys_addr | (frame_count<<1))|1, | 633 | &(regs)->inbound_high_queue_port); |
634 | &(regs)->inbound_low_queue_port); | 634 | writel((lower_32_bits(frame_phys_addr) | (frame_count<<1))|1, |
635 | &(regs)->inbound_low_queue_port); | ||
635 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 636 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
636 | } | 637 | } |
637 | 638 | ||
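
The fire_cmd fix above corrects a truncation in the skinny path: the frame's bus address is 64-bit, but the old code wrote a literal 0 to the high queue port and the (possibly above-4GB) address to the low port. upper_32_bits()/lower_32_bits() split the dma_addr_t cleanly whether it is 32 or 64 bits wide. The same idea with an illustrative register layout:

    #include <linux/io.h>
    #include <linux/types.h>
    #include <linux/kernel.h>       /* upper_32_bits(), lower_32_bits() */

    /* Post a 64-bit frame address to a pair of 32-bit doorbell ports. */
    static void post_frame(void __iomem *hi_port, void __iomem *lo_port,
                           dma_addr_t frame, u32 frame_count)
    {
            writel(upper_32_bits(frame), hi_port);
            writel((lower_32_bits(frame) | (frame_count << 1)) | 1, lo_port);
    }
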
@@ -879,8 +880,8 @@ megasas_issue_polled(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
879 | 880 | ||
880 | struct megasas_header *frame_hdr = &cmd->frame->hdr; | 881 | struct megasas_header *frame_hdr = &cmd->frame->hdr; |
881 | 882 | ||
882 | frame_hdr->cmd_status = 0xFF; | 883 | frame_hdr->cmd_status = MFI_CMD_STATUS_POLL_MODE; |
883 | frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; | 884 | frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); |
884 | 885 | ||
885 | /* | 886 | /* |
886 | * Issue the frame using inbound queue port | 887 | * Issue the frame using inbound queue port |
@@ -944,10 +945,12 @@ megasas_issue_blocked_abort_cmd(struct megasas_instance *instance, | |||
944 | */ | 945 | */ |
945 | abort_fr->cmd = MFI_CMD_ABORT; | 946 | abort_fr->cmd = MFI_CMD_ABORT; |
946 | abort_fr->cmd_status = 0xFF; | 947 | abort_fr->cmd_status = 0xFF; |
947 | abort_fr->flags = 0; | 948 | abort_fr->flags = cpu_to_le16(0); |
948 | abort_fr->abort_context = cmd_to_abort->index; | 949 | abort_fr->abort_context = cpu_to_le32(cmd_to_abort->index); |
949 | abort_fr->abort_mfi_phys_addr_lo = cmd_to_abort->frame_phys_addr; | 950 | abort_fr->abort_mfi_phys_addr_lo = |
950 | abort_fr->abort_mfi_phys_addr_hi = 0; | 951 | cpu_to_le32(lower_32_bits(cmd_to_abort->frame_phys_addr)); |
952 | abort_fr->abort_mfi_phys_addr_hi = | ||
953 | cpu_to_le32(upper_32_bits(cmd_to_abort->frame_phys_addr)); | ||
951 | 954 | ||
952 | cmd->sync_cmd = 1; | 955 | cmd->sync_cmd = 1; |
953 | cmd->cmd_status = 0xFF; | 956 | cmd->cmd_status = 0xFF; |
@@ -986,8 +989,8 @@ megasas_make_sgl32(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
986 | 989 | ||
987 | if (sge_count) { | 990 | if (sge_count) { |
988 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | 991 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { |
989 | mfi_sgl->sge32[i].length = sg_dma_len(os_sgl); | 992 | mfi_sgl->sge32[i].length = cpu_to_le32(sg_dma_len(os_sgl)); |
990 | mfi_sgl->sge32[i].phys_addr = sg_dma_address(os_sgl); | 993 | mfi_sgl->sge32[i].phys_addr = cpu_to_le32(sg_dma_address(os_sgl)); |
991 | } | 994 | } |
992 | } | 995 | } |
993 | return sge_count; | 996 | return sge_count; |
@@ -1015,8 +1018,8 @@ megasas_make_sgl64(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1015 | 1018 | ||
1016 | if (sge_count) { | 1019 | if (sge_count) { |
1017 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | 1020 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { |
1018 | mfi_sgl->sge64[i].length = sg_dma_len(os_sgl); | 1021 | mfi_sgl->sge64[i].length = cpu_to_le32(sg_dma_len(os_sgl)); |
1019 | mfi_sgl->sge64[i].phys_addr = sg_dma_address(os_sgl); | 1022 | mfi_sgl->sge64[i].phys_addr = cpu_to_le64(sg_dma_address(os_sgl)); |
1020 | } | 1023 | } |
1021 | } | 1024 | } |
1022 | return sge_count; | 1025 | return sge_count; |
@@ -1043,10 +1046,11 @@ megasas_make_sgl_skinny(struct megasas_instance *instance, | |||
1043 | 1046 | ||
1044 | if (sge_count) { | 1047 | if (sge_count) { |
1045 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | 1048 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { |
1046 | mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl); | 1049 | mfi_sgl->sge_skinny[i].length = |
1050 | cpu_to_le32(sg_dma_len(os_sgl)); | ||
1047 | mfi_sgl->sge_skinny[i].phys_addr = | 1051 | mfi_sgl->sge_skinny[i].phys_addr = |
1048 | sg_dma_address(os_sgl); | 1052 | cpu_to_le64(sg_dma_address(os_sgl)); |
1049 | mfi_sgl->sge_skinny[i].flag = 0; | 1053 | mfi_sgl->sge_skinny[i].flag = cpu_to_le32(0); |
1050 | } | 1054 | } |
1051 | } | 1055 | } |
1052 | return sge_count; | 1056 | return sge_count; |
@@ -1155,8 +1159,8 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1155 | pthru->cdb_len = scp->cmd_len; | 1159 | pthru->cdb_len = scp->cmd_len; |
1156 | pthru->timeout = 0; | 1160 | pthru->timeout = 0; |
1157 | pthru->pad_0 = 0; | 1161 | pthru->pad_0 = 0; |
1158 | pthru->flags = flags; | 1162 | pthru->flags = cpu_to_le16(flags); |
1159 | pthru->data_xfer_len = scsi_bufflen(scp); | 1163 | pthru->data_xfer_len = cpu_to_le32(scsi_bufflen(scp)); |
1160 | 1164 | ||
1161 | memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); | 1165 | memcpy(pthru->cdb, scp->cmnd, scp->cmd_len); |
1162 | 1166 | ||
@@ -1168,18 +1172,18 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1168 | if ((scp->request->timeout / HZ) > 0xFFFF) | 1172 | if ((scp->request->timeout / HZ) > 0xFFFF) |
1169 | pthru->timeout = 0xFFFF; | 1173 | pthru->timeout = 0xFFFF; |
1170 | else | 1174 | else |
1171 | pthru->timeout = scp->request->timeout / HZ; | 1175 | pthru->timeout = cpu_to_le16(scp->request->timeout / HZ); |
1172 | } | 1176 | } |
1173 | 1177 | ||
1174 | /* | 1178 | /* |
1175 | * Construct SGL | 1179 | * Construct SGL |
1176 | */ | 1180 | */ |
1177 | if (instance->flag_ieee == 1) { | 1181 | if (instance->flag_ieee == 1) { |
1178 | pthru->flags |= MFI_FRAME_SGL64; | 1182 | pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); |
1179 | pthru->sge_count = megasas_make_sgl_skinny(instance, scp, | 1183 | pthru->sge_count = megasas_make_sgl_skinny(instance, scp, |
1180 | &pthru->sgl); | 1184 | &pthru->sgl); |
1181 | } else if (IS_DMA64) { | 1185 | } else if (IS_DMA64) { |
1182 | pthru->flags |= MFI_FRAME_SGL64; | 1186 | pthru->flags |= cpu_to_le16(MFI_FRAME_SGL64); |
1183 | pthru->sge_count = megasas_make_sgl64(instance, scp, | 1187 | pthru->sge_count = megasas_make_sgl64(instance, scp, |
1184 | &pthru->sgl); | 1188 | &pthru->sgl); |
1185 | } else | 1189 | } else |
@@ -1196,8 +1200,10 @@ megasas_build_dcdb(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1196 | * Sense info specific | 1200 | * Sense info specific |
1197 | */ | 1201 | */ |
1198 | pthru->sense_len = SCSI_SENSE_BUFFERSIZE; | 1202 | pthru->sense_len = SCSI_SENSE_BUFFERSIZE; |
1199 | pthru->sense_buf_phys_addr_hi = 0; | 1203 | pthru->sense_buf_phys_addr_hi = |
1200 | pthru->sense_buf_phys_addr_lo = cmd->sense_phys_addr; | 1204 | cpu_to_le32(upper_32_bits(cmd->sense_phys_addr)); |
1205 | pthru->sense_buf_phys_addr_lo = | ||
1206 | cpu_to_le32(lower_32_bits(cmd->sense_phys_addr)); | ||
1201 | 1207 | ||
1202 | /* | 1208 | /* |
1203 | * Compute the total number of frames this command consumes. FW uses | 1209 | * Compute the total number of frames this command consumes. FW uses |
@@ -1248,7 +1254,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1248 | ldio->timeout = 0; | 1254 | ldio->timeout = 0; |
1249 | ldio->reserved_0 = 0; | 1255 | ldio->reserved_0 = 0; |
1250 | ldio->pad_0 = 0; | 1256 | ldio->pad_0 = 0; |
1251 | ldio->flags = flags; | 1257 | ldio->flags = cpu_to_le16(flags); |
1252 | ldio->start_lba_hi = 0; | 1258 | ldio->start_lba_hi = 0; |
1253 | ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; | 1259 | ldio->access_byte = (scp->cmd_len != 6) ? scp->cmnd[1] : 0; |
1254 | 1260 | ||
@@ -1256,52 +1262,59 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1256 | * 6-byte READ(0x08) or WRITE(0x0A) cdb | 1262 | * 6-byte READ(0x08) or WRITE(0x0A) cdb |
1257 | */ | 1263 | */ |
1258 | if (scp->cmd_len == 6) { | 1264 | if (scp->cmd_len == 6) { |
1259 | ldio->lba_count = (u32) scp->cmnd[4]; | 1265 | ldio->lba_count = cpu_to_le32((u32) scp->cmnd[4]); |
1260 | ldio->start_lba_lo = ((u32) scp->cmnd[1] << 16) | | 1266 | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[1] << 16) | |
1261 | ((u32) scp->cmnd[2] << 8) | (u32) scp->cmnd[3]; | 1267 | ((u32) scp->cmnd[2] << 8) | |
1268 | (u32) scp->cmnd[3]); | ||
1262 | 1269 | ||
1263 | ldio->start_lba_lo &= 0x1FFFFF; | 1270 | ldio->start_lba_lo &= cpu_to_le32(0x1FFFFF); |
1264 | } | 1271 | } |
1265 | 1272 | ||
1266 | /* | 1273 | /* |
1267 | * 10-byte READ(0x28) or WRITE(0x2A) cdb | 1274 | * 10-byte READ(0x28) or WRITE(0x2A) cdb |
1268 | */ | 1275 | */ |
1269 | else if (scp->cmd_len == 10) { | 1276 | else if (scp->cmd_len == 10) { |
1270 | ldio->lba_count = (u32) scp->cmnd[8] | | 1277 | ldio->lba_count = cpu_to_le32((u32) scp->cmnd[8] | |
1271 | ((u32) scp->cmnd[7] << 8); | 1278 | ((u32) scp->cmnd[7] << 8)); |
1272 | ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | | 1279 | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | |
1273 | ((u32) scp->cmnd[3] << 16) | | 1280 | ((u32) scp->cmnd[3] << 16) | |
1274 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | 1281 | ((u32) scp->cmnd[4] << 8) | |
1282 | (u32) scp->cmnd[5]); | ||
1275 | } | 1283 | } |
1276 | 1284 | ||
1277 | /* | 1285 | /* |
1278 | * 12-byte READ(0xA8) or WRITE(0xAA) cdb | 1286 | * 12-byte READ(0xA8) or WRITE(0xAA) cdb |
1279 | */ | 1287 | */ |
1280 | else if (scp->cmd_len == 12) { | 1288 | else if (scp->cmd_len == 12) { |
1281 | ldio->lba_count = ((u32) scp->cmnd[6] << 24) | | 1289 | ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[6] << 24) | |
1282 | ((u32) scp->cmnd[7] << 16) | | 1290 | ((u32) scp->cmnd[7] << 16) | |
1283 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; | 1291 | ((u32) scp->cmnd[8] << 8) | |
1292 | (u32) scp->cmnd[9]); | ||
1284 | 1293 | ||
1285 | ldio->start_lba_lo = ((u32) scp->cmnd[2] << 24) | | 1294 | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[2] << 24) | |
1286 | ((u32) scp->cmnd[3] << 16) | | 1295 | ((u32) scp->cmnd[3] << 16) | |
1287 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | 1296 | ((u32) scp->cmnd[4] << 8) | |
1297 | (u32) scp->cmnd[5]); | ||
1288 | } | 1298 | } |
1289 | 1299 | ||
1290 | /* | 1300 | /* |
1291 | * 16-byte READ(0x88) or WRITE(0x8A) cdb | 1301 | * 16-byte READ(0x88) or WRITE(0x8A) cdb |
1292 | */ | 1302 | */ |
1293 | else if (scp->cmd_len == 16) { | 1303 | else if (scp->cmd_len == 16) { |
1294 | ldio->lba_count = ((u32) scp->cmnd[10] << 24) | | 1304 | ldio->lba_count = cpu_to_le32(((u32) scp->cmnd[10] << 24) | |
1295 | ((u32) scp->cmnd[11] << 16) | | 1305 | ((u32) scp->cmnd[11] << 16) | |
1296 | ((u32) scp->cmnd[12] << 8) | (u32) scp->cmnd[13]; | 1306 | ((u32) scp->cmnd[12] << 8) | |
1307 | (u32) scp->cmnd[13]); | ||
1297 | 1308 | ||
1298 | ldio->start_lba_lo = ((u32) scp->cmnd[6] << 24) | | 1309 | ldio->start_lba_lo = cpu_to_le32(((u32) scp->cmnd[6] << 24) | |
1299 | ((u32) scp->cmnd[7] << 16) | | 1310 | ((u32) scp->cmnd[7] << 16) | |
1300 | ((u32) scp->cmnd[8] << 8) | (u32) scp->cmnd[9]; | 1311 | ((u32) scp->cmnd[8] << 8) | |
1312 | (u32) scp->cmnd[9]); | ||
1301 | 1313 | ||
1302 | ldio->start_lba_hi = ((u32) scp->cmnd[2] << 24) | | 1314 | ldio->start_lba_hi = cpu_to_le32(((u32) scp->cmnd[2] << 24) | |
1303 | ((u32) scp->cmnd[3] << 16) | | 1315 | ((u32) scp->cmnd[3] << 16) | |
1304 | ((u32) scp->cmnd[4] << 8) | (u32) scp->cmnd[5]; | 1316 | ((u32) scp->cmnd[4] << 8) | |
1317 | (u32) scp->cmnd[5]); | ||
1305 | 1318 | ||
1306 | } | 1319 | } |
1307 | 1320 | ||
@@ -1309,11 +1322,11 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1309 | * Construct SGL | 1322 | * Construct SGL |
1310 | */ | 1323 | */ |
1311 | if (instance->flag_ieee) { | 1324 | if (instance->flag_ieee) { |
1312 | ldio->flags |= MFI_FRAME_SGL64; | 1325 | ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); |
1313 | ldio->sge_count = megasas_make_sgl_skinny(instance, scp, | 1326 | ldio->sge_count = megasas_make_sgl_skinny(instance, scp, |
1314 | &ldio->sgl); | 1327 | &ldio->sgl); |
1315 | } else if (IS_DMA64) { | 1328 | } else if (IS_DMA64) { |
1316 | ldio->flags |= MFI_FRAME_SGL64; | 1329 | ldio->flags |= cpu_to_le16(MFI_FRAME_SGL64); |
1317 | ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); | 1330 | ldio->sge_count = megasas_make_sgl64(instance, scp, &ldio->sgl); |
1318 | } else | 1331 | } else |
1319 | ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); | 1332 | ldio->sge_count = megasas_make_sgl32(instance, scp, &ldio->sgl); |
@@ -1329,7 +1342,7 @@ megasas_build_ldio(struct megasas_instance *instance, struct scsi_cmnd *scp, | |||
1329 | */ | 1342 | */ |
1330 | ldio->sense_len = SCSI_SENSE_BUFFERSIZE; | 1343 | ldio->sense_len = SCSI_SENSE_BUFFERSIZE; |
1331 | ldio->sense_buf_phys_addr_hi = 0; | 1344 | ldio->sense_buf_phys_addr_hi = 0; |
1332 | ldio->sense_buf_phys_addr_lo = cmd->sense_phys_addr; | 1345 | ldio->sense_buf_phys_addr_lo = cpu_to_le32(cmd->sense_phys_addr); |
1333 | 1346 | ||
1334 | /* | 1347 | /* |
1335 | * Compute the total number of frames this command consumes. FW uses | 1348 | * Compute the total number of frames this command consumes. FW uses |
@@ -1400,20 +1413,32 @@ megasas_dump_pending_frames(struct megasas_instance *instance) | |||
1400 | ldio = (struct megasas_io_frame *)cmd->frame; | 1413 | ldio = (struct megasas_io_frame *)cmd->frame; |
1401 | mfi_sgl = &ldio->sgl; | 1414 | mfi_sgl = &ldio->sgl; |
1402 | sgcount = ldio->sge_count; | 1415 | sgcount = ldio->sge_count; |
1403 | printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no, cmd->frame_count,ldio->cmd,ldio->target_id, ldio->start_lba_lo,ldio->start_lba_hi,ldio->sense_buf_phys_addr_lo,sgcount); | 1416 | printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x," |
1417 | " lba lo : 0x%x, lba_hi : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", | ||
1418 | instance->host->host_no, cmd->frame_count, ldio->cmd, ldio->target_id, | ||
1419 | le32_to_cpu(ldio->start_lba_lo), le32_to_cpu(ldio->start_lba_hi), | ||
1420 | le32_to_cpu(ldio->sense_buf_phys_addr_lo), sgcount); | ||
1404 | } | 1421 | } |
1405 | else { | 1422 | else { |
1406 | pthru = (struct megasas_pthru_frame *) cmd->frame; | 1423 | pthru = (struct megasas_pthru_frame *) cmd->frame; |
1407 | mfi_sgl = &pthru->sgl; | 1424 | mfi_sgl = &pthru->sgl; |
1408 | sgcount = pthru->sge_count; | 1425 | sgcount = pthru->sge_count; |
1409 | printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n",instance->host->host_no,cmd->frame_count,pthru->cmd,pthru->target_id,pthru->lun,pthru->cdb_len , pthru->data_xfer_len,pthru->sense_buf_phys_addr_lo,sgcount); | 1426 | printk(KERN_ERR "megasas[%d]: frame count : 0x%x, Cmd : 0x%x, Tgt id : 0x%x, " |
1427 | "lun : 0x%x, cdb_len : 0x%x, data xfer len : 0x%x, sense_buf addr : 0x%x,sge count : 0x%x\n", | ||
1428 | instance->host->host_no, cmd->frame_count, pthru->cmd, pthru->target_id, | ||
1429 | pthru->lun, pthru->cdb_len, le32_to_cpu(pthru->data_xfer_len), | ||
1430 | le32_to_cpu(pthru->sense_buf_phys_addr_lo), sgcount); | ||
1410 | } | 1431 | } |
1411 | if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ | 1432 | if(megasas_dbg_lvl & MEGASAS_DBG_LVL){ |
1412 | for (n = 0; n < sgcount; n++){ | 1433 | for (n = 0; n < sgcount; n++){ |
1413 | if (IS_DMA64) | 1434 | if (IS_DMA64) |
1414 | printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%08lx ",mfi_sgl->sge64[n].length , (unsigned long)mfi_sgl->sge64[n].phys_addr) ; | 1435 | printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%llx ", |
1436 | le32_to_cpu(mfi_sgl->sge64[n].length), | ||
1437 | le64_to_cpu(mfi_sgl->sge64[n].phys_addr)); | ||
1415 | else | 1438 | else |
1416 | printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ",mfi_sgl->sge32[n].length , mfi_sgl->sge32[n].phys_addr) ; | 1439 | printk(KERN_ERR "megasas: sgl len : 0x%x, sgl addr : 0x%x ", |
1440 | le32_to_cpu(mfi_sgl->sge32[n].length), | ||
1441 | le32_to_cpu(mfi_sgl->sge32[n].phys_addr)); | ||
1417 | } | 1442 | } |
1418 | } | 1443 | } |
1419 | printk(KERN_ERR "\n"); | 1444 | printk(KERN_ERR "\n"); |
@@ -1674,11 +1699,11 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) | |||
1674 | 1699 | ||
1675 | spin_lock_irqsave(&instance->completion_lock, flags); | 1700 | spin_lock_irqsave(&instance->completion_lock, flags); |
1676 | 1701 | ||
1677 | producer = *instance->producer; | 1702 | producer = le32_to_cpu(*instance->producer); |
1678 | consumer = *instance->consumer; | 1703 | consumer = le32_to_cpu(*instance->consumer); |
1679 | 1704 | ||
1680 | while (consumer != producer) { | 1705 | while (consumer != producer) { |
1681 | context = instance->reply_queue[consumer]; | 1706 | context = le32_to_cpu(instance->reply_queue[consumer]); |
1682 | if (context >= instance->max_fw_cmds) { | 1707 | if (context >= instance->max_fw_cmds) { |
1683 | printk(KERN_ERR "Unexpected context value %x\n", | 1708 | printk(KERN_ERR "Unexpected context value %x\n", |
1684 | context); | 1709 | context); |
@@ -1695,7 +1720,7 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) | |||
1695 | } | 1720 | } |
1696 | } | 1721 | } |
1697 | 1722 | ||
1698 | *instance->consumer = producer; | 1723 | *instance->consumer = cpu_to_le32(producer); |
1699 | 1724 | ||
1700 | spin_unlock_irqrestore(&instance->completion_lock, flags); | 1725 | spin_unlock_irqrestore(&instance->completion_lock, flags); |
1701 | 1726 | ||
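
The reply-queue indices and the queue entries live in DMA-coherent memory that the firmware writes little-endian, so every load above goes through le32_to_cpu() and the final store back through cpu_to_le32(). A simplified sketch of the drain loop under those assumptions (hypothetical names, no locking, a ring of max_fw_cmds + 1 slots as in the driver):

    #include <stdint.h>

    #define MAX_FW_CMDS 256u

    /* Firmware-shared ring: every word is little-endian in memory. */
    struct reply_ring {
            volatile uint32_t producer;               /* firmware writes  */
            volatile uint32_t consumer;               /* driver writes    */
            volatile uint32_t queue[MAX_FW_CMDS + 1]; /* command contexts */
    };

    /* Swap only on big-endian hosts; the inverse operation is identical. */
    static uint32_t swap_le32(uint32_t v)
    {
    #if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
            return __builtin_bswap32(v);
    #else
            return v;
    #endif
    }

    static void drain_replies(struct reply_ring *ring,
                              void (*complete)(uint32_t context))
    {
            uint32_t producer = swap_le32(ring->producer);
            uint32_t consumer = swap_le32(ring->consumer);

            while (consumer != producer) {
                    uint32_t context = swap_le32(ring->queue[consumer]);

                    if (context < MAX_FW_CMDS)  /* guard against bad contexts */
                            complete(context);
                    if (++consumer == MAX_FW_CMDS + 1)
                            consumer = 0;       /* wrap like the real driver  */
            }
            ring->consumer = swap_le32(producer); /* publish progress to FW */
    }
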
@@ -1716,7 +1741,7 @@ void megasas_do_ocr(struct megasas_instance *instance) | |||
1716 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || | 1741 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) || |
1717 | (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || | 1742 | (instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) || |
1718 | (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { | 1743 | (instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) { |
1719 | *instance->consumer = MEGASAS_ADPRESET_INPROG_SIGN; | 1744 | *instance->consumer = cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); |
1720 | } | 1745 | } |
1721 | instance->instancet->disable_intr(instance); | 1746 | instance->instancet->disable_intr(instance); |
1722 | instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; | 1747 | instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT; |
@@ -2186,6 +2211,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
2186 | struct megasas_header *hdr = &cmd->frame->hdr; | 2211 | struct megasas_header *hdr = &cmd->frame->hdr; |
2187 | unsigned long flags; | 2212 | unsigned long flags; |
2188 | struct fusion_context *fusion = instance->ctrl_context; | 2213 | struct fusion_context *fusion = instance->ctrl_context; |
2214 | u32 opcode; | ||
2189 | 2215 | ||
2190 | /* flag for the retry reset */ | 2216 | /* flag for the retry reset */ |
2191 | cmd->retry_for_fw_reset = 0; | 2217 | cmd->retry_for_fw_reset = 0; |
@@ -2287,9 +2313,10 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
2287 | case MFI_CMD_SMP: | 2313 | case MFI_CMD_SMP: |
2288 | case MFI_CMD_STP: | 2314 | case MFI_CMD_STP: |
2289 | case MFI_CMD_DCMD: | 2315 | case MFI_CMD_DCMD: |
2316 | opcode = le32_to_cpu(cmd->frame->dcmd.opcode); | ||
2290 | /* Check for LD map update */ | 2317 | /* Check for LD map update */ |
2291 | if ((cmd->frame->dcmd.opcode == MR_DCMD_LD_MAP_GET_INFO) && | 2318 | if ((opcode == MR_DCMD_LD_MAP_GET_INFO) |
2292 | (cmd->frame->dcmd.mbox.b[1] == 1)) { | 2319 | && (cmd->frame->dcmd.mbox.b[1] == 1)) { |
2293 | fusion->fast_path_io = 0; | 2320 | fusion->fast_path_io = 0; |
2294 | spin_lock_irqsave(instance->host->host_lock, flags); | 2321 | spin_lock_irqsave(instance->host->host_lock, flags); |
2295 | if (cmd->frame->hdr.cmd_status != 0) { | 2322 | if (cmd->frame->hdr.cmd_status != 0) { |
@@ -2323,8 +2350,8 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
2323 | flags); | 2350 | flags); |
2324 | break; | 2351 | break; |
2325 | } | 2352 | } |
2326 | if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET_INFO || | 2353 | if (opcode == MR_DCMD_CTRL_EVENT_GET_INFO || |
2327 | cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_GET) { | 2354 | opcode == MR_DCMD_CTRL_EVENT_GET) { |
2328 | spin_lock_irqsave(&poll_aen_lock, flags); | 2355 | spin_lock_irqsave(&poll_aen_lock, flags); |
2329 | megasas_poll_wait_aen = 0; | 2356 | megasas_poll_wait_aen = 0; |
2330 | spin_unlock_irqrestore(&poll_aen_lock, flags); | 2357 | spin_unlock_irqrestore(&poll_aen_lock, flags); |
@@ -2333,7 +2360,7 @@ megasas_complete_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd, | |||
2333 | /* | 2360 | /* |
2334 | * See if got an event notification | 2361 | * See if got an event notification |
2335 | */ | 2362 | */ |
2336 | if (cmd->frame->dcmd.opcode == MR_DCMD_CTRL_EVENT_WAIT) | 2363 | if (opcode == MR_DCMD_CTRL_EVENT_WAIT) |
2337 | megasas_service_aen(instance, cmd); | 2364 | megasas_service_aen(instance, cmd); |
2338 | else | 2365 | else |
2339 | megasas_complete_int_cmd(instance, cmd); | 2366 | megasas_complete_int_cmd(instance, cmd); |
@@ -2606,7 +2633,7 @@ megasas_deplete_reply_queue(struct megasas_instance *instance, | |||
2606 | PCI_DEVICE_ID_LSI_VERDE_ZCR)) { | 2633 | PCI_DEVICE_ID_LSI_VERDE_ZCR)) { |
2607 | 2634 | ||
2608 | *instance->consumer = | 2635 | *instance->consumer = |
2609 | MEGASAS_ADPRESET_INPROG_SIGN; | 2636 | cpu_to_le32(MEGASAS_ADPRESET_INPROG_SIGN); |
2610 | } | 2637 | } |
2611 | 2638 | ||
2612 | 2639 | ||
@@ -2983,7 +3010,7 @@ static int megasas_create_frame_pool(struct megasas_instance *instance) | |||
2983 | } | 3010 | } |
2984 | 3011 | ||
2985 | memset(cmd->frame, 0, total_sz); | 3012 | memset(cmd->frame, 0, total_sz); |
2986 | cmd->frame->io.context = cmd->index; | 3013 | cmd->frame->io.context = cpu_to_le32(cmd->index); |
2987 | cmd->frame->io.pad_0 = 0; | 3014 | cmd->frame->io.pad_0 = 0; |
2988 | if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && | 3015 | if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) && |
2989 | (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && | 3016 | (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) && |
@@ -3143,13 +3170,13 @@ megasas_get_pd_list(struct megasas_instance *instance) | |||
3143 | dcmd->cmd = MFI_CMD_DCMD; | 3170 | dcmd->cmd = MFI_CMD_DCMD; |
3144 | dcmd->cmd_status = 0xFF; | 3171 | dcmd->cmd_status = 0xFF; |
3145 | dcmd->sge_count = 1; | 3172 | dcmd->sge_count = 1; |
3146 | dcmd->flags = MFI_FRAME_DIR_READ; | 3173 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
3147 | dcmd->timeout = 0; | 3174 | dcmd->timeout = 0; |
3148 | dcmd->pad_0 = 0; | 3175 | dcmd->pad_0 = 0; |
3149 | dcmd->data_xfer_len = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); | 3176 | dcmd->data_xfer_len = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); |
3150 | dcmd->opcode = MR_DCMD_PD_LIST_QUERY; | 3177 | dcmd->opcode = cpu_to_le32(MR_DCMD_PD_LIST_QUERY); |
3151 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 3178 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); |
3152 | dcmd->sgl.sge32[0].length = MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST); | 3179 | dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST)); |
3153 | 3180 | ||
3154 | if (!megasas_issue_polled(instance, cmd)) { | 3181 | if (!megasas_issue_polled(instance, cmd)) { |
3155 | ret = 0; | 3182 | ret = 0; |
@@ -3164,16 +3191,16 @@ megasas_get_pd_list(struct megasas_instance *instance) | |||
3164 | pd_addr = ci->addr; | 3191 | pd_addr = ci->addr; |
3165 | 3192 | ||
3166 | if ( ret == 0 && | 3193 | if ( ret == 0 && |
3167 | (ci->count < | 3194 | (le32_to_cpu(ci->count) < |
3168 | (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { | 3195 | (MEGASAS_MAX_PD_CHANNELS * MEGASAS_MAX_DEV_PER_CHANNEL))) { |
3169 | 3196 | ||
3170 | memset(instance->pd_list, 0, | 3197 | memset(instance->pd_list, 0, |
3171 | MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); | 3198 | MEGASAS_MAX_PD * sizeof(struct megasas_pd_list)); |
3172 | 3199 | ||
3173 | for (pd_index = 0; pd_index < ci->count; pd_index++) { | 3200 | for (pd_index = 0; pd_index < le32_to_cpu(ci->count); pd_index++) { |
3174 | 3201 | ||
3175 | instance->pd_list[pd_addr->deviceId].tid = | 3202 | instance->pd_list[pd_addr->deviceId].tid = |
3176 | pd_addr->deviceId; | 3203 | le16_to_cpu(pd_addr->deviceId); |
3177 | instance->pd_list[pd_addr->deviceId].driveType = | 3204 | instance->pd_list[pd_addr->deviceId].driveType = |
3178 | pd_addr->scsiDevType; | 3205 | pd_addr->scsiDevType; |
3179 | instance->pd_list[pd_addr->deviceId].driveState = | 3206 | instance->pd_list[pd_addr->deviceId].driveState = |
@@ -3207,6 +3234,7 @@ megasas_get_ld_list(struct megasas_instance *instance) | |||
3207 | struct megasas_dcmd_frame *dcmd; | 3234 | struct megasas_dcmd_frame *dcmd; |
3208 | struct MR_LD_LIST *ci; | 3235 | struct MR_LD_LIST *ci; |
3209 | dma_addr_t ci_h = 0; | 3236 | dma_addr_t ci_h = 0; |
3237 | u32 ld_count; | ||
3210 | 3238 | ||
3211 | cmd = megasas_get_cmd(instance); | 3239 | cmd = megasas_get_cmd(instance); |
3212 | 3240 | ||
@@ -3233,12 +3261,12 @@ megasas_get_ld_list(struct megasas_instance *instance) | |||
3233 | dcmd->cmd = MFI_CMD_DCMD; | 3261 | dcmd->cmd = MFI_CMD_DCMD; |
3234 | dcmd->cmd_status = 0xFF; | 3262 | dcmd->cmd_status = 0xFF; |
3235 | dcmd->sge_count = 1; | 3263 | dcmd->sge_count = 1; |
3236 | dcmd->flags = MFI_FRAME_DIR_READ; | 3264 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
3237 | dcmd->timeout = 0; | 3265 | dcmd->timeout = 0; |
3238 | dcmd->data_xfer_len = sizeof(struct MR_LD_LIST); | 3266 | dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_LIST)); |
3239 | dcmd->opcode = MR_DCMD_LD_GET_LIST; | 3267 | dcmd->opcode = cpu_to_le32(MR_DCMD_LD_GET_LIST); |
3240 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 3268 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); |
3241 | dcmd->sgl.sge32[0].length = sizeof(struct MR_LD_LIST); | 3269 | dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST)); |
3242 | dcmd->pad_0 = 0; | 3270 | dcmd->pad_0 = 0; |
3243 | 3271 | ||
3244 | if (!megasas_issue_polled(instance, cmd)) { | 3272 | if (!megasas_issue_polled(instance, cmd)) { |
@@ -3247,12 +3275,14 @@ megasas_get_ld_list(struct megasas_instance *instance) | |||
3247 | ret = -1; | 3275 | ret = -1; |
3248 | } | 3276 | } |
3249 | 3277 | ||
3278 | ld_count = le32_to_cpu(ci->ldCount); | ||
3279 | |||
3250 | /* the following loop fills in the instance LD ID list */ | 3280 | /* the following loop fills in the instance LD ID list */ |
3251 | 3281 | ||
3252 | if ((ret == 0) && (ci->ldCount <= MAX_LOGICAL_DRIVES)) { | 3282 | if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) { |
3253 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | 3283 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); |
3254 | 3284 | ||
3255 | for (ld_index = 0; ld_index < ci->ldCount; ld_index++) { | 3285 | for (ld_index = 0; ld_index < ld_count; ld_index++) { |
3256 | if (ci->ldList[ld_index].state != 0) { | 3286 | if (ci->ldList[ld_index].state != 0) { |
3257 | ids = ci->ldList[ld_index].ref.targetId; | 3287 | ids = ci->ldList[ld_index].ref.targetId; |
3258 | instance->ld_ids[ids] = | 3288 | instance->ld_ids[ids] = |
@@ -3271,6 +3301,87 @@ megasas_get_ld_list(struct megasas_instance *instance) | |||
3271 | } | 3301 | } |
3272 | 3302 | ||
3273 | /** | 3303 | /** |
3304 | * megasas_ld_list_query - Returns FW's ld_list structure | ||
3305 | * @instance: Adapter soft state | ||
3306 | * @query_type: ld_list query type | ||
3307 | * | ||
3308 | * Issues an internal command (DCMD) to get the FW's LD list | ||
3309 | * structure. This information is mainly used to find out which | ||
3310 | * LDs the FW exposes to the host. | ||
3311 | */ | ||
3312 | static int | ||
3313 | megasas_ld_list_query(struct megasas_instance *instance, u8 query_type) | ||
3314 | { | ||
3315 | int ret = 0, ld_index = 0, ids = 0; | ||
3316 | struct megasas_cmd *cmd; | ||
3317 | struct megasas_dcmd_frame *dcmd; | ||
3318 | struct MR_LD_TARGETID_LIST *ci; | ||
3319 | dma_addr_t ci_h = 0; | ||
3320 | u32 tgtid_count; | ||
3321 | |||
3322 | cmd = megasas_get_cmd(instance); | ||
3323 | |||
3324 | if (!cmd) { | ||
3325 | printk(KERN_WARNING | ||
3326 | "megasas:(megasas_ld_list_query): Failed to get cmd\n"); | ||
3327 | return -ENOMEM; | ||
3328 | } | ||
3329 | |||
3330 | dcmd = &cmd->frame->dcmd; | ||
3331 | |||
3332 | ci = pci_alloc_consistent(instance->pdev, | ||
3333 | sizeof(struct MR_LD_TARGETID_LIST), &ci_h); | ||
3334 | |||
3335 | if (!ci) { | ||
3336 | printk(KERN_WARNING | ||
3337 | "megasas: Failed to alloc mem for ld_list_query\n"); | ||
3338 | megasas_return_cmd(instance, cmd); | ||
3339 | return -ENOMEM; | ||
3340 | } | ||
3341 | |||
3342 | memset(ci, 0, sizeof(*ci)); | ||
3343 | memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE); | ||
3344 | |||
3345 | dcmd->mbox.b[0] = query_type; | ||
3346 | |||
3347 | dcmd->cmd = MFI_CMD_DCMD; | ||
3348 | dcmd->cmd_status = 0xFF; | ||
3349 | dcmd->sge_count = 1; | ||
3350 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); | ||
3351 | dcmd->timeout = 0; | ||
3352 | dcmd->data_xfer_len = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); | ||
3353 | dcmd->opcode = cpu_to_le32(MR_DCMD_LD_LIST_QUERY); | ||
3354 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); | ||
3355 | dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST)); | ||
3356 | dcmd->pad_0 = 0; | ||
3357 | |||
3358 | if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) { | ||
3359 | ret = 0; | ||
3360 | } else { | ||
3361 | /* On failure, call older LD list DCMD */ | ||
3362 | ret = 1; | ||
3363 | } | ||
3364 | |||
3365 | tgtid_count = le32_to_cpu(ci->count); | ||
3366 | |||
3367 | if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) { | ||
3368 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | ||
3369 | for (ld_index = 0; ld_index < tgtid_count; ld_index++) { | ||
3370 | ids = ci->targetId[ld_index]; | ||
3371 | instance->ld_ids[ids] = ci->targetId[ld_index]; | ||
3372 | } | ||
3373 | |||
3374 | } | ||
3375 | |||
3376 | pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST), | ||
3377 | ci, ci_h); | ||
3378 | |||
3379 | megasas_return_cmd(instance, cmd); | ||
3380 | |||
3381 | return ret; | ||
3382 | } | ||
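
Every call site added later in this patch pairs the new DCMD with the legacy path, so firmware that rejects MR_DCMD_LD_LIST_QUERY still gets its LDs enumerated:

    /* Prefer the filtered query; on failure (ret != 0, older FW),
     * fall back to the unfiltered MR_DCMD_LD_GET_LIST path. */
    if (megasas_ld_list_query(instance,
                              MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
            megasas_get_ld_list(instance);
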
3383 | |||
3384 | /** | ||
3274 | * megasas_get_controller_info - Returns FW's controller structure | 3385 | * megasas_get_controller_info - Returns FW's controller structure |
3275 | * @instance: Adapter soft state | 3386 | * @instance: Adapter soft state |
3276 | * @ctrl_info: Controller information structure | 3387 | * @ctrl_info: Controller information structure |
@@ -3313,13 +3424,13 @@ megasas_get_ctrl_info(struct megasas_instance *instance, | |||
3313 | dcmd->cmd = MFI_CMD_DCMD; | 3424 | dcmd->cmd = MFI_CMD_DCMD; |
3314 | dcmd->cmd_status = 0xFF; | 3425 | dcmd->cmd_status = 0xFF; |
3315 | dcmd->sge_count = 1; | 3426 | dcmd->sge_count = 1; |
3316 | dcmd->flags = MFI_FRAME_DIR_READ; | 3427 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
3317 | dcmd->timeout = 0; | 3428 | dcmd->timeout = 0; |
3318 | dcmd->pad_0 = 0; | 3429 | dcmd->pad_0 = 0; |
3319 | dcmd->data_xfer_len = sizeof(struct megasas_ctrl_info); | 3430 | dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_ctrl_info)); |
3320 | dcmd->opcode = MR_DCMD_CTRL_GET_INFO; | 3431 | dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO); |
3321 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 3432 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); |
3322 | dcmd->sgl.sge32[0].length = sizeof(struct megasas_ctrl_info); | 3433 | dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info)); |
3323 | 3434 | ||
3324 | if (!megasas_issue_polled(instance, cmd)) { | 3435 | if (!megasas_issue_polled(instance, cmd)) { |
3325 | ret = 0; | 3436 | ret = 0; |
@@ -3375,17 +3486,20 @@ megasas_issue_init_mfi(struct megasas_instance *instance) | |||
3375 | memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); | 3486 | memset(initq_info, 0, sizeof(struct megasas_init_queue_info)); |
3376 | init_frame->context = context; | 3487 | init_frame->context = context; |
3377 | 3488 | ||
3378 | initq_info->reply_queue_entries = instance->max_fw_cmds + 1; | 3489 | initq_info->reply_queue_entries = cpu_to_le32(instance->max_fw_cmds + 1); |
3379 | initq_info->reply_queue_start_phys_addr_lo = instance->reply_queue_h; | 3490 | initq_info->reply_queue_start_phys_addr_lo = cpu_to_le32(instance->reply_queue_h); |
3380 | 3491 | ||
3381 | initq_info->producer_index_phys_addr_lo = instance->producer_h; | 3492 | initq_info->producer_index_phys_addr_lo = cpu_to_le32(instance->producer_h); |
3382 | initq_info->consumer_index_phys_addr_lo = instance->consumer_h; | 3493 | initq_info->consumer_index_phys_addr_lo = cpu_to_le32(instance->consumer_h); |
3383 | 3494 | ||
3384 | init_frame->cmd = MFI_CMD_INIT; | 3495 | init_frame->cmd = MFI_CMD_INIT; |
3385 | init_frame->cmd_status = 0xFF; | 3496 | init_frame->cmd_status = 0xFF; |
3386 | init_frame->queue_info_new_phys_addr_lo = initq_info_h; | 3497 | init_frame->queue_info_new_phys_addr_lo = |
3498 | cpu_to_le32(lower_32_bits(initq_info_h)); | ||
3499 | init_frame->queue_info_new_phys_addr_hi = | ||
3500 | cpu_to_le32(upper_32_bits(initq_info_h)); | ||
3387 | 3501 | ||
3388 | init_frame->data_xfer_len = sizeof(struct megasas_init_queue_info); | 3502 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct megasas_init_queue_info)); |
3389 | 3503 | ||
3390 | /* | 3504 | /* |
3391 | * disable the intr before firing the init frame to FW | 3505 | * disable the intr before firing the init frame to FW |
@@ -3648,7 +3762,9 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
3648 | megasas_get_pd_list(instance); | 3762 | megasas_get_pd_list(instance); |
3649 | 3763 | ||
3650 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); | 3764 | memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); |
3651 | megasas_get_ld_list(instance); | 3765 | if (megasas_ld_list_query(instance, |
3766 | MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) | ||
3767 | megasas_get_ld_list(instance); | ||
3652 | 3768 | ||
3653 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); | 3769 | ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL); |
3654 | 3770 | ||
@@ -3665,8 +3781,8 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
3665 | if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { | 3781 | if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) { |
3666 | 3782 | ||
3667 | max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * | 3783 | max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) * |
3668 | ctrl_info->max_strips_per_io; | 3784 | le16_to_cpu(ctrl_info->max_strips_per_io); |
3669 | max_sectors_2 = ctrl_info->max_request_size; | 3785 | max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size); |
3670 | 3786 | ||
3671 | tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); | 3787 | tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2); |
3672 | 3788 | ||
@@ -3675,14 +3791,18 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
3675 | instance->is_imr = 0; | 3791 | instance->is_imr = 0; |
3676 | dev_info(&instance->pdev->dev, "Controller type: MR," | 3792 | dev_info(&instance->pdev->dev, "Controller type: MR," |
3677 | "Memory size is: %dMB\n", | 3793 | "Memory size is: %dMB\n", |
3678 | ctrl_info->memory_size); | 3794 | le16_to_cpu(ctrl_info->memory_size)); |
3679 | } else { | 3795 | } else { |
3680 | instance->is_imr = 1; | 3796 | instance->is_imr = 1; |
3681 | dev_info(&instance->pdev->dev, | 3797 | dev_info(&instance->pdev->dev, |
3682 | "Controller type: iMR\n"); | 3798 | "Controller type: iMR\n"); |
3683 | } | 3799 | } |
3800 | /* OnOffProperties are converted into CPU arch*/ | ||
3801 | le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties); | ||
3684 | instance->disableOnlineCtrlReset = | 3802 | instance->disableOnlineCtrlReset = |
3685 | ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; | 3803 | ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset; |
3804 | /* adapterOperations2 are converted into CPU arch*/ | ||
3805 | le32_to_cpus((u32 *)&ctrl_info->adapterOperations2); | ||
3686 | instance->UnevenSpanSupport = | 3806 | instance->UnevenSpanSupport = |
3687 | ctrl_info->adapterOperations2.supportUnevenSpans; | 3807 | ctrl_info->adapterOperations2.supportUnevenSpans; |
3688 | if (instance->UnevenSpanSupport) { | 3808 | if (instance->UnevenSpanSupport) { |
@@ -3696,7 +3816,6 @@ static int megasas_init_fw(struct megasas_instance *instance) | |||
3696 | 3816 | ||
3697 | } | 3817 | } |
3698 | } | 3818 | } |
3699 | |||
3700 | instance->max_sectors_per_req = instance->max_num_sge * | 3819 | instance->max_sectors_per_req = instance->max_num_sge * |
3701 | PAGE_SIZE / 512; | 3820 | PAGE_SIZE / 512; |
3702 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) | 3821 | if (tmp_sectors && (instance->max_sectors_per_req > tmp_sectors)) |
@@ -3802,20 +3921,24 @@ megasas_get_seq_num(struct megasas_instance *instance, | |||
3802 | dcmd->cmd = MFI_CMD_DCMD; | 3921 | dcmd->cmd = MFI_CMD_DCMD; |
3803 | dcmd->cmd_status = 0x0; | 3922 | dcmd->cmd_status = 0x0; |
3804 | dcmd->sge_count = 1; | 3923 | dcmd->sge_count = 1; |
3805 | dcmd->flags = MFI_FRAME_DIR_READ; | 3924 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
3806 | dcmd->timeout = 0; | 3925 | dcmd->timeout = 0; |
3807 | dcmd->pad_0 = 0; | 3926 | dcmd->pad_0 = 0; |
3808 | dcmd->data_xfer_len = sizeof(struct megasas_evt_log_info); | 3927 | dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_log_info)); |
3809 | dcmd->opcode = MR_DCMD_CTRL_EVENT_GET_INFO; | 3928 | dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_GET_INFO); |
3810 | dcmd->sgl.sge32[0].phys_addr = el_info_h; | 3929 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(el_info_h); |
3811 | dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_log_info); | 3930 | dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_log_info)); |
3812 | 3931 | ||
3813 | megasas_issue_blocked_cmd(instance, cmd); | 3932 | megasas_issue_blocked_cmd(instance, cmd); |
3814 | 3933 | ||
3815 | /* | 3934 | /* |
3816 | * Copy the data back into callers buffer | 3935 | * Copy the data back into callers buffer |
3817 | */ | 3936 | */ |
3818 | memcpy(eli, el_info, sizeof(struct megasas_evt_log_info)); | 3937 | eli->newest_seq_num = le32_to_cpu(el_info->newest_seq_num); |
3938 | eli->oldest_seq_num = le32_to_cpu(el_info->oldest_seq_num); | ||
3939 | eli->clear_seq_num = le32_to_cpu(el_info->clear_seq_num); | ||
3940 | eli->shutdown_seq_num = le32_to_cpu(el_info->shutdown_seq_num); | ||
3941 | eli->boot_seq_num = le32_to_cpu(el_info->boot_seq_num); | ||
3819 | 3942 | ||
3820 | pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), | 3943 | pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info), |
3821 | el_info, el_info_h); | 3944 | el_info, el_info_h); |
@@ -3862,6 +3985,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
3862 | if (instance->aen_cmd) { | 3985 | if (instance->aen_cmd) { |
3863 | 3986 | ||
3864 | prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; | 3987 | prev_aen.word = instance->aen_cmd->frame->dcmd.mbox.w[1]; |
3988 | prev_aen.members.locale = le16_to_cpu(prev_aen.members.locale); | ||
3865 | 3989 | ||
3866 | /* | 3990 | /* |
3867 | * A class whose enum value is smaller is inclusive of all | 3991 | * A class whose enum value is smaller is inclusive of all |
@@ -3874,7 +3998,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
3874 | * values | 3998 | * values |
3875 | */ | 3999 | */ |
3876 | if ((prev_aen.members.class <= curr_aen.members.class) && | 4000 | if ((prev_aen.members.class <= curr_aen.members.class) && |
3877 | !((prev_aen.members.locale & curr_aen.members.locale) ^ | 4001 | !((prev_aen.members.locale & curr_aen.members.locale) ^ |
3878 | curr_aen.members.locale)) { | 4002 | curr_aen.members.locale)) { |
3879 | /* | 4003 | /* |
3880 | * Previously issued event registration includes | 4004 | * Previously issued event registration includes |
@@ -3882,7 +4006,7 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
3882 | */ | 4006 | */ |
3883 | return 0; | 4007 | return 0; |
3884 | } else { | 4008 | } else { |
3885 | curr_aen.members.locale |= prev_aen.members.locale; | 4009 | curr_aen.members.locale |= prev_aen.members.locale; |
3886 | 4010 | ||
3887 | if (prev_aen.members.class < curr_aen.members.class) | 4011 | if (prev_aen.members.class < curr_aen.members.class) |
3888 | curr_aen.members.class = prev_aen.members.class; | 4012 | curr_aen.members.class = prev_aen.members.class; |
@@ -3917,16 +4041,16 @@ megasas_register_aen(struct megasas_instance *instance, u32 seq_num, | |||
3917 | dcmd->cmd = MFI_CMD_DCMD; | 4041 | dcmd->cmd = MFI_CMD_DCMD; |
3918 | dcmd->cmd_status = 0x0; | 4042 | dcmd->cmd_status = 0x0; |
3919 | dcmd->sge_count = 1; | 4043 | dcmd->sge_count = 1; |
3920 | dcmd->flags = MFI_FRAME_DIR_READ; | 4044 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
3921 | dcmd->timeout = 0; | 4045 | dcmd->timeout = 0; |
3922 | dcmd->pad_0 = 0; | 4046 | dcmd->pad_0 = 0; |
4047 | dcmd->data_xfer_len = cpu_to_le32(sizeof(struct megasas_evt_detail)); | ||
4048 | dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_EVENT_WAIT); | ||
4049 | dcmd->mbox.w[0] = cpu_to_le32(seq_num); | ||
3923 | instance->last_seq_num = seq_num; | 4050 | instance->last_seq_num = seq_num; |
3924 | dcmd->data_xfer_len = sizeof(struct megasas_evt_detail); | 4051 | dcmd->mbox.w[1] = cpu_to_le32(curr_aen.word); |
3925 | dcmd->opcode = MR_DCMD_CTRL_EVENT_WAIT; | 4052 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->evt_detail_h); |
3926 | dcmd->mbox.w[0] = seq_num; | 4053 | dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_evt_detail)); |
3927 | dcmd->mbox.w[1] = curr_aen.word; | ||
3928 | dcmd->sgl.sge32[0].phys_addr = (u32) instance->evt_detail_h; | ||
3929 | dcmd->sgl.sge32[0].length = sizeof(struct megasas_evt_detail); | ||
3930 | 4054 | ||
3931 | if (instance->aen_cmd != NULL) { | 4055 | if (instance->aen_cmd != NULL) { |
3932 | megasas_return_cmd(instance, cmd); | 4056 | megasas_return_cmd(instance, cmd); |
@@ -3972,8 +4096,9 @@ static int megasas_start_aen(struct megasas_instance *instance) | |||
3972 | class_locale.members.locale = MR_EVT_LOCALE_ALL; | 4096 | class_locale.members.locale = MR_EVT_LOCALE_ALL; |
3973 | class_locale.members.class = MR_EVT_CLASS_DEBUG; | 4097 | class_locale.members.class = MR_EVT_CLASS_DEBUG; |
3974 | 4098 | ||
3975 | return megasas_register_aen(instance, eli.newest_seq_num + 1, | 4099 | return megasas_register_aen(instance, |
3976 | class_locale.word); | 4100 | le32_to_cpu(eli.newest_seq_num) + 1, |
4101 | class_locale.word); | ||
3977 | } | 4102 | } |
3978 | 4103 | ||
3979 | /** | 4104 | /** |
@@ -4068,6 +4193,7 @@ megasas_set_dma_mask(struct pci_dev *pdev) | |||
4068 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) | 4193 | if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) |
4069 | goto fail_set_dma_mask; | 4194 | goto fail_set_dma_mask; |
4070 | } | 4195 | } |
4196 | |||
4071 | return 0; | 4197 | return 0; |
4072 | 4198 | ||
4073 | fail_set_dma_mask: | 4199 | fail_set_dma_mask: |
@@ -4386,11 +4512,11 @@ static void megasas_flush_cache(struct megasas_instance *instance) | |||
4386 | dcmd->cmd = MFI_CMD_DCMD; | 4512 | dcmd->cmd = MFI_CMD_DCMD; |
4387 | dcmd->cmd_status = 0x0; | 4513 | dcmd->cmd_status = 0x0; |
4388 | dcmd->sge_count = 0; | 4514 | dcmd->sge_count = 0; |
4389 | dcmd->flags = MFI_FRAME_DIR_NONE; | 4515 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); |
4390 | dcmd->timeout = 0; | 4516 | dcmd->timeout = 0; |
4391 | dcmd->pad_0 = 0; | 4517 | dcmd->pad_0 = 0; |
4392 | dcmd->data_xfer_len = 0; | 4518 | dcmd->data_xfer_len = 0; |
4393 | dcmd->opcode = MR_DCMD_CTRL_CACHE_FLUSH; | 4519 | dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_CACHE_FLUSH); |
4394 | dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; | 4520 | dcmd->mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE; |
4395 | 4521 | ||
4396 | megasas_issue_blocked_cmd(instance, cmd); | 4522 | megasas_issue_blocked_cmd(instance, cmd); |
@@ -4431,11 +4557,11 @@ static void megasas_shutdown_controller(struct megasas_instance *instance, | |||
4431 | dcmd->cmd = MFI_CMD_DCMD; | 4557 | dcmd->cmd = MFI_CMD_DCMD; |
4432 | dcmd->cmd_status = 0x0; | 4558 | dcmd->cmd_status = 0x0; |
4433 | dcmd->sge_count = 0; | 4559 | dcmd->sge_count = 0; |
4434 | dcmd->flags = MFI_FRAME_DIR_NONE; | 4560 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE); |
4435 | dcmd->timeout = 0; | 4561 | dcmd->timeout = 0; |
4436 | dcmd->pad_0 = 0; | 4562 | dcmd->pad_0 = 0; |
4437 | dcmd->data_xfer_len = 0; | 4563 | dcmd->data_xfer_len = 0; |
4438 | dcmd->opcode = opcode; | 4564 | dcmd->opcode = cpu_to_le32(opcode); |
4439 | 4565 | ||
4440 | megasas_issue_blocked_cmd(instance, cmd); | 4566 | megasas_issue_blocked_cmd(instance, cmd); |
4441 | 4567 | ||
@@ -4850,10 +4976,11 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
4850 | * alone separately | 4976 | * alone separately |
4851 | */ | 4977 | */ |
4852 | memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); | 4978 | memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE); |
4853 | cmd->frame->hdr.context = cmd->index; | 4979 | cmd->frame->hdr.context = cpu_to_le32(cmd->index); |
4854 | cmd->frame->hdr.pad_0 = 0; | 4980 | cmd->frame->hdr.pad_0 = 0; |
4855 | cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 | | 4981 | cmd->frame->hdr.flags &= cpu_to_le16(~(MFI_FRAME_IEEE | |
4856 | MFI_FRAME_SENSE64); | 4982 | MFI_FRAME_SGL64 | |
4983 | MFI_FRAME_SENSE64)); | ||
4857 | 4984 | ||
4858 | /* | 4985 | /* |
4859 | * The management interface between applications and the fw uses | 4986 | * The management interface between applications and the fw uses |
@@ -4887,8 +5014,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
4887 | * We don't change the dma_coherent_mask, so | 5014 | * We don't change the dma_coherent_mask, so |
4888 | * pci_alloc_consistent only returns 32bit addresses | 5015 | * pci_alloc_consistent only returns 32bit addresses |
4889 | */ | 5016 | */ |
4890 | kern_sge32[i].phys_addr = (u32) buf_handle; | 5017 | kern_sge32[i].phys_addr = cpu_to_le32(buf_handle); |
4891 | kern_sge32[i].length = ioc->sgl[i].iov_len; | 5018 | kern_sge32[i].length = cpu_to_le32(ioc->sgl[i].iov_len); |
4892 | 5019 | ||
4893 | /* | 5020 | /* |
4894 | * We created a kernel buffer corresponding to the | 5021 | * We created a kernel buffer corresponding to the |
@@ -4911,7 +5038,7 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
4911 | 5038 | ||
4912 | sense_ptr = | 5039 | sense_ptr = |
4913 | (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); | 5040 | (unsigned long *) ((unsigned long)cmd->frame + ioc->sense_off); |
4914 | *sense_ptr = sense_handle; | 5041 | *sense_ptr = cpu_to_le32(sense_handle); |
4915 | } | 5042 | } |
4916 | 5043 | ||
4917 | /* | 5044 | /* |
@@ -4971,9 +5098,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, | |||
4971 | for (i = 0; i < ioc->sge_count; i++) { | 5098 | for (i = 0; i < ioc->sge_count; i++) { |
4972 | if (kbuff_arr[i]) | 5099 | if (kbuff_arr[i]) |
4973 | dma_free_coherent(&instance->pdev->dev, | 5100 | dma_free_coherent(&instance->pdev->dev, |
4974 | kern_sge32[i].length, | 5101 | le32_to_cpu(kern_sge32[i].length), |
4975 | kbuff_arr[i], | 5102 | kbuff_arr[i], |
4976 | kern_sge32[i].phys_addr); | 5103 | le32_to_cpu(kern_sge32[i].phys_addr)); |
4977 | } | 5104 | } |
4978 | 5105 | ||
4979 | megasas_return_cmd(instance, cmd); | 5106 | megasas_return_cmd(instance, cmd); |
@@ -5327,7 +5454,7 @@ megasas_aen_polling(struct work_struct *work) | |||
5327 | host = instance->host; | 5454 | host = instance->host; |
5328 | if (instance->evt_detail) { | 5455 | if (instance->evt_detail) { |
5329 | 5456 | ||
5330 | switch (instance->evt_detail->code) { | 5457 | switch (le32_to_cpu(instance->evt_detail->code)) { |
5331 | case MR_EVT_PD_INSERTED: | 5458 | case MR_EVT_PD_INSERTED: |
5332 | if (megasas_get_pd_list(instance) == 0) { | 5459 | if (megasas_get_pd_list(instance) == 0) { |
5333 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { | 5460 | for (i = 0; i < MEGASAS_MAX_PD_CHANNELS; i++) { |
@@ -5389,7 +5516,9 @@ megasas_aen_polling(struct work_struct *work) | |||
5389 | case MR_EVT_LD_OFFLINE: | 5516 | case MR_EVT_LD_OFFLINE: |
5390 | case MR_EVT_CFG_CLEARED: | 5517 | case MR_EVT_CFG_CLEARED: |
5391 | case MR_EVT_LD_DELETED: | 5518 | case MR_EVT_LD_DELETED: |
5392 | megasas_get_ld_list(instance); | 5519 | if (megasas_ld_list_query(instance, |
5520 | MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) | ||
5521 | megasas_get_ld_list(instance); | ||
5393 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | 5522 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { |
5394 | for (j = 0; | 5523 | for (j = 0; |
5395 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | 5524 | j < MEGASAS_MAX_DEV_PER_CHANNEL; |
@@ -5399,7 +5528,7 @@ megasas_aen_polling(struct work_struct *work) | |||
5399 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | 5528 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; |
5400 | 5529 | ||
5401 | sdev1 = scsi_device_lookup(host, | 5530 | sdev1 = scsi_device_lookup(host, |
5402 | i + MEGASAS_MAX_LD_CHANNELS, | 5531 | MEGASAS_MAX_PD_CHANNELS + i, |
5403 | j, | 5532 | j, |
5404 | 0); | 5533 | 0); |
5405 | 5534 | ||
@@ -5418,7 +5547,9 @@ megasas_aen_polling(struct work_struct *work) | |||
5418 | doscan = 0; | 5547 | doscan = 0; |
5419 | break; | 5548 | break; |
5420 | case MR_EVT_LD_CREATED: | 5549 | case MR_EVT_LD_CREATED: |
5421 | megasas_get_ld_list(instance); | 5550 | if (megasas_ld_list_query(instance, |
5551 | MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) | ||
5552 | megasas_get_ld_list(instance); | ||
5422 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | 5553 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { |
5423 | for (j = 0; | 5554 | for (j = 0; |
5424 | j < MEGASAS_MAX_DEV_PER_CHANNEL; | 5555 | j < MEGASAS_MAX_DEV_PER_CHANNEL; |
@@ -5427,14 +5558,14 @@ megasas_aen_polling(struct work_struct *work) | |||
5427 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | 5558 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; |
5428 | 5559 | ||
5429 | sdev1 = scsi_device_lookup(host, | 5560 | sdev1 = scsi_device_lookup(host, |
5430 | i+MEGASAS_MAX_LD_CHANNELS, | 5561 | MEGASAS_MAX_PD_CHANNELS + i, |
5431 | j, 0); | 5562 | j, 0); |
5432 | 5563 | ||
5433 | if (instance->ld_ids[ld_index] != | 5564 | if (instance->ld_ids[ld_index] != |
5434 | 0xff) { | 5565 | 0xff) { |
5435 | if (!sdev1) { | 5566 | if (!sdev1) { |
5436 | scsi_add_device(host, | 5567 | scsi_add_device(host, |
5437 | i + 2, | 5568 | MEGASAS_MAX_PD_CHANNELS + i, |
5438 | j, 0); | 5569 | j, 0); |
5439 | } | 5570 | } |
5440 | } | 5571 | } |
@@ -5483,18 +5614,20 @@ megasas_aen_polling(struct work_struct *work) | |||
5483 | } | 5614 | } |
5484 | } | 5615 | } |
5485 | 5616 | ||
5486 | megasas_get_ld_list(instance); | 5617 | if (megasas_ld_list_query(instance, |
5618 | MR_LD_QUERY_TYPE_EXPOSED_TO_HOST)) | ||
5619 | megasas_get_ld_list(instance); | ||
5487 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { | 5620 | for (i = 0; i < MEGASAS_MAX_LD_CHANNELS; i++) { |
5488 | for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { | 5621 | for (j = 0; j < MEGASAS_MAX_DEV_PER_CHANNEL; j++) { |
5489 | ld_index = | 5622 | ld_index = |
5490 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; | 5623 | (i * MEGASAS_MAX_DEV_PER_CHANNEL) + j; |
5491 | 5624 | ||
5492 | sdev1 = scsi_device_lookup(host, | 5625 | sdev1 = scsi_device_lookup(host, |
5493 | i+MEGASAS_MAX_LD_CHANNELS, j, 0); | 5626 | MEGASAS_MAX_PD_CHANNELS + i, j, 0); |
5494 | if (instance->ld_ids[ld_index] != 0xff) { | 5627 | if (instance->ld_ids[ld_index] != 0xff) { |
5495 | if (!sdev1) { | 5628 | if (!sdev1) { |
5496 | scsi_add_device(host, | 5629 | scsi_add_device(host, |
5497 | i+2, | 5630 | MEGASAS_MAX_PD_CHANNELS + i, |
5498 | j, 0); | 5631 | j, 0); |
5499 | } else { | 5632 | } else { |
5500 | scsi_device_put(sdev1); | 5633 | scsi_device_put(sdev1); |
@@ -5514,7 +5647,7 @@ megasas_aen_polling(struct work_struct *work) | |||
5514 | return ; | 5647 | return ; |
5515 | } | 5648 | } |
5516 | 5649 | ||
5517 | seq_num = instance->evt_detail->seq_num + 1; | 5650 | seq_num = le32_to_cpu(instance->evt_detail->seq_num) + 1; |
5518 | 5651 | ||
5519 | /* Register AEN with FW for latest sequence number plus 1 */ | 5652 | /* Register AEN with FW for latest sequence number plus 1 */ |
5520 | class_locale.members.reserved = 0; | 5653 | class_locale.members.reserved = 0; |
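
Conversions like the ones in this file are normally paired with endian-annotated types in the shared headers, so that sparse (make C=1) can flag any missed swap. A minimal illustration of that convention, with hypothetical field names:

    #include <linux/types.h>

    /* Sketch only: fields the firmware reads or writes carry __le types,
     * so sparse warns whenever one is accessed without a conversion. */
    struct example_dcmd_frame {
            u8     cmd;            /* single bytes need no conversion */
            u8     cmd_status;
            __le16 flags;          /* store with cpu_to_le16()        */
            __le32 data_xfer_len;  /* store with cpu_to_le32()        */
            __le32 opcode;         /* read back with le32_to_cpu()    */
    };
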
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c index 4f401f753f8e..e24b6eb645b5 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fp.c +++ b/drivers/scsi/megaraid/megaraid_sas_fp.c | |||
@@ -126,17 +126,17 @@ static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map) | |||
126 | return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; | 126 | return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx]; |
127 | } | 127 | } |
128 | 128 | ||
129 | static u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) | 129 | u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map) |
130 | { | 130 | { |
131 | return map->raidMap.arMapInfo[ar].pd[arm]; | 131 | return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]); |
132 | } | 132 | } |
133 | 133 | ||
134 | static u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) | 134 | u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map) |
135 | { | 135 | { |
136 | return map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef; | 136 | return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef); |
137 | } | 137 | } |
138 | 138 | ||
139 | static u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) | 139 | u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map) |
140 | { | 140 | { |
141 | return map->raidMap.devHndlInfo[pd].curDevHdl; | 141 | return map->raidMap.devHndlInfo[pd].curDevHdl; |
142 | } | 142 | } |
@@ -148,7 +148,7 @@ u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map) | |||
148 | 148 | ||
149 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) | 149 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map) |
150 | { | 150 | { |
151 | return map->raidMap.ldTgtIdToLd[ldTgtId]; | 151 | return le16_to_cpu(map->raidMap.ldTgtIdToLd[ldTgtId]); |
152 | } | 152 | } |
153 | 153 | ||
154 | static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, | 154 | static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span, |
@@ -167,18 +167,22 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) | |||
167 | struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; | 167 | struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info; |
168 | PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; | 168 | PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span; |
169 | struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; | 169 | struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap; |
170 | struct MR_LD_RAID *raid; | ||
171 | int ldCount, num_lds; | ||
172 | u16 ld; | ||
173 | |||
170 | 174 | ||
171 | if (pFwRaidMap->totalSize != | 175 | if (le32_to_cpu(pFwRaidMap->totalSize) != |
172 | (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + | 176 | (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) + |
173 | (sizeof(struct MR_LD_SPAN_MAP) *pFwRaidMap->ldCount))) { | 177 | (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) { |
174 | printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", | 178 | printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n", |
175 | (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - | 179 | (unsigned int)((sizeof(struct MR_FW_RAID_MAP) - |
176 | sizeof(struct MR_LD_SPAN_MAP)) + | 180 | sizeof(struct MR_LD_SPAN_MAP)) + |
177 | (sizeof(struct MR_LD_SPAN_MAP) * | 181 | (sizeof(struct MR_LD_SPAN_MAP) * |
178 | pFwRaidMap->ldCount))); | 182 | le32_to_cpu(pFwRaidMap->ldCount)))); |
179 | printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " | 183 | printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize " |
180 | ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), | 184 | ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP), |
181 | pFwRaidMap->totalSize); | 185 | le32_to_cpu(pFwRaidMap->totalSize)); |
182 | return 0; | 186 | return 0; |
183 | } | 187 | } |
184 | 188 | ||
@@ -187,6 +191,15 @@ u8 MR_ValidateMapInfo(struct megasas_instance *instance) | |||
187 | 191 | ||
188 | mr_update_load_balance_params(map, lbInfo); | 192 | mr_update_load_balance_params(map, lbInfo); |
189 | 193 | ||
194 | num_lds = le32_to_cpu(map->raidMap.ldCount); | ||
195 | |||
196 | /* Convert RAID capability values to CPU byte order */ | ||
197 | for (ldCount = 0; ldCount < num_lds; ldCount++) { | ||
198 | ld = MR_TargetIdToLdGet(ldCount, map); | ||
199 | raid = MR_LdRaidGet(ld, map); | ||
200 | le32_to_cpus((u32 *)&raid->capability); | ||
201 | } | ||
202 | |||
190 | return 1; | 203 | return 1; |
191 | } | 204 | } |
192 | 205 | ||
@@ -200,23 +213,20 @@ u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk, | |||
200 | 213 | ||
201 | for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { | 214 | for (span = 0; span < raid->spanDepth; span++, pSpanBlock++) { |
202 | 215 | ||
203 | for (j = 0; j < pSpanBlock->block_span_info.noElements; j++) { | 216 | for (j = 0; j < le32_to_cpu(pSpanBlock->block_span_info.noElements); j++) { |
204 | quad = &pSpanBlock->block_span_info.quad[j]; | 217 | quad = &pSpanBlock->block_span_info.quad[j]; |
205 | 218 | ||
206 | if (quad->diff == 0) | 219 | if (le32_to_cpu(quad->diff) == 0) |
207 | return SPAN_INVALID; | 220 | return SPAN_INVALID; |
208 | if (quad->logStart <= row && row <= quad->logEnd && | 221 | if (le64_to_cpu(quad->logStart) <= row && row <= |
209 | (mega_mod64(row-quad->logStart, quad->diff)) == 0) { | 222 | le64_to_cpu(quad->logEnd) && (mega_mod64(row - le64_to_cpu(quad->logStart), |
223 | le32_to_cpu(quad->diff))) == 0) { | ||
210 | if (span_blk != NULL) { | 224 | if (span_blk != NULL) { |
211 | u64 blk, debugBlk; | 225 | u64 blk, debugBlk; |
212 | blk = | 226 | blk = mega_div64_32((row-le64_to_cpu(quad->logStart)), le32_to_cpu(quad->diff)); |
213 | mega_div64_32( | ||
214 | (row-quad->logStart), | ||
215 | quad->diff); | ||
216 | debugBlk = blk; | 227 | debugBlk = blk; |
217 | 228 | ||
218 | blk = (blk + quad->offsetInSpan) << | 229 | blk = (blk + le64_to_cpu(quad->offsetInSpan)) << raid->stripeShift; |
219 | raid->stripeShift; | ||
220 | *span_blk = blk; | 230 | *span_blk = blk; |
221 | } | 231 | } |
222 | return span; | 232 | return span; |
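
Once the fields are converted, the quad lookup is plain 64-bit arithmetic: a row r belongs to a quad when logStart <= r <= logEnd and (r - logStart) mod diff == 0, and the block is then ((r - logStart) / diff + offsetInSpan) << stripeShift. A self-contained check of that arithmetic with made-up values:

    #include <stdint.h>
    #include <assert.h>

    struct quad { uint64_t logStart, logEnd, offsetInSpan; uint32_t diff; };

    /* Returns the span-relative block for 'row', or UINT64_MAX on a miss. */
    static uint64_t quad_to_block(const struct quad *q, uint64_t row,
                                  uint8_t stripeShift)
    {
            if (q->diff == 0 || row < q->logStart || row > q->logEnd)
                    return UINT64_MAX;
            if ((row - q->logStart) % q->diff)
                    return UINT64_MAX;
            return (((row - q->logStart) / q->diff) + q->offsetInSpan)
                    << stripeShift;
    }

    int main(void)
    {
            struct quad q = { .logStart = 0, .logEnd = 1024,
                              .offsetInSpan = 8, .diff = 2 };
            /* row 10: (10 / 2 + 8) << 3 = 13 << 3 = 104 */
            assert(quad_to_block(&q, 10, 3) == 104);
            return 0;
    }
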
@@ -257,8 +267,8 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) | |||
257 | for (span = 0; span < raid->spanDepth; span++) | 267 | for (span = 0; span < raid->spanDepth; span++) |
258 | dev_dbg(&instance->pdev->dev, "Span=%x," | 268 | dev_dbg(&instance->pdev->dev, "Span=%x," |
259 | " number of quads=%x\n", span, | 269 | " number of quads=%x\n", span, |
260 | map->raidMap.ldSpanMap[ld].spanBlock[span]. | 270 | le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
261 | block_span_info.noElements); | 271 | block_span_info.noElements)); |
262 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { | 272 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { |
263 | span_set = &(ldSpanInfo[ld].span_set[element]); | 273 | span_set = &(ldSpanInfo[ld].span_set[element]); |
264 | if (span_set->span_row_data_width == 0) | 274 | if (span_set->span_row_data_width == 0) |
@@ -286,22 +296,22 @@ static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo) | |||
286 | (long unsigned int)span_set->data_strip_end); | 296 | (long unsigned int)span_set->data_strip_end); |
287 | 297 | ||
288 | for (span = 0; span < raid->spanDepth; span++) { | 298 | for (span = 0; span < raid->spanDepth; span++) { |
289 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 299 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
290 | block_span_info.noElements >= | 300 | block_span_info.noElements) >= |
291 | element + 1) { | 301 | element + 1) { |
292 | quad = &map->raidMap.ldSpanMap[ld]. | 302 | quad = &map->raidMap.ldSpanMap[ld]. |
293 | spanBlock[span].block_span_info. | 303 | spanBlock[span].block_span_info. |
294 | quad[element]; | 304 | quad[element]; |
295 | dev_dbg(&instance->pdev->dev, "Span=%x," | 305 | dev_dbg(&instance->pdev->dev, "Span=%x," |
296 | "Quad=%x, diff=%x\n", span, | 306 | "Quad=%x, diff=%x\n", span, |
297 | element, quad->diff); | 307 | element, le32_to_cpu(quad->diff)); |
298 | dev_dbg(&instance->pdev->dev, | 308 | dev_dbg(&instance->pdev->dev, |
299 | "offset_in_span=0x%08lx\n", | 309 | "offset_in_span=0x%08lx\n", |
300 | (long unsigned int)quad->offsetInSpan); | 310 | (long unsigned int)le64_to_cpu(quad->offsetInSpan)); |
301 | dev_dbg(&instance->pdev->dev, | 311 | dev_dbg(&instance->pdev->dev, |
302 | "logical start=0x%08lx, end=0x%08lx\n", | 312 | "logical start=0x%08lx, end=0x%08lx\n", |
303 | (long unsigned int)quad->logStart, | 313 | (long unsigned int)le64_to_cpu(quad->logStart), |
304 | (long unsigned int)quad->logEnd); | 314 | (long unsigned int)le64_to_cpu(quad->logEnd)); |
305 | } | 315 | } |
306 | } | 316 | } |
307 | } | 317 | } |
@@ -348,23 +358,23 @@ u32 mr_spanset_get_span_block(struct megasas_instance *instance, | |||
348 | continue; | 358 | continue; |
349 | 359 | ||
350 | for (span = 0; span < raid->spanDepth; span++) | 360 | for (span = 0; span < raid->spanDepth; span++) |
351 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 361 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
352 | block_span_info.noElements >= info+1) { | 362 | block_span_info.noElements) >= info+1) { |
353 | quad = &map->raidMap.ldSpanMap[ld]. | 363 | quad = &map->raidMap.ldSpanMap[ld]. |
354 | spanBlock[span]. | 364 | spanBlock[span]. |
355 | block_span_info.quad[info]; | 365 | block_span_info.quad[info]; |
356 | if (quad->diff == 0) | 366 | if (le32_to_cpu(quad->diff) == 0) |
357 | return SPAN_INVALID; | 367 | return SPAN_INVALID; |
358 | if (quad->logStart <= row && | 368 | if (le64_to_cpu(quad->logStart) <= row && |
359 | row <= quad->logEnd && | 369 | row <= le64_to_cpu(quad->logEnd) && |
360 | (mega_mod64(row - quad->logStart, | 370 | (mega_mod64(row - le64_to_cpu(quad->logStart), |
361 | quad->diff)) == 0) { | 371 | le32_to_cpu(quad->diff))) == 0) { |
362 | if (span_blk != NULL) { | 372 | if (span_blk != NULL) { |
363 | u64 blk; | 373 | u64 blk; |
364 | blk = mega_div64_32 | 374 | blk = mega_div64_32 |
365 | ((row - quad->logStart), | 375 | ((row - le64_to_cpu(quad->logStart)), |
366 | quad->diff); | 376 | le32_to_cpu(quad->diff)); |
367 | blk = (blk + quad->offsetInSpan) | 377 | blk = (blk + le64_to_cpu(quad->offsetInSpan)) |
368 | << raid->stripeShift; | 378 | << raid->stripeShift; |
369 | *span_blk = blk; | 379 | *span_blk = blk; |
370 | } | 380 | } |
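mega_mod64() and mega_div64_32() are driver helpers rather than plain operators: a u64 '/' or '%' would drag libgcc into 32-bit builds, so the driver presumably routes them through the kernel's do_div(). A userspace stand-in for the contracts only (the quad->diff == 0 guard above keeps the divisor non-zero):

    #include <stdint.h>

    /* Sketch: 64-bit dividend, 32-bit divisor; quotient and remainder. */
    static uint64_t mega_div64_32_sketch(uint64_t dividend, uint32_t divisor)
    {
            return dividend / divisor;
    }

    static uint32_t mega_mod64_sketch(uint64_t dividend, uint32_t divisor)
    {
            return (uint32_t)(dividend % divisor);
    }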
@@ -415,8 +425,8 @@ static u64 get_row_from_strip(struct megasas_instance *instance, | |||
415 | span_set_Row = mega_div64_32(span_set_Strip, | 425 | span_set_Row = mega_div64_32(span_set_Strip, |
416 | span_set->span_row_data_width) * span_set->diff; | 426 | span_set->span_row_data_width) * span_set->diff; |
417 | for (span = 0, span_offset = 0; span < raid->spanDepth; span++) | 427 | for (span = 0, span_offset = 0; span < raid->spanDepth; span++) |
418 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 428 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
419 | block_span_info.noElements >= info+1) { | 429 | block_span_info.noElements) >= info+1) { |
420 | if (strip_offset >= | 430 | if (strip_offset >= |
421 | span_set->strip_offset[span]) | 431 | span_set->strip_offset[span]) |
422 | span_offset++; | 432 | span_offset++; |
@@ -480,18 +490,18 @@ static u64 get_strip_from_row(struct megasas_instance *instance, | |||
480 | continue; | 490 | continue; |
481 | 491 | ||
482 | for (span = 0; span < raid->spanDepth; span++) | 492 | for (span = 0; span < raid->spanDepth; span++) |
483 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 493 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
484 | block_span_info.noElements >= info+1) { | 494 | block_span_info.noElements) >= info+1) { |
485 | quad = &map->raidMap.ldSpanMap[ld]. | 495 | quad = &map->raidMap.ldSpanMap[ld]. |
486 | spanBlock[span].block_span_info.quad[info]; | 496 | spanBlock[span].block_span_info.quad[info]; |
487 | if (quad->logStart <= row && | 497 | if (le64_to_cpu(quad->logStart) <= row && |
488 | row <= quad->logEnd && | 498 | row <= le64_to_cpu(quad->logEnd) && |
489 | mega_mod64((row - quad->logStart), | 499 | mega_mod64((row - le64_to_cpu(quad->logStart)), |
490 | quad->diff) == 0) { | 500 | le32_to_cpu(quad->diff)) == 0) { |
491 | strip = mega_div64_32 | 501 | strip = mega_div64_32 |
492 | (((row - span_set->data_row_start) | 502 | (((row - span_set->data_row_start) |
493 | - quad->logStart), | 503 | - le64_to_cpu(quad->logStart)), |
494 | quad->diff); | 504 | le32_to_cpu(quad->diff)); |
495 | strip *= span_set->span_row_data_width; | 505 | strip *= span_set->span_row_data_width; |
496 | strip += span_set->data_strip_start; | 506 | strip += span_set->data_strip_start; |
497 | strip += span_set->strip_offset[span]; | 507 | strip += span_set->strip_offset[span]; |
@@ -543,8 +553,8 @@ static u32 get_arm_from_strip(struct megasas_instance *instance, | |||
543 | span_set->span_row_data_width); | 553 | span_set->span_row_data_width); |
544 | 554 | ||
545 | for (span = 0, span_offset = 0; span < raid->spanDepth; span++) | 555 | for (span = 0, span_offset = 0; span < raid->spanDepth; span++) |
546 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 556 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
547 | block_span_info.noElements >= info+1) { | 557 | block_span_info.noElements) >= info+1) { |
548 | if (strip_offset >= | 558 | if (strip_offset >= |
549 | span_set->strip_offset[span]) | 559 | span_set->strip_offset[span]) |
550 | span_offset = | 560 | span_offset = |
@@ -669,7 +679,7 @@ static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld, | |||
669 | } | 679 | } |
670 | } | 680 | } |
671 | 681 | ||
672 | *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; | 682 | *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); |
673 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | | 683 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | |
674 | physArm; | 684 | physArm; |
675 | return retval; | 685 | return retval; |
@@ -765,7 +775,7 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow, | |||
765 | } | 775 | } |
766 | } | 776 | } |
767 | 777 | ||
768 | *pdBlock += stripRef + MR_LdSpanPtrGet(ld, span, map)->startBlk; | 778 | *pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk); |
769 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | | 779 | pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | |
770 | physArm; | 780 | physArm; |
771 | return retval; | 781 | return retval; |
@@ -784,7 +794,7 @@ u8 | |||
784 | MR_BuildRaidContext(struct megasas_instance *instance, | 794 | MR_BuildRaidContext(struct megasas_instance *instance, |
785 | struct IO_REQUEST_INFO *io_info, | 795 | struct IO_REQUEST_INFO *io_info, |
786 | struct RAID_CONTEXT *pRAID_Context, | 796 | struct RAID_CONTEXT *pRAID_Context, |
787 | struct MR_FW_RAID_MAP_ALL *map) | 797 | struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN) |
788 | { | 798 | { |
789 | struct MR_LD_RAID *raid; | 799 | struct MR_LD_RAID *raid; |
790 | u32 ld, stripSize, stripe_mask; | 800 | u32 ld, stripSize, stripe_mask; |
@@ -965,7 +975,7 @@ MR_BuildRaidContext(struct megasas_instance *instance, | |||
965 | regSize += stripSize; | 975 | regSize += stripSize; |
966 | } | 976 | } |
967 | 977 | ||
968 | pRAID_Context->timeoutValue = map->raidMap.fpPdIoTimeoutSec; | 978 | pRAID_Context->timeoutValue = cpu_to_le16(map->raidMap.fpPdIoTimeoutSec); |
969 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 979 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || |
970 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | 980 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) |
971 | pRAID_Context->regLockFlags = (isRead) ? | 981 | pRAID_Context->regLockFlags = (isRead) ? |
@@ -974,9 +984,12 @@ MR_BuildRaidContext(struct megasas_instance *instance, | |||
974 | pRAID_Context->regLockFlags = (isRead) ? | 984 | pRAID_Context->regLockFlags = (isRead) ? |
975 | REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; | 985 | REGION_TYPE_SHARED_READ : raid->regTypeReqOnWrite; |
976 | pRAID_Context->VirtualDiskTgtId = raid->targetId; | 986 | pRAID_Context->VirtualDiskTgtId = raid->targetId; |
977 | pRAID_Context->regLockRowLBA = regStart; | 987 | pRAID_Context->regLockRowLBA = cpu_to_le64(regStart); |
978 | pRAID_Context->regLockLength = regSize; | 988 | pRAID_Context->regLockLength = cpu_to_le32(regSize); |
979 | pRAID_Context->configSeqNum = raid->seqNum; | 989 | pRAID_Context->configSeqNum = raid->seqNum; |
990 | /* save pointer to raid->LUN array */ | ||
991 | *raidLUN = raid->LUN; | ||
992 | |||
980 | 993 | ||
981 | /*Get Phy Params only if FP capable, or else leave it to MR firmware | 994 | /*Get Phy Params only if FP capable, or else leave it to MR firmware |
982 | to do the calculation.*/ | 995 | to do the calculation.*/ |
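The new raidLUN out-parameter in MR_BuildRaidContext() avoids copying the LUN here: the callee just publishes a pointer into the long-lived RAID map, and megasas_build_ldio_fusion() memcpy()s the eight bytes into the IO frame only on the fast-path branch. The shape of that pattern, with illustrative names:

    /* Illustrative sketch only: expose a pointer into a structure that
     * outlives the call; the caller copies on the path that needs it. */
    struct ld_raid_sketch { unsigned char LUN[8]; };

    static void build_ctx_sketch(struct ld_raid_sketch *raid,
                                 unsigned char **raidLUN)
    {
            *raidLUN = raid->LUN; /* no copy; the map buffer stays valid */
    }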
@@ -1047,8 +1060,8 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, | |||
1047 | raid = MR_LdRaidGet(ld, map); | 1060 | raid = MR_LdRaidGet(ld, map); |
1048 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { | 1061 | for (element = 0; element < MAX_QUAD_DEPTH; element++) { |
1049 | for (span = 0; span < raid->spanDepth; span++) { | 1062 | for (span = 0; span < raid->spanDepth; span++) { |
1050 | if (map->raidMap.ldSpanMap[ld].spanBlock[span]. | 1063 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span]. |
1051 | block_span_info.noElements < | 1064 | block_span_info.noElements) < |
1052 | element + 1) | 1065 | element + 1) |
1053 | continue; | 1066 | continue; |
1054 | span_set = &(ldSpanInfo[ld].span_set[element]); | 1067 | span_set = &(ldSpanInfo[ld].span_set[element]); |
@@ -1056,14 +1069,14 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, | |||
1056 | spanBlock[span].block_span_info. | 1069 | spanBlock[span].block_span_info. |
1057 | quad[element]; | 1070 | quad[element]; |
1058 | 1071 | ||
1059 | span_set->diff = quad->diff; | 1072 | span_set->diff = le32_to_cpu(quad->diff); |
1060 | 1073 | ||
1061 | for (count = 0, span_row_width = 0; | 1074 | for (count = 0, span_row_width = 0; |
1062 | count < raid->spanDepth; count++) { | 1075 | count < raid->spanDepth; count++) { |
1063 | if (map->raidMap.ldSpanMap[ld]. | 1076 | if (le32_to_cpu(map->raidMap.ldSpanMap[ld]. |
1064 | spanBlock[count]. | 1077 | spanBlock[count]. |
1065 | block_span_info. | 1078 | block_span_info. |
1066 | noElements >= element + 1) { | 1079 | noElements) >= element + 1) { |
1067 | span_set->strip_offset[count] = | 1080 | span_set->strip_offset[count] = |
1068 | span_row_width; | 1081 | span_row_width; |
1069 | span_row_width += | 1082 | span_row_width += |
@@ -1077,9 +1090,9 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, | |||
1077 | } | 1090 | } |
1078 | 1091 | ||
1079 | span_set->span_row_data_width = span_row_width; | 1092 | span_set->span_row_data_width = span_row_width; |
1080 | span_row = mega_div64_32(((quad->logEnd - | 1093 | span_row = mega_div64_32(((le64_to_cpu(quad->logEnd) - |
1081 | quad->logStart) + quad->diff), | 1094 | le64_to_cpu(quad->logStart)) + le32_to_cpu(quad->diff)), |
1082 | quad->diff); | 1095 | le32_to_cpu(quad->diff)); |
1083 | 1096 | ||
1084 | if (element == 0) { | 1097 | if (element == 0) { |
1085 | span_set->log_start_lba = 0; | 1098 | span_set->log_start_lba = 0; |
@@ -1096,7 +1109,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, | |||
1096 | 1109 | ||
1097 | span_set->data_row_start = 0; | 1110 | span_set->data_row_start = 0; |
1098 | span_set->data_row_end = | 1111 | span_set->data_row_end = |
1099 | (span_row * quad->diff) - 1; | 1112 | (span_row * le32_to_cpu(quad->diff)) - 1; |
1100 | } else { | 1113 | } else { |
1101 | span_set_prev = &(ldSpanInfo[ld]. | 1114 | span_set_prev = &(ldSpanInfo[ld]. |
1102 | span_set[element - 1]); | 1115 | span_set[element - 1]); |
@@ -1122,7 +1135,7 @@ void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map, | |||
1122 | span_set_prev->data_row_end + 1; | 1135 | span_set_prev->data_row_end + 1; |
1123 | span_set->data_row_end = | 1136 | span_set->data_row_end = |
1124 | span_set->data_row_start + | 1137 | span_set->data_row_start + |
1125 | (span_row * quad->diff) - 1; | 1138 | (span_row * le32_to_cpu(quad->diff)) - 1; |
1126 | } | 1139 | } |
1127 | break; | 1140 | break; |
1128 | } | 1141 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 417d5f167aa2..f6555921fd7a 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c | |||
@@ -72,17 +72,6 @@ megasas_clear_intr_fusion(struct megasas_register_set __iomem *regs); | |||
72 | int | 72 | int |
73 | megasas_issue_polled(struct megasas_instance *instance, | 73 | megasas_issue_polled(struct megasas_instance *instance, |
74 | struct megasas_cmd *cmd); | 74 | struct megasas_cmd *cmd); |
75 | |||
76 | u8 | ||
77 | MR_BuildRaidContext(struct megasas_instance *instance, | ||
78 | struct IO_REQUEST_INFO *io_info, | ||
79 | struct RAID_CONTEXT *pRAID_Context, | ||
80 | struct MR_FW_RAID_MAP_ALL *map); | ||
81 | u16 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map); | ||
82 | struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
83 | |||
84 | u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map); | ||
85 | |||
86 | void | 75 | void |
87 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance); | 76 | megasas_check_and_restore_queue_depth(struct megasas_instance *instance); |
88 | 77 | ||
@@ -626,23 +615,20 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
626 | 615 | ||
627 | IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; | 616 | IOCInitMessage->Function = MPI2_FUNCTION_IOC_INIT; |
628 | IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; | 617 | IOCInitMessage->WhoInit = MPI2_WHOINIT_HOST_DRIVER; |
629 | IOCInitMessage->MsgVersion = MPI2_VERSION; | 618 | IOCInitMessage->MsgVersion = cpu_to_le16(MPI2_VERSION); |
630 | IOCInitMessage->HeaderVersion = MPI2_HEADER_VERSION; | 619 | IOCInitMessage->HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION); |
631 | IOCInitMessage->SystemRequestFrameSize = | 620 | IOCInitMessage->SystemRequestFrameSize = cpu_to_le16(MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4); |
632 | MEGA_MPI2_RAID_DEFAULT_IO_FRAME_SIZE / 4; | 621 | |
633 | 622 | IOCInitMessage->ReplyDescriptorPostQueueDepth = cpu_to_le16(fusion->reply_q_depth); | |
634 | IOCInitMessage->ReplyDescriptorPostQueueDepth = fusion->reply_q_depth; | 623 | IOCInitMessage->ReplyDescriptorPostQueueAddress = cpu_to_le64(fusion->reply_frames_desc_phys); |
635 | IOCInitMessage->ReplyDescriptorPostQueueAddress = | 624 | IOCInitMessage->SystemRequestFrameBaseAddress = cpu_to_le64(fusion->io_request_frames_phys); |
636 | fusion->reply_frames_desc_phys; | ||
637 | IOCInitMessage->SystemRequestFrameBaseAddress = | ||
638 | fusion->io_request_frames_phys; | ||
639 | IOCInitMessage->HostMSIxVectors = instance->msix_vectors; | 625 | IOCInitMessage->HostMSIxVectors = instance->msix_vectors; |
640 | init_frame = (struct megasas_init_frame *)cmd->frame; | 626 | init_frame = (struct megasas_init_frame *)cmd->frame; |
641 | memset(init_frame, 0, MEGAMFI_FRAME_SIZE); | 627 | memset(init_frame, 0, MEGAMFI_FRAME_SIZE); |
642 | 628 | ||
643 | frame_hdr = &cmd->frame->hdr; | 629 | frame_hdr = &cmd->frame->hdr; |
644 | frame_hdr->cmd_status = 0xFF; | 630 | frame_hdr->cmd_status = 0xFF; |
645 | frame_hdr->flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE; | 631 | frame_hdr->flags |= cpu_to_le16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE); |
646 | 632 | ||
647 | init_frame->cmd = MFI_CMD_INIT; | 633 | init_frame->cmd = MFI_CMD_INIT; |
648 | init_frame->cmd_status = 0xFF; | 634 | init_frame->cmd_status = 0xFF; |
@@ -652,17 +638,24 @@ megasas_ioc_init_fusion(struct megasas_instance *instance) | |||
652 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) | 638 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) |
653 | init_frame->driver_operations. | 639 | init_frame->driver_operations. |
654 | mfi_capabilities.support_additional_msix = 1; | 640 | mfi_capabilities.support_additional_msix = 1; |
641 | /* driver supports HA / Remote LUN over Fast Path interface */ | ||
642 | init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun | ||
643 | = 1; | ||
644 | /* Convert capability to LE32 */ | ||
645 | cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities); | ||
655 | 646 | ||
656 | init_frame->queue_info_new_phys_addr_lo = ioc_init_handle; | 647 | init_frame->queue_info_new_phys_addr_lo = cpu_to_le32((u32)ioc_init_handle); |
657 | init_frame->data_xfer_len = sizeof(struct MPI2_IOC_INIT_REQUEST); | 648 | init_frame->data_xfer_len = cpu_to_le32(sizeof(struct MPI2_IOC_INIT_REQUEST)); |
658 | 649 | ||
659 | req_desc = | 650 | req_desc = |
660 | (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; | 651 | (union MEGASAS_REQUEST_DESCRIPTOR_UNION *)fusion->req_frames_desc; |
661 | 652 | ||
662 | req_desc->Words = cmd->frame_phys_addr; | 653 | req_desc->Words = 0; |
663 | req_desc->MFAIo.RequestFlags = | 654 | req_desc->MFAIo.RequestFlags = |
664 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << | 655 | (MEGASAS_REQ_DESCRIPT_FLAGS_MFA << |
665 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 656 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
657 | cpu_to_le32s((u32 *)&req_desc->MFAIo); | ||
658 | req_desc->Words |= cpu_to_le64(cmd->frame_phys_addr); | ||
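The rewritten descriptor build deserves a note: Words starts at zero, the MFA flags sub-word is byte-swapped in place with cpu_to_le32s(), and the frame address is OR'd in already converted, so the whole 64-bit descriptor ends up little-endian. A portable stand-in for the in-place swap helper:

    #include <stdint.h>
    #include <string.h>

    /* After this, the four bytes at *p are little-endian regardless of
     * host order (no-op on LE, swap on BE), matching cpu_to_le32s(). */
    static void cpu_to_le32s_sketch(uint32_t *p)
    {
            uint32_t v = *p;
            uint8_t b[4] = { v & 0xff, (v >> 8) & 0xff,
                             (v >> 16) & 0xff, (v >> 24) & 0xff };
            memcpy(p, b, sizeof(b));
    }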
666 | 659 | ||
667 | /* | 660 | /* |
668 | * disable the intr before firing the init frame | 661 | * disable the intr before firing the init frame |
@@ -753,13 +746,13 @@ megasas_get_ld_map_info(struct megasas_instance *instance) | |||
753 | dcmd->cmd = MFI_CMD_DCMD; | 746 | dcmd->cmd = MFI_CMD_DCMD; |
754 | dcmd->cmd_status = 0xFF; | 747 | dcmd->cmd_status = 0xFF; |
755 | dcmd->sge_count = 1; | 748 | dcmd->sge_count = 1; |
756 | dcmd->flags = MFI_FRAME_DIR_READ; | 749 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_READ); |
757 | dcmd->timeout = 0; | 750 | dcmd->timeout = 0; |
758 | dcmd->pad_0 = 0; | 751 | dcmd->pad_0 = 0; |
759 | dcmd->data_xfer_len = size_map_info; | 752 | dcmd->data_xfer_len = cpu_to_le32(size_map_info); |
760 | dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; | 753 | dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); |
761 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 754 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); |
762 | dcmd->sgl.sge32[0].length = size_map_info; | 755 | dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); |
763 | 756 | ||
764 | if (!megasas_issue_polled(instance, cmd)) | 757 | if (!megasas_issue_polled(instance, cmd)) |
765 | ret = 0; | 758 | ret = 0; |
@@ -828,7 +821,7 @@ megasas_sync_map_info(struct megasas_instance *instance) | |||
828 | 821 | ||
829 | map = fusion->ld_map[instance->map_id & 1]; | 822 | map = fusion->ld_map[instance->map_id & 1]; |
830 | 823 | ||
831 | num_lds = map->raidMap.ldCount; | 824 | num_lds = le32_to_cpu(map->raidMap.ldCount); |
832 | 825 | ||
833 | dcmd = &cmd->frame->dcmd; | 826 | dcmd = &cmd->frame->dcmd; |
834 | 827 | ||
@@ -856,15 +849,15 @@ megasas_sync_map_info(struct megasas_instance *instance) | |||
856 | dcmd->cmd = MFI_CMD_DCMD; | 849 | dcmd->cmd = MFI_CMD_DCMD; |
857 | dcmd->cmd_status = 0xFF; | 850 | dcmd->cmd_status = 0xFF; |
858 | dcmd->sge_count = 1; | 851 | dcmd->sge_count = 1; |
859 | dcmd->flags = MFI_FRAME_DIR_WRITE; | 852 | dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_WRITE); |
860 | dcmd->timeout = 0; | 853 | dcmd->timeout = 0; |
861 | dcmd->pad_0 = 0; | 854 | dcmd->pad_0 = 0; |
862 | dcmd->data_xfer_len = size_map_info; | 855 | dcmd->data_xfer_len = cpu_to_le32(size_map_info); |
863 | dcmd->mbox.b[0] = num_lds; | 856 | dcmd->mbox.b[0] = num_lds; |
864 | dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; | 857 | dcmd->mbox.b[1] = MEGASAS_DCMD_MBOX_PEND_FLAG; |
865 | dcmd->opcode = MR_DCMD_LD_MAP_GET_INFO; | 858 | dcmd->opcode = cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO); |
866 | dcmd->sgl.sge32[0].phys_addr = ci_h; | 859 | dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h); |
867 | dcmd->sgl.sge32[0].length = size_map_info; | 860 | dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info); |
868 | 861 | ||
869 | instance->map_update_cmd = cmd; | 862 | instance->map_update_cmd = cmd; |
870 | 863 | ||
@@ -1067,9 +1060,8 @@ megasas_fire_cmd_fusion(struct megasas_instance *instance, | |||
1067 | 1060 | ||
1068 | spin_lock_irqsave(&instance->hba_lock, flags); | 1061 | spin_lock_irqsave(&instance->hba_lock, flags); |
1069 | 1062 | ||
1070 | writel(req_desc_lo, | 1063 | writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port); |
1071 | &(regs)->inbound_low_queue_port); | 1064 | writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port); |
1072 | writel(req_desc_hi, &(regs)->inbound_high_queue_port); | ||
1073 | spin_unlock_irqrestore(&instance->hba_lock, flags); | 1065 | spin_unlock_irqrestore(&instance->hba_lock, flags); |
1074 | } | 1066 | } |
1075 | 1067 | ||
@@ -1157,8 +1149,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, | |||
1157 | return sge_count; | 1149 | return sge_count; |
1158 | 1150 | ||
1159 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { | 1151 | scsi_for_each_sg(scp, os_sgl, sge_count, i) { |
1160 | sgl_ptr->Length = sg_dma_len(os_sgl); | 1152 | sgl_ptr->Length = cpu_to_le32(sg_dma_len(os_sgl)); |
1161 | sgl_ptr->Address = sg_dma_address(os_sgl); | 1153 | sgl_ptr->Address = cpu_to_le64(sg_dma_address(os_sgl)); |
1162 | sgl_ptr->Flags = 0; | 1154 | sgl_ptr->Flags = 0; |
1163 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || | 1155 | if ((instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) || |
1164 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { | 1156 | (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) { |
@@ -1177,9 +1169,9 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, | |||
1177 | PCI_DEVICE_ID_LSI_INVADER) || | 1169 | PCI_DEVICE_ID_LSI_INVADER) || |
1178 | (instance->pdev->device == | 1170 | (instance->pdev->device == |
1179 | PCI_DEVICE_ID_LSI_FURY)) { | 1171 | PCI_DEVICE_ID_LSI_FURY)) { |
1180 | if ((cmd->io_request->IoFlags & | 1172 | if ((le16_to_cpu(cmd->io_request->IoFlags) & |
1181 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != | 1173 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) != |
1182 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) | 1174 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH) |
1183 | cmd->io_request->ChainOffset = | 1175 | cmd->io_request->ChainOffset = |
1184 | fusion-> | 1176 | fusion-> |
1185 | chain_offset_io_request; | 1177 | chain_offset_io_request; |
@@ -1201,9 +1193,8 @@ megasas_make_sgl_fusion(struct megasas_instance *instance, | |||
1201 | sg_chain->Flags = | 1193 | sg_chain->Flags = |
1202 | (IEEE_SGE_FLAGS_CHAIN_ELEMENT | | 1194 | (IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
1203 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); | 1195 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR); |
1204 | sg_chain->Length = (sizeof(union MPI2_SGE_IO_UNION) | 1196 | sg_chain->Length = cpu_to_le32((sizeof(union MPI2_SGE_IO_UNION) * (sge_count - sg_processed))); |
1205 | *(sge_count - sg_processed)); | 1197 | sg_chain->Address = cpu_to_le64(cmd->sg_frame_phys_addr); |
1206 | sg_chain->Address = cmd->sg_frame_phys_addr; | ||
1207 | 1198 | ||
1208 | sgl_ptr = | 1199 | sgl_ptr = |
1209 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; | 1200 | (struct MPI25_IEEE_SGE_CHAIN64 *)cmd->sg_frame; |
@@ -1261,7 +1252,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
1261 | io_request->CDB.EEDP32.PrimaryReferenceTag = | 1252 | io_request->CDB.EEDP32.PrimaryReferenceTag = |
1262 | cpu_to_be32(ref_tag); | 1253 | cpu_to_be32(ref_tag); |
1263 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; | 1254 | io_request->CDB.EEDP32.PrimaryApplicationTagMask = 0xffff; |
1264 | io_request->IoFlags = 32; /* Specify 32-byte cdb */ | 1255 | io_request->IoFlags = cpu_to_le16(32); /* Specify 32-byte cdb */ |
1265 | 1256 | ||
1266 | /* Transfer length */ | 1257 | /* Transfer length */ |
1267 | cdb[28] = (u8)((num_blocks >> 24) & 0xff); | 1258 | cdb[28] = (u8)((num_blocks >> 24) & 0xff); |
@@ -1271,19 +1262,19 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
1271 | 1262 | ||
1272 | /* set SCSI IO EEDPFlags */ | 1263 | /* set SCSI IO EEDPFlags */ |
1273 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { | 1264 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) { |
1274 | io_request->EEDPFlags = | 1265 | io_request->EEDPFlags = cpu_to_le16( |
1275 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | | 1266 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | |
1276 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | | 1267 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG | |
1277 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | | 1268 | MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP | |
1278 | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | | 1269 | MPI2_SCSIIO_EEDPFLAGS_CHECK_APPTAG | |
1279 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD; | 1270 | MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD); |
1280 | } else { | 1271 | } else { |
1281 | io_request->EEDPFlags = | 1272 | io_request->EEDPFlags = cpu_to_le16( |
1282 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | | 1273 | MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG | |
1283 | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP; | 1274 | MPI2_SCSIIO_EEDPFLAGS_INSERT_OP); |
1284 | } | 1275 | } |
1285 | io_request->Control |= (0x4 << 26); | 1276 | io_request->Control |= cpu_to_le32((0x4 << 26)); |
1286 | io_request->EEDPBlockSize = scp->device->sector_size; | 1277 | io_request->EEDPBlockSize = cpu_to_le32(scp->device->sector_size); |
1287 | } else { | 1278 | } else { |
1288 | /* Some drives don't support 16/12 byte CDB's, convert to 10 */ | 1279 | /* Some drives don't support 16/12 byte CDB's, convert to 10 */ |
1289 | if (((cdb_len == 12) || (cdb_len == 16)) && | 1280 | if (((cdb_len == 12) || (cdb_len == 16)) && |
@@ -1311,7 +1302,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
1311 | cdb[8] = (u8)(num_blocks & 0xff); | 1302 | cdb[8] = (u8)(num_blocks & 0xff); |
1312 | cdb[7] = (u8)((num_blocks >> 8) & 0xff); | 1303 | cdb[7] = (u8)((num_blocks >> 8) & 0xff); |
1313 | 1304 | ||
1314 | io_request->IoFlags = 10; /* Specify 10-byte cdb */ | 1305 | io_request->IoFlags = cpu_to_le16(10); /* Specify 10-byte cdb */ |
1315 | cdb_len = 10; | 1306 | cdb_len = 10; |
1316 | } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { | 1307 | } else if ((cdb_len < 16) && (start_blk > 0xffffffff)) { |
1317 | /* Convert to 16 byte CDB for large LBA's */ | 1308 | /* Convert to 16 byte CDB for large LBA's */ |
@@ -1349,7 +1340,7 @@ megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len, | |||
1349 | cdb[11] = (u8)((num_blocks >> 16) & 0xff); | 1340 | cdb[11] = (u8)((num_blocks >> 16) & 0xff); |
1350 | cdb[10] = (u8)((num_blocks >> 24) & 0xff); | 1341 | cdb[10] = (u8)((num_blocks >> 24) & 0xff); |
1351 | 1342 | ||
1352 | io_request->IoFlags = 16; /* Specify 16-byte cdb */ | 1343 | io_request->IoFlags = cpu_to_le16(16); /* Specify 16-byte cdb */ |
1353 | cdb_len = 16; | 1344 | cdb_len = 16; |
1354 | } | 1345 | } |
1355 | 1346 | ||
@@ -1410,13 +1401,14 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1410 | struct IO_REQUEST_INFO io_info; | 1401 | struct IO_REQUEST_INFO io_info; |
1411 | struct fusion_context *fusion; | 1402 | struct fusion_context *fusion; |
1412 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; | 1403 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; |
1404 | u8 *raidLUN; | ||
1413 | 1405 | ||
1414 | device_id = MEGASAS_DEV_INDEX(instance, scp); | 1406 | device_id = MEGASAS_DEV_INDEX(instance, scp); |
1415 | 1407 | ||
1416 | fusion = instance->ctrl_context; | 1408 | fusion = instance->ctrl_context; |
1417 | 1409 | ||
1418 | io_request = cmd->io_request; | 1410 | io_request = cmd->io_request; |
1419 | io_request->RaidContext.VirtualDiskTgtId = device_id; | 1411 | io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); |
1420 | io_request->RaidContext.status = 0; | 1412 | io_request->RaidContext.status = 0; |
1421 | io_request->RaidContext.exStatus = 0; | 1413 | io_request->RaidContext.exStatus = 0; |
1422 | 1414 | ||
@@ -1480,7 +1472,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1480 | io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; | 1472 | io_info.ldStartBlock = ((u64)start_lba_hi << 32) | start_lba_lo; |
1481 | io_info.numBlocks = datalength; | 1473 | io_info.numBlocks = datalength; |
1482 | io_info.ldTgtId = device_id; | 1474 | io_info.ldTgtId = device_id; |
1483 | io_request->DataLength = scsi_bufflen(scp); | 1475 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scp)); |
1484 | 1476 | ||
1485 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | 1477 | if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) |
1486 | io_info.isRead = 1; | 1478 | io_info.isRead = 1; |
@@ -1494,7 +1486,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1494 | } else { | 1486 | } else { |
1495 | if (MR_BuildRaidContext(instance, &io_info, | 1487 | if (MR_BuildRaidContext(instance, &io_info, |
1496 | &io_request->RaidContext, | 1488 | &io_request->RaidContext, |
1497 | local_map_ptr)) | 1489 | local_map_ptr, &raidLUN)) |
1498 | fp_possible = io_info.fpOkForIo; | 1490 | fp_possible = io_info.fpOkForIo; |
1499 | } | 1491 | } |
1500 | 1492 | ||
@@ -1520,8 +1512,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1520 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 1512 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
1521 | io_request->RaidContext.Type = MPI2_TYPE_CUDA; | 1513 | io_request->RaidContext.Type = MPI2_TYPE_CUDA; |
1522 | io_request->RaidContext.nseg = 0x1; | 1514 | io_request->RaidContext.nseg = 0x1; |
1523 | io_request->IoFlags |= | 1515 | io_request->IoFlags |= cpu_to_le16(MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH); |
1524 | MPI25_SAS_DEVICE0_FLAGS_ENABLED_FAST_PATH; | ||
1525 | io_request->RaidContext.regLockFlags |= | 1516 | io_request->RaidContext.regLockFlags |= |
1526 | (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | | 1517 | (MR_RL_FLAGS_GRANT_DESTINATION_CUDA | |
1527 | MR_RL_FLAGS_SEQ_NUM_ENABLE); | 1518 | MR_RL_FLAGS_SEQ_NUM_ENABLE); |
@@ -1537,9 +1528,11 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1537 | scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; | 1528 | scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG; |
1538 | cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; | 1529 | cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle; |
1539 | io_request->DevHandle = io_info.devHandle; | 1530 | io_request->DevHandle = io_info.devHandle; |
1531 | /* populate the LUN field */ | ||
1532 | memcpy(io_request->LUN, raidLUN, 8); | ||
1540 | } else { | 1533 | } else { |
1541 | io_request->RaidContext.timeoutValue = | 1534 | io_request->RaidContext.timeoutValue = |
1542 | local_map_ptr->raidMap.fpPdIoTimeoutSec; | 1535 | cpu_to_le16(local_map_ptr->raidMap.fpPdIoTimeoutSec); |
1543 | cmd->request_desc->SCSIIO.RequestFlags = | 1536 | cmd->request_desc->SCSIIO.RequestFlags = |
1544 | (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO | 1537 | (MEGASAS_REQ_DESCRIPT_FLAGS_LD_IO |
1545 | << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 1538 | << MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
@@ -1557,7 +1550,7 @@ megasas_build_ldio_fusion(struct megasas_instance *instance, | |||
1557 | io_request->RaidContext.nseg = 0x1; | 1550 | io_request->RaidContext.nseg = 0x1; |
1558 | } | 1551 | } |
1559 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; | 1552 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; |
1560 | io_request->DevHandle = device_id; | 1553 | io_request->DevHandle = cpu_to_le16(device_id); |
1561 | } /* Not FP */ | 1554 | } /* Not FP */ |
1562 | } | 1555 | } |
1563 | 1556 | ||
@@ -1579,6 +1572,11 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1579 | u16 pd_index = 0; | 1572 | u16 pd_index = 0; |
1580 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; | 1573 | struct MR_FW_RAID_MAP_ALL *local_map_ptr; |
1581 | struct fusion_context *fusion = instance->ctrl_context; | 1574 | struct fusion_context *fusion = instance->ctrl_context; |
1575 | u8 span, physArm; | ||
1576 | u16 devHandle; | ||
1577 | u32 ld, arRef, pd; | ||
1578 | struct MR_LD_RAID *raid; | ||
1579 | struct RAID_CONTEXT *pRAID_Context; | ||
1582 | 1580 | ||
1583 | io_request = cmd->io_request; | 1581 | io_request = cmd->io_request; |
1584 | device_id = MEGASAS_DEV_INDEX(instance, scmd); | 1582 | device_id = MEGASAS_DEV_INDEX(instance, scmd); |
@@ -1586,6 +1584,9 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1586 | +scmd->device->id; | 1584 | +scmd->device->id; |
1587 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; | 1585 | local_map_ptr = fusion->ld_map[(instance->map_id & 1)]; |
1588 | 1586 | ||
1587 | io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd)); | ||
1588 | |||
1589 | |||
1589 | /* Check if this is a system PD I/O */ | 1590 | /* Check if this is a system PD I/O */ |
1590 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && | 1591 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS && |
1591 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { | 1592 | instance->pd_list[pd_index].driveState == MR_PD_STATE_SYSTEM) { |
@@ -1623,15 +1624,62 @@ megasas_build_dcdb_fusion(struct megasas_instance *instance, | |||
1623 | scmd->request->timeout / HZ; | 1624 | scmd->request->timeout / HZ; |
1624 | } | 1625 | } |
1625 | } else { | 1626 | } else { |
1627 | if (scmd->device->channel < MEGASAS_MAX_PD_CHANNELS) | ||
1628 | goto NonFastPath; | ||
1629 | |||
1630 | ld = MR_TargetIdToLdGet(device_id, local_map_ptr); | ||
1631 | if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) | ||
1632 | goto NonFastPath; | ||
1633 | |||
1634 | raid = MR_LdRaidGet(ld, local_map_ptr); | ||
1635 | |||
1636 | /* check if this LD is FP capable */ | ||
1637 | if (!(raid->capability.fpNonRWCapable)) | ||
1638 | /* not FP capable, send as non-FP */ | ||
1639 | goto NonFastPath; | ||
1640 | |||
1641 | /* get RAID_Context pointer */ | ||
1642 | pRAID_Context = &io_request->RaidContext; | ||
1643 | |||
1644 | /* set RAID context values */ | ||
1645 | pRAID_Context->regLockFlags = REGION_TYPE_SHARED_READ; | ||
1646 | pRAID_Context->timeoutValue = raid->fpIoTimeoutForLd; | ||
1647 | pRAID_Context->VirtualDiskTgtId = cpu_to_le16(device_id); | ||
1648 | pRAID_Context->regLockRowLBA = 0; | ||
1649 | pRAID_Context->regLockLength = 0; | ||
1650 | pRAID_Context->configSeqNum = raid->seqNum; | ||
1651 | |||
1652 | /* get the DevHandle for the PD (since this is | ||
1653 | fpNonRWCapable, this is a single disk RAID0) */ | ||
1654 | span = physArm = 0; | ||
1655 | arRef = MR_LdSpanArrayGet(ld, span, local_map_ptr); | ||
1656 | pd = MR_ArPdGet(arRef, physArm, local_map_ptr); | ||
1657 | devHandle = MR_PdDevHandleGet(pd, local_map_ptr); | ||
1658 | |||
1659 | /* build request descriptor */ | ||
1660 | cmd->request_desc->SCSIIO.RequestFlags = | ||
1661 | (MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY << | ||
1662 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | ||
1663 | cmd->request_desc->SCSIIO.DevHandle = devHandle; | ||
1664 | |||
1665 | /* populate the LUN field */ | ||
1666 | memcpy(io_request->LUN, raid->LUN, 8); | ||
1667 | |||
1668 | /* build the raidScsiIO structure */ | ||
1669 | io_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST; | ||
1670 | io_request->DevHandle = devHandle; | ||
1671 | |||
1672 | return; | ||
1673 | |||
1674 | NonFastPath: | ||
1626 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; | 1675 | io_request->Function = MEGASAS_MPI2_FUNCTION_LD_IO_REQUEST; |
1627 | io_request->DevHandle = device_id; | 1676 | io_request->DevHandle = cpu_to_le16(device_id); |
1628 | cmd->request_desc->SCSIIO.RequestFlags = | 1677 | cmd->request_desc->SCSIIO.RequestFlags = |
1629 | (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << | 1678 | (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
1630 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 1679 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
1631 | } | 1680 | } |
1632 | io_request->RaidContext.VirtualDiskTgtId = device_id; | 1681 | io_request->RaidContext.VirtualDiskTgtId = cpu_to_le16(device_id); |
1633 | io_request->LUN[1] = scmd->device->lun; | 1682 | io_request->LUN[1] = scmd->device->lun; |
1634 | io_request->DataLength = scsi_bufflen(scmd); | ||
1635 | } | 1683 | } |
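The non-RW fast path added above walks three levels of the firmware map to reach a device handle; since an fpNonRWCapable LD is a single-disk RAID0, span 0 / arm 0 is the only candidate. Flattened into one illustrative helper (kernel context, using the accessors named in the hunk):

    static u16 fp_non_rw_dev_handle(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
    {
            u32 arRef = MR_LdSpanArrayGet(ld, 0, map); /* span 0 -> array ref */
            u32 pd    = MR_ArPdGet(arRef, 0, map);     /* arm 0  -> phys disk */
            return MR_PdDevHandleGet(pd, map);         /* disk   -> devHandle */
    }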
1636 | 1684 | ||
1637 | /** | 1685 | /** |
@@ -1670,7 +1718,7 @@ megasas_build_io_fusion(struct megasas_instance *instance, | |||
1670 | * Just the CDB length,rest of the Flags are zero | 1718 | * Just the CDB length,rest of the Flags are zero |
1671 | * This will be modified for FP in build_ldio_fusion | 1719 | * This will be modified for FP in build_ldio_fusion |
1672 | */ | 1720 | */ |
1673 | io_request->IoFlags = scp->cmd_len; | 1721 | io_request->IoFlags = cpu_to_le16(scp->cmd_len); |
1674 | 1722 | ||
1675 | if (megasas_is_ldio(scp)) | 1723 | if (megasas_is_ldio(scp)) |
1676 | megasas_build_ldio_fusion(instance, scp, cmd); | 1724 | megasas_build_ldio_fusion(instance, scp, cmd); |
@@ -1695,17 +1743,17 @@ megasas_build_io_fusion(struct megasas_instance *instance, | |||
1695 | 1743 | ||
1696 | io_request->RaidContext.numSGE = sge_count; | 1744 | io_request->RaidContext.numSGE = sge_count; |
1697 | 1745 | ||
1698 | io_request->SGLFlags = MPI2_SGE_FLAGS_64_BIT_ADDRESSING; | 1746 | io_request->SGLFlags = cpu_to_le16(MPI2_SGE_FLAGS_64_BIT_ADDRESSING); |
1699 | 1747 | ||
1700 | if (scp->sc_data_direction == PCI_DMA_TODEVICE) | 1748 | if (scp->sc_data_direction == PCI_DMA_TODEVICE) |
1701 | io_request->Control |= MPI2_SCSIIO_CONTROL_WRITE; | 1749 | io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_WRITE); |
1702 | else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) | 1750 | else if (scp->sc_data_direction == PCI_DMA_FROMDEVICE) |
1703 | io_request->Control |= MPI2_SCSIIO_CONTROL_READ; | 1751 | io_request->Control |= cpu_to_le32(MPI2_SCSIIO_CONTROL_READ); |
1704 | 1752 | ||
1705 | io_request->SGLOffset0 = | 1753 | io_request->SGLOffset0 = |
1706 | offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; | 1754 | offsetof(struct MPI2_RAID_SCSI_IO_REQUEST, SGL) / 4; |
1707 | 1755 | ||
1708 | io_request->SenseBufferLowAddress = cmd->sense_phys_addr; | 1756 | io_request->SenseBufferLowAddress = cpu_to_le32(cmd->sense_phys_addr); |
1709 | io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; | 1757 | io_request->SenseBufferLength = SCSI_SENSE_BUFFERSIZE; |
1710 | 1758 | ||
1711 | cmd->scmd = scp; | 1759 | cmd->scmd = scp; |
@@ -1770,7 +1818,7 @@ megasas_build_and_issue_cmd_fusion(struct megasas_instance *instance, | |||
1770 | } | 1818 | } |
1771 | 1819 | ||
1772 | req_desc = cmd->request_desc; | 1820 | req_desc = cmd->request_desc; |
1773 | req_desc->SCSIIO.SMID = index; | 1821 | req_desc->SCSIIO.SMID = cpu_to_le16(index); |
1774 | 1822 | ||
1775 | if (cmd->io_request->ChainOffset != 0 && | 1823 | if (cmd->io_request->ChainOffset != 0 && |
1776 | cmd->io_request->ChainOffset != 0xF) | 1824 | cmd->io_request->ChainOffset != 0xF) |
@@ -1832,7 +1880,7 @@ complete_cmd_fusion(struct megasas_instance *instance, u32 MSIxIndex) | |||
1832 | num_completed = 0; | 1880 | num_completed = 0; |
1833 | 1881 | ||
1834 | while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { | 1882 | while ((d_val.u.low != UINT_MAX) && (d_val.u.high != UINT_MAX)) { |
1835 | smid = reply_desc->SMID; | 1883 | smid = le16_to_cpu(reply_desc->SMID); |
1836 | 1884 | ||
1837 | cmd_fusion = fusion->cmd_list[smid - 1]; | 1885 | cmd_fusion = fusion->cmd_list[smid - 1]; |
1838 | 1886 | ||
@@ -2050,12 +2098,12 @@ build_mpt_mfi_pass_thru(struct megasas_instance *instance, | |||
2050 | SGL) / 4; | 2098 | SGL) / 4; |
2051 | io_req->ChainOffset = fusion->chain_offset_mfi_pthru; | 2099 | io_req->ChainOffset = fusion->chain_offset_mfi_pthru; |
2052 | 2100 | ||
2053 | mpi25_ieee_chain->Address = mfi_cmd->frame_phys_addr; | 2101 | mpi25_ieee_chain->Address = cpu_to_le64(mfi_cmd->frame_phys_addr); |
2054 | 2102 | ||
2055 | mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | | 2103 | mpi25_ieee_chain->Flags = IEEE_SGE_FLAGS_CHAIN_ELEMENT | |
2056 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; | 2104 | MPI2_IEEE_SGE_FLAGS_IOCPLBNTA_ADDR; |
2057 | 2105 | ||
2058 | mpi25_ieee_chain->Length = MEGASAS_MAX_SZ_CHAIN_FRAME; | 2106 | mpi25_ieee_chain->Length = cpu_to_le32(MEGASAS_MAX_SZ_CHAIN_FRAME); |
2059 | 2107 | ||
2060 | return 0; | 2108 | return 0; |
2061 | } | 2109 | } |
@@ -2088,7 +2136,7 @@ build_mpt_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd) | |||
2088 | req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << | 2136 | req_desc->SCSIIO.RequestFlags = (MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO << |
2089 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); | 2137 | MEGASAS_REQ_DESCRIPT_FLAGS_TYPE_SHIFT); |
2090 | 2138 | ||
2091 | req_desc->SCSIIO.SMID = index; | 2139 | req_desc->SCSIIO.SMID = cpu_to_le16(index); |
2092 | 2140 | ||
2093 | return req_desc; | 2141 | return req_desc; |
2094 | } | 2142 | } |
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h index 4eb84011cb07..35a51397b364 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.h +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h | |||
@@ -93,8 +93,13 @@ enum MR_RAID_FLAGS_IO_SUB_TYPE { | |||
93 | */ | 93 | */ |
94 | 94 | ||
95 | struct RAID_CONTEXT { | 95 | struct RAID_CONTEXT { |
96 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
97 | u8 nseg:4; | ||
98 | u8 Type:4; | ||
99 | #else | ||
96 | u8 Type:4; | 100 | u8 Type:4; |
97 | u8 nseg:4; | 101 | u8 nseg:4; |
102 | #endif | ||
98 | u8 resvd0; | 103 | u8 resvd0; |
99 | u16 timeoutValue; | 104 | u16 timeoutValue; |
100 | u8 regLockFlags; | 105 | u8 regLockFlags; |
@@ -298,8 +303,13 @@ struct MPI2_RAID_SCSI_IO_REQUEST { | |||
298 | * MPT RAID MFA IO Descriptor. | 303 | * MPT RAID MFA IO Descriptor. |
299 | */ | 304 | */ |
300 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { | 305 | struct MEGASAS_RAID_MFA_IO_REQUEST_DESCRIPTOR { |
306 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
307 | u32 MessageAddress1:24; /* bits 31:8*/ | ||
308 | u32 RequestFlags:8; | ||
309 | #else | ||
301 | u32 RequestFlags:8; | 310 | u32 RequestFlags:8; |
302 | u32 MessageAddress1:24; /* bits 31:8*/ | 311 | u32 MessageAddress1:24; /* bits 31:8*/ |
312 | #endif | ||
303 | u32 MessageAddress2; /* bits 61:32 */ | 313 | u32 MessageAddress2; /* bits 61:32 */ |
304 | }; | 314 | }; |
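The __BIG_ENDIAN_BITFIELD guards here and in RAID_CONTEXT above exist because C leaves bit-field allocation order implementation-defined: GCC fills from the least significant bit on little-endian targets and from the most significant on big-endian ones, so structs shared with firmware must mirror their declaration order. The general pattern, as a kernel-context sketch with illustrative fields:

    #include <linux/types.h>
    #include <asm/byteorder.h> /* defines exactly one of the two macros */

    struct wire_flags_sketch {
    #if defined(__BIG_ENDIAN_BITFIELD)
            __u32 reserved:24;
            __u32 flags:8; /* still occupies bits 7:0 of the word */
    #else
            __u32 flags:8;
            __u32 reserved:24;
    #endif
    };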
305 | 315 | ||
@@ -518,6 +528,19 @@ struct MR_SPAN_BLOCK_INFO { | |||
518 | 528 | ||
519 | struct MR_LD_RAID { | 529 | struct MR_LD_RAID { |
520 | struct { | 530 | struct { |
531 | #if defined(__BIG_ENDIAN_BITFIELD) | ||
532 | u32 reserved4:7; | ||
533 | u32 fpNonRWCapable:1; | ||
534 | u32 fpReadAcrossStripe:1; | ||
535 | u32 fpWriteAcrossStripe:1; | ||
536 | u32 fpReadCapable:1; | ||
537 | u32 fpWriteCapable:1; | ||
538 | u32 encryptionType:8; | ||
539 | u32 pdPiMode:4; | ||
540 | u32 ldPiMode:4; | ||
541 | u32 reserved5:3; | ||
542 | u32 fpCapable:1; | ||
543 | #else | ||
521 | u32 fpCapable:1; | 544 | u32 fpCapable:1; |
522 | u32 reserved5:3; | 545 | u32 reserved5:3; |
523 | u32 ldPiMode:4; | 546 | u32 ldPiMode:4; |
@@ -527,7 +550,9 @@ struct MR_LD_RAID { | |||
527 | u32 fpReadCapable:1; | 550 | u32 fpReadCapable:1; |
528 | u32 fpWriteAcrossStripe:1; | 551 | u32 fpWriteAcrossStripe:1; |
529 | u32 fpReadAcrossStripe:1; | 552 | u32 fpReadAcrossStripe:1; |
530 | u32 reserved4:8; | 553 | u32 fpNonRWCapable:1; |
554 | u32 reserved4:7; | ||
555 | #endif | ||
531 | } capability; | 556 | } capability; |
532 | u32 reserved6; | 557 | u32 reserved6; |
533 | u64 size; | 558 | u64 size; |
@@ -551,7 +576,9 @@ struct MR_LD_RAID { | |||
551 | u32 reserved:31; | 576 | u32 reserved:31; |
552 | } flags; | 577 | } flags; |
553 | 578 | ||
554 | u8 reserved3[0x5C]; | 579 | u8 LUN[8]; /* 0x24 8 byte LUN field used for SCSI IO's */ |
580 | u8 fpIoTimeoutForLd;/*0x2C timeout value used by driver in FP IO*/ | ||
581 | u8 reserved3[0x80-0x2D]; /* 0x2D */ | ||
555 | }; | 582 | }; |
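The carve-out above must preserve MR_LD_RAID's firmware-defined layout: the old reserved3[0x5C] spanned offsets 0x24..0x7F, and LUN[8] (0x24) plus fpIoTimeoutForLd (0x2C) plus the shrunken reserved3 have to cover exactly the same bytes. A compile-time check of that arithmetic:

    /* 8 (LUN) + 1 (fpIoTimeoutForLd) + 0x53 (reserved3) == 0x5C bytes,
     * so the struct size and all later offsets are unchanged. */
    _Static_assert(8 + 1 + (0x80 - 0x2D) == 0x5C,
                   "MR_LD_RAID tail layout preserved");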
556 | 583 | ||
557 | struct MR_LD_SPAN_MAP { | 584 | struct MR_LD_SPAN_MAP { |
diff --git a/drivers/scsi/mpt3sas/Makefile b/drivers/scsi/mpt3sas/Makefile index 4c1d2e7a1176..efb0c4c2e310 100644 --- a/drivers/scsi/mpt3sas/Makefile +++ b/drivers/scsi/mpt3sas/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | # mpt3sas makefile | 1 | # mpt3sas makefile |
2 | obj-m += mpt3sas.o | 2 | obj-$(CONFIG_SCSI_MPT3SAS) += mpt3sas.o |
3 | mpt3sas-y += mpt3sas_base.o \ | 3 | mpt3sas-y += mpt3sas_base.o \ |
4 | mpt3sas_config.o \ | 4 | mpt3sas_config.o \ |
5 | mpt3sas_scsih.o \ | 5 | mpt3sas_scsih.o \ |
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index b58e8f815a00..e62d17d41d4e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c | |||
@@ -2420,14 +2420,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) | |||
2420 | } | 2420 | } |
2421 | } | 2421 | } |
2422 | 2422 | ||
2423 | if (modepage == 0x3F) { | 2423 | sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); |
2424 | sd_printk(KERN_ERR, sdkp, "No Caching mode page " | 2424 | goto defaults; |
2425 | "present\n"); | 2425 | |
2426 | goto defaults; | ||
2427 | } else if ((buffer[offset] & 0x3f) != modepage) { | ||
2428 | sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); | ||
2429 | goto defaults; | ||
2430 | } | ||
2431 | Page_found: | 2426 | Page_found: |
2432 | if (modepage == 8) { | 2427 | if (modepage == 8) { |
2433 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); | 2428 | sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); |
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h index bce09a6898c4..721050090520 100644 --- a/drivers/scsi/ufs/ufs.h +++ b/drivers/scsi/ufs/ufs.h | |||
@@ -177,6 +177,7 @@ enum { | |||
177 | MASK_TASK_RESPONSE = 0xFF00, | 177 | MASK_TASK_RESPONSE = 0xFF00, |
178 | MASK_RSP_UPIU_RESULT = 0xFFFF, | 178 | MASK_RSP_UPIU_RESULT = 0xFFFF, |
179 | MASK_QUERY_DATA_SEG_LEN = 0xFFFF, | 179 | MASK_QUERY_DATA_SEG_LEN = 0xFFFF, |
180 | MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF, | ||
180 | MASK_RSP_EXCEPTION_EVENT = 0x10000, | 181 | MASK_RSP_EXCEPTION_EVENT = 0x10000, |
181 | }; | 182 | }; |
182 | 183 | ||
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index b36ca9a2dfbb..04884d663e4e 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c | |||
@@ -36,9 +36,11 @@ | |||
36 | #include <linux/async.h> | 36 | #include <linux/async.h> |
37 | 37 | ||
38 | #include "ufshcd.h" | 38 | #include "ufshcd.h" |
39 | #include "unipro.h" | ||
39 | 40 | ||
40 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ | 41 | #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ |
41 | UTP_TASK_REQ_COMPL |\ | 42 | UTP_TASK_REQ_COMPL |\ |
43 | UIC_POWER_MODE |\ | ||
42 | UFSHCD_ERROR_MASK) | 44 | UFSHCD_ERROR_MASK) |
43 | /* UIC command timeout, unit: ms */ | 45 | /* UIC command timeout, unit: ms */ |
44 | #define UIC_CMD_TIMEOUT 500 | 46 | #define UIC_CMD_TIMEOUT 500 |
@@ -56,6 +58,9 @@ | |||
56 | /* Expose the flag value from utp_upiu_query.value */ | 58 | /* Expose the flag value from utp_upiu_query.value */ |
57 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF | 59 | #define MASK_QUERY_UPIU_FLAG_LOC 0xFF |
58 | 60 | ||
61 | /* Interrupt aggregation default timeout, unit: 40us */ | ||
62 | #define INT_AGGR_DEF_TO 0x02 | ||
63 | |||
59 | enum { | 64 | enum { |
60 | UFSHCD_MAX_CHANNEL = 0, | 65 | UFSHCD_MAX_CHANNEL = 0, |
61 | UFSHCD_MAX_ID = 1, | 66 | UFSHCD_MAX_ID = 1, |
@@ -78,12 +83,6 @@ enum { | |||
78 | UFSHCD_INT_CLEAR, | 83 | UFSHCD_INT_CLEAR, |
79 | }; | 84 | }; |
80 | 85 | ||
81 | /* Interrupt aggregation options */ | ||
82 | enum { | ||
83 | INT_AGGR_RESET, | ||
84 | INT_AGGR_CONFIG, | ||
85 | }; | ||
86 | |||
87 | /* | 86 | /* |
88 | * ufshcd_wait_for_register - wait for register value to change | 87 | * ufshcd_wait_for_register - wait for register value to change |
89 | * @hba - per-adapter interface | 88 | * @hba - per-adapter interface |
@@ -238,6 +237,18 @@ static inline int ufshcd_get_uic_cmd_result(struct ufs_hba *hba) | |||
238 | } | 237 | } |
239 | 238 | ||
240 | /** | 239 | /** |
240 | * ufshcd_get_dme_attr_val - Get the value of attribute returned by UIC command | ||
241 | * @hba: Pointer to adapter instance | ||
242 | * | ||
243 | * This function gets UIC command argument3 | ||
244 | * Returns the value of UIC command argument3 | ||
245 | */ | ||
246 | static inline u32 ufshcd_get_dme_attr_val(struct ufs_hba *hba) | ||
247 | { | ||
248 | return ufshcd_readl(hba, REG_UIC_COMMAND_ARG_3); | ||
249 | } | ||
250 | |||
251 | /** | ||
241 | * ufshcd_get_req_rsp - returns the TR response transaction type | 252 | * ufshcd_get_req_rsp - returns the TR response transaction type |
242 | * @ucd_rsp_ptr: pointer to response UPIU | 253 | * @ucd_rsp_ptr: pointer to response UPIU |
243 | */ | 254 | */ |
@@ -260,6 +271,20 @@ ufshcd_get_rsp_upiu_result(struct utp_upiu_rsp *ucd_rsp_ptr) | |||
260 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; | 271 | return be32_to_cpu(ucd_rsp_ptr->header.dword_1) & MASK_RSP_UPIU_RESULT; |
261 | } | 272 | } |
262 | 273 | ||
274 | /** | ||
275 | * ufshcd_get_rsp_upiu_data_seg_len - Get the data segment length | ||
276 | * from response UPIU | ||
277 | * @ucd_rsp_ptr: pointer to response UPIU | ||
278 | * | ||
279 | * Return the data segment length. | ||
280 | */ | ||
281 | static inline unsigned int | ||
282 | ufshcd_get_rsp_upiu_data_seg_len(struct utp_upiu_rsp *ucd_rsp_ptr) | ||
283 | { | ||
284 | return be32_to_cpu(ucd_rsp_ptr->header.dword_2) & | ||
285 | MASK_RSP_UPIU_DATA_SEG_LEN; | ||
286 | } | ||
287 | |||
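This helper feeds the sense-copy guard below: dword_2 of the response UPIU header travels big-endian, and its low 16 bits (MASK_RSP_UPIU_DATA_SEG_LEN) carry the data segment length, so a zero result means there are no sense bytes worth copying. A self-contained sketch of the same decode:

    #include <stdint.h>

    /* Endian-neutral be32 decode, then mask the low 16 bits -- the same
     * arithmetic as be32_to_cpu(dword_2) & MASK_RSP_UPIU_DATA_SEG_LEN. */
    static unsigned int data_seg_len(const uint8_t *dword_2)
    {
            uint32_t v = (uint32_t)dword_2[0] << 24 | (uint32_t)dword_2[1] << 16 |
                         (uint32_t)dword_2[2] << 8  | (uint32_t)dword_2[3];
            return v & 0xffff;
    }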
263 | /** | 288 | /** |
264 | * ufshcd_is_exception_event - Check if the device raised an exception event | 289 | * ufshcd_is_exception_event - Check if the device raised an exception event |
265 | * @ucd_rsp_ptr: pointer to response UPIU | 290 | * @ucd_rsp_ptr: pointer to response UPIU |
@@ -276,30 +301,30 @@ static inline bool ufshcd_is_exception_event(struct utp_upiu_rsp *ucd_rsp_ptr) | |||
276 | } | 301 | } |
277 | 302 | ||
278 | /** | 303 | /** |
279 | * ufshcd_config_int_aggr - Configure interrupt aggregation values. | 304 | * ufshcd_reset_intr_aggr - Reset interrupt aggregation values. |
280 | * Currently there is no use case where we want to configure | ||
281 | * interrupt aggregation dynamically. So to configure interrupt | ||
282 | * aggregation, #define INT_AGGR_COUNTER_THRESHOLD_VALUE and | ||
283 | * INT_AGGR_TIMEOUT_VALUE are used. | ||
284 | * @hba: per adapter instance | 305 | * @hba: per adapter instance |
285 | * @option: Interrupt aggregation option | ||
286 | */ | 306 | */ |
287 | static inline void | 307 | static inline void |
288 | ufshcd_config_int_aggr(struct ufs_hba *hba, int option) | 308 | ufshcd_reset_intr_aggr(struct ufs_hba *hba) |
289 | { | 309 | { |
290 | switch (option) { | 310 | ufshcd_writel(hba, INT_AGGR_ENABLE | |
291 | case INT_AGGR_RESET: | 311 | INT_AGGR_COUNTER_AND_TIMER_RESET, |
292 | ufshcd_writel(hba, INT_AGGR_ENABLE | | 312 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); |
293 | INT_AGGR_COUNTER_AND_TIMER_RESET, | 313 | } |
294 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | 314 | |
295 | break; | 315 | /** |
296 | case INT_AGGR_CONFIG: | 316 | * ufshcd_config_intr_aggr - Configure interrupt aggregation values. |
297 | ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | | 317 | * @hba: per adapter instance |
298 | INT_AGGR_COUNTER_THRESHOLD_VALUE | | 318 | * @cnt: Interrupt aggregation counter threshold |
299 | INT_AGGR_TIMEOUT_VALUE, | 319 | * @tmout: Interrupt aggregation timeout value |
300 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | 320 | */ |
301 | break; | 321 | static inline void |
302 | } | 322 | ufshcd_config_intr_aggr(struct ufs_hba *hba, u8 cnt, u8 tmout) |
323 | { | ||
324 | ufshcd_writel(hba, INT_AGGR_ENABLE | INT_AGGR_PARAM_WRITE | | ||
325 | INT_AGGR_COUNTER_THLD_VAL(cnt) | | ||
326 | INT_AGGR_TIMEOUT_VAL(tmout), | ||
327 | REG_UTP_TRANSFER_REQ_INT_AGG_CONTROL); | ||
303 | } | 328 | } |
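Splitting the old option switch into ufshcd_reset_intr_aggr() and ufshcd_config_intr_aggr() lets callers choose the threshold and timeout at run time (e.g. passing the INT_AGGR_DEF_TO added above) instead of baking them in with #defines. A hedged sketch of how the two new macros likely pack the register, assuming the usual UFSHCI UTRIACR layout with the counter threshold in bits 12:8 and the timeout in bits 7:0:

    #define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1f) << 8) /* bits 12:8 */
    #define INT_AGGR_TIMEOUT_VAL(t)      ((t) & 0xff)        /* bits 7:0  */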
304 | 329 | ||
305 | /** | 330 | /** |
@@ -355,7 +380,8 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) | |||
355 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) | 380 | static inline void ufshcd_copy_sense_data(struct ufshcd_lrb *lrbp) |
356 | { | 381 | { |
357 | int len; | 382 | int len; |
358 | if (lrbp->sense_buffer) { | 383 | if (lrbp->sense_buffer && |
384 | ufshcd_get_rsp_upiu_data_seg_len(lrbp->ucd_rsp_ptr)) { | ||
359 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); | 385 | len = be16_to_cpu(lrbp->ucd_rsp_ptr->sr.sense_data_len); |
360 | memcpy(lrbp->sense_buffer, | 386 | memcpy(lrbp->sense_buffer, |
361 | lrbp->ucd_rsp_ptr->sr.sense_data, | 387 | lrbp->ucd_rsp_ptr->sr.sense_data, |
@@ -446,6 +472,18 @@ static inline bool ufshcd_ready_for_uic_cmd(struct ufs_hba *hba) | |||
446 | } | 472 | } |
447 | 473 | ||
448 | /** | 474 | /** |
475 | * ufshcd_get_upmcrs - Get the power mode change request status | ||
476 | * @hba: Pointer to adapter instance | ||
477 | * | ||
478 | * This function gets the UPMCRS field of HCS register | ||
479 | * Returns value of UPMCRS field | ||
480 | */ | ||
481 | static inline u8 ufshcd_get_upmcrs(struct ufs_hba *hba) | ||
482 | { | ||
483 | return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) >> 8) & 0x7; | ||
484 | } | ||
485 | |||
486 | /** | ||
449 | * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers | 487 | * ufshcd_dispatch_uic_cmd - Dispatch UIC commands to unipro layers |
450 | * @hba: per adapter instance | 488 | * @hba: per adapter instance |
451 | * @uic_cmd: UIC command | 489 | * @uic_cmd: UIC command |
@@ -1362,6 +1400,202 @@ static int ufshcd_dme_link_startup(struct ufs_hba *hba) | |||
1362 | } | 1400 | } |
1363 | 1401 | ||
1364 | /** | 1402 | /** |
1403 | * ufshcd_dme_set_attr - UIC command for DME_SET, DME_PEER_SET | ||
1404 | * @hba: per adapter instance | ||
1405 | * @attr_sel: uic command argument1 | ||
1406 | * @attr_set: attribute set type as uic command argument2 | ||
1407 | * @mib_val: setting value as uic command argument3 | ||
1408 | * @peer: indicate whether peer or local | ||
1409 | * | ||
1410 | * Returns 0 on success, non-zero value on failure | ||
1411 | */ | ||
1412 | int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | ||
1413 | u8 attr_set, u32 mib_val, u8 peer) | ||
1414 | { | ||
1415 | struct uic_command uic_cmd = {0}; | ||
1416 | static const char *const action[] = { | ||
1417 | "dme-set", | ||
1418 | "dme-peer-set" | ||
1419 | }; | ||
1420 | const char *set = action[!!peer]; | ||
1421 | int ret; | ||
1422 | |||
1423 | uic_cmd.command = peer ? | ||
1424 | UIC_CMD_DME_PEER_SET : UIC_CMD_DME_SET; | ||
1425 | uic_cmd.argument1 = attr_sel; | ||
1426 | uic_cmd.argument2 = UIC_ARG_ATTR_TYPE(attr_set); | ||
1427 | uic_cmd.argument3 = mib_val; | ||
1428 | |||
1429 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1430 | if (ret) | ||
1431 | dev_err(hba->dev, "%s: attr-id 0x%x val 0x%x error code %d\n", | ||
1432 | set, UIC_GET_ATTR_ID(attr_sel), mib_val, ret); | ||
1433 | |||
1434 | return ret; | ||
1435 | } | ||
1436 | EXPORT_SYMBOL_GPL(ufshcd_dme_set_attr); | ||
1437 | |||
1438 | /** | ||
1439 | * ufshcd_dme_get_attr - UIC command for DME_GET, DME_PEER_GET | ||
1440 | * @hba: per adapter instance | ||
1441 | * @attr_sel: uic command argument1 | ||
1442 | * @mib_val: the value of the attribute as returned by the UIC command | ||
1443 | * @peer: indicate whether peer or local | ||
1444 | * | ||
1445 | * Returns 0 on success, non-zero value on failure | ||
1446 | */ | ||
1447 | int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | ||
1448 | u32 *mib_val, u8 peer) | ||
1449 | { | ||
1450 | struct uic_command uic_cmd = {0}; | ||
1451 | static const char *const action[] = { | ||
1452 | "dme-get", | ||
1453 | "dme-peer-get" | ||
1454 | }; | ||
1455 | const char *get = action[!!peer]; | ||
1456 | int ret; | ||
1457 | |||
1458 | uic_cmd.command = peer ? | ||
1459 | UIC_CMD_DME_PEER_GET : UIC_CMD_DME_GET; | ||
1460 | uic_cmd.argument1 = attr_sel; | ||
1461 | |||
1462 | ret = ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1463 | if (ret) { | ||
1464 | dev_err(hba->dev, "%s: attr-id 0x%x error code %d\n", | ||
1465 | get, UIC_GET_ATTR_ID(attr_sel), ret); | ||
1466 | goto out; | ||
1467 | } | ||
1468 | |||
1469 | if (mib_val) | ||
1470 | *mib_val = uic_cmd.argument3; | ||
1471 | out: | ||
1472 | return ret; | ||
1473 | } | ||
1474 | EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr); | ||
1475 | |||
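ufshcd_dme_set_attr()/ufshcd_dme_get_attr() are the exported primitives; the ufshcd_dme_set(), ufshcd_dme_get() and ufshcd_dme_peer_get() calls used further down are presumably thin wrappers in ufshcd.h along these lines (a sketch; ATTR_SET_NOR, DME_LOCAL and DME_PEER are assumed constants):

    #define ufshcd_dme_set(hba, attr_sel, mib_val) \
            ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, mib_val, DME_LOCAL)
    #define ufshcd_dme_get(hba, attr_sel, mib_val) \
            ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL)
    #define ufshcd_dme_peer_get(hba, attr_sel, mib_val) \
            ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER)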
1476 | /** | ||
1477 | * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change | ||
1478 | * using DME_SET primitives. | ||
1479 | * @hba: per adapter instance | ||
1480 | * @mode: power mode value | ||
1481 | * | ||
1482 | * Returns 0 on success, non-zero value on failure | ||
1483 | */ | ||
1484 | int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode) | ||
1485 | { | ||
1486 | struct uic_command uic_cmd = {0}; | ||
1487 | struct completion pwr_done; | ||
1488 | unsigned long flags; | ||
1489 | u8 status; | ||
1490 | int ret; | ||
1491 | |||
1492 | uic_cmd.command = UIC_CMD_DME_SET; | ||
1493 | uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE); | ||
1494 | uic_cmd.argument3 = mode; | ||
1495 | init_completion(&pwr_done); | ||
1496 | |||
1497 | mutex_lock(&hba->uic_cmd_mutex); | ||
1498 | |||
1499 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1500 | hba->pwr_done = &pwr_done; | ||
1501 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1502 | ret = __ufshcd_send_uic_cmd(hba, &uic_cmd); | ||
1503 | if (ret) { | ||
1504 | dev_err(hba->dev, | ||
1505 | "pwr mode change with mode 0x%x uic error %d\n", | ||
1506 | mode, ret); | ||
1507 | goto out; | ||
1508 | } | ||
1509 | |||
1510 | if (!wait_for_completion_timeout(hba->pwr_done, | ||
1511 | msecs_to_jiffies(UIC_CMD_TIMEOUT))) { | ||
1512 | dev_err(hba->dev, | ||
1513 | "pwr mode change with mode 0x%x completion timeout\n", | ||
1514 | mode); | ||
1515 | ret = -ETIMEDOUT; | ||
1516 | goto out; | ||
1517 | } | ||
1518 | |||
1519 | status = ufshcd_get_upmcrs(hba); | ||
1520 | if (status != PWR_LOCAL) { | ||
1521 | dev_err(hba->dev, | ||
1522 | "pwr mode change failed, host umpcrs:0x%x\n", | ||
1523 | status); | ||
1524 | ret = (status != PWR_OK) ? status : -1; | ||
1525 | } | ||
1526 | out: | ||
1527 | spin_lock_irqsave(hba->host->host_lock, flags); | ||
1528 | hba->pwr_done = NULL; | ||
1529 | spin_unlock_irqrestore(hba->host->host_lock, flags); | ||
1530 | mutex_unlock(&hba->uic_cmd_mutex); | ||
1531 | return ret; | ||
1532 | } | ||
1533 | |||
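The mode byte packs the RX power mode into the upper nibble and the TX power mode into the lower nibble, which is how ufshcd_config_max_pwr_mode() below calls it. A minimal usage sketch:

	/* Sketch: request FASTAUTO in both directions (mode byte 0x44).
	 * FASTAUTO_MODE comes from the new unipro.h.
	 */
	static int example_enter_fastauto(struct ufs_hba *hba)
	{
		u8 mode = FASTAUTO_MODE << 4 | FASTAUTO_MODE;	/* RX << 4 | TX */

		return ufshcd_uic_change_pwr_mode(hba, mode);
	}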
1534 | /** | ||
1535 | * ufshcd_config_max_pwr_mode - Set and change power mode with | ||
1536 | * maximum capability attribute information. | ||
1537 | * @hba: per adapter instance | ||
1538 | * | ||
1539 | * Returns 0 on success, non-zero value on failure | ||
1540 | */ | ||
1541 | static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba) | ||
1542 | { | ||
1543 | enum {RX = 0, TX = 1}; | ||
1544 | u32 lanes[] = {1, 1}; | ||
1545 | u32 gear[] = {1, 1}; | ||
1546 | u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE}; | ||
1547 | int ret; | ||
1548 | |||
1549 | /* Get the connected lane count */ | ||
1550 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]); | ||
1551 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]); | ||
1552 | |||
1553 | /* | ||
1554 | * First, get the maximum HS gear. | ||
1555 | * A zero value means there is no HSGEAR capability. | ||
1556 | * Then, fall back to the maximum PWM gear. | ||
1557 | */ | ||
1558 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]); | ||
1559 | if (!gear[RX]) { | ||
1560 | ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]); | ||
1561 | pwr[RX] = SLOWAUTO_MODE; | ||
1562 | } | ||
1563 | |||
1564 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]); | ||
1565 | if (!gear[TX]) { | ||
1566 | ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), | ||
1567 | &gear[TX]); | ||
1568 | pwr[TX] = SLOWAUTO_MODE; | ||
1569 | } | ||
1570 | |||
1571 | /* | ||
1572 | * Configure the following attributes for the power mode change: | ||
1573 | * - PA_RXGEAR, PA_ACTIVERXDATALANES, PA_RXTERMINATION, | ||
1574 | * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION, | ||
1575 | * - PA_HSSERIES | ||
1576 | */ | ||
1577 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]); | ||
1578 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]); | ||
1579 | if (pwr[RX] == FASTAUTO_MODE) | ||
1580 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE); | ||
1581 | |||
1582 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]); | ||
1583 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]); | ||
1584 | if (pwr[TX] == FASTAUTO_MODE) | ||
1585 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE); | ||
1586 | |||
1587 | if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE) | ||
1588 | ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B); | ||
1589 | |||
1590 | ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]); | ||
1591 | if (ret) | ||
1592 | dev_err(hba->dev, | ||
1593 | "pwr_mode: power mode change failed %d\n", ret); | ||
1594 | |||
1595 | return ret; | ||
1596 | } | ||
1597 | |||
1598 | /** | ||
1365 | * ufshcd_complete_dev_init() - checks device readiness | 1599 | * ufshcd_complete_dev_init() - checks device readiness |
1366 | * @hba: per-adapter instance | 1600 | * @hba: per-adapter instance
1367 | * | 1601 | * |
@@ -1442,7 +1676,7 @@ static int ufshcd_make_hba_operational(struct ufs_hba *hba) | |||
1442 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); | 1676 | ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS); |
1443 | 1677 | ||
1444 | /* Configure interrupt aggregation */ | 1678 | /* Configure interrupt aggregation */ |
1445 | ufshcd_config_int_aggr(hba, INT_AGGR_CONFIG); | 1679 | ufshcd_config_intr_aggr(hba, hba->nutrs - 1, INT_AGGR_DEF_TO); |
1446 | 1680 | ||
1447 | /* Configure UTRL and UTMRL base address registers */ | 1681 | /* Configure UTRL and UTMRL base address registers */ |
1448 | ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), | 1682 | ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr), |
@@ -1788,32 +2022,24 @@ ufshcd_scsi_cmd_status(struct ufshcd_lrb *lrbp, int scsi_status) | |||
1788 | int result = 0; | 2022 | int result = 0; |
1789 | 2023 | ||
1790 | switch (scsi_status) { | 2024 | switch (scsi_status) { |
1791 | case SAM_STAT_GOOD: | ||
1792 | result |= DID_OK << 16 | | ||
1793 | COMMAND_COMPLETE << 8 | | ||
1794 | SAM_STAT_GOOD; | ||
1795 | break; | ||
1796 | case SAM_STAT_CHECK_CONDITION: | 2025 | case SAM_STAT_CHECK_CONDITION: |
2026 | ufshcd_copy_sense_data(lrbp); | ||
2027 | case SAM_STAT_GOOD: | ||
1797 | result |= DID_OK << 16 | | 2028 | result |= DID_OK << 16 | |
1798 | COMMAND_COMPLETE << 8 | | 2029 | COMMAND_COMPLETE << 8 | |
1799 | SAM_STAT_CHECK_CONDITION; | 2030 | scsi_status; |
1800 | ufshcd_copy_sense_data(lrbp); | ||
1801 | break; | ||
1802 | case SAM_STAT_BUSY: | ||
1803 | result |= SAM_STAT_BUSY; | ||
1804 | break; | 2031 | break; |
1805 | case SAM_STAT_TASK_SET_FULL: | 2032 | case SAM_STAT_TASK_SET_FULL: |
1806 | |||
1807 | /* | 2033 | /* |
1808 | * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue | 2034 | * If a LUN reports SAM_STAT_TASK_SET_FULL, then the LUN queue |
1809 | * depth needs to be adjusted to the exact number of | 2035 | * depth needs to be adjusted to the exact number of |
1810 | * outstanding commands the LUN can handle at any given time. | 2036 | * outstanding commands the LUN can handle at any given time. |
1811 | */ | 2037 | */ |
1812 | ufshcd_adjust_lun_qdepth(lrbp->cmd); | 2038 | ufshcd_adjust_lun_qdepth(lrbp->cmd); |
1813 | result |= SAM_STAT_TASK_SET_FULL; | 2039 | case SAM_STAT_BUSY: |
1814 | break; | ||
1815 | case SAM_STAT_TASK_ABORTED: | 2040 | case SAM_STAT_TASK_ABORTED: |
1816 | result |= SAM_STAT_TASK_ABORTED; | 2041 | ufshcd_copy_sense_data(lrbp); |
2042 | result |= scsi_status; | ||
1817 | break; | 2043 | break; |
1818 | default: | 2044 | default: |
1819 | result |= DID_ERROR << 16; | 2045 | result |= DID_ERROR << 16; |
@@ -1898,14 +2124,20 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) | |||
1898 | /** | 2124 | /** |
1899 | * ufshcd_uic_cmd_compl - handle completion of uic command | 2125 | * ufshcd_uic_cmd_compl - handle completion of uic command |
1900 | * @hba: per adapter instance | 2126 | * @hba: per adapter instance |
2127 | * @intr_status: interrupt status generated by the controller | ||
1901 | */ | 2128 | */ |
1902 | static void ufshcd_uic_cmd_compl(struct ufs_hba *hba) | 2129 | static void ufshcd_uic_cmd_compl(struct ufs_hba *hba, u32 intr_status) |
1903 | { | 2130 | { |
1904 | if (hba->active_uic_cmd) { | 2131 | if ((intr_status & UIC_COMMAND_COMPL) && hba->active_uic_cmd) { |
1905 | hba->active_uic_cmd->argument2 |= | 2132 | hba->active_uic_cmd->argument2 |= |
1906 | ufshcd_get_uic_cmd_result(hba); | 2133 | ufshcd_get_uic_cmd_result(hba); |
2134 | hba->active_uic_cmd->argument3 = | ||
2135 | ufshcd_get_dme_attr_val(hba); | ||
1907 | complete(&hba->active_uic_cmd->done); | 2136 | complete(&hba->active_uic_cmd->done); |
1908 | } | 2137 | } |
2138 | |||
2139 | if ((intr_status & UIC_POWER_MODE) && hba->pwr_done) | ||
2140 | complete(hba->pwr_done); | ||
1909 | } | 2141 | } |
1910 | 2142 | ||
1911 | /** | 2143 | /** |
@@ -1960,7 +2192,7 @@ static void ufshcd_transfer_req_compl(struct ufs_hba *hba) | |||
1960 | 2192 | ||
1961 | /* Reset interrupt aggregation counters */ | 2193 | /* Reset interrupt aggregation counters */ |
1962 | if (int_aggr_reset) | 2194 | if (int_aggr_reset) |
1963 | ufshcd_config_int_aggr(hba, INT_AGGR_RESET); | 2195 | ufshcd_reset_intr_aggr(hba); |
1964 | } | 2196 | } |
1965 | 2197 | ||
1966 | /** | 2198 | /** |
@@ -2251,8 +2483,8 @@ static void ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) | |||
2251 | if (hba->errors) | 2483 | if (hba->errors) |
2252 | ufshcd_err_handler(hba); | 2484 | ufshcd_err_handler(hba); |
2253 | 2485 | ||
2254 | if (intr_status & UIC_COMMAND_COMPL) | 2486 | if (intr_status & UFSHCD_UIC_MASK) |
2255 | ufshcd_uic_cmd_compl(hba); | 2487 | ufshcd_uic_cmd_compl(hba, intr_status); |
2256 | 2488 | ||
2257 | if (intr_status & UTP_TASK_REQ_COMPL) | 2489 | if (intr_status & UTP_TASK_REQ_COMPL) |
2258 | ufshcd_tmc_handler(hba); | 2490 | ufshcd_tmc_handler(hba); |
@@ -2494,6 +2726,8 @@ static void ufshcd_async_scan(void *data, async_cookie_t cookie) | |||
2494 | if (ret) | 2726 | if (ret) |
2495 | goto out; | 2727 | goto out; |
2496 | 2728 | ||
2729 | ufshcd_config_max_pwr_mode(hba); | ||
2730 | |||
2497 | ret = ufshcd_verify_dev_init(hba); | 2731 | ret = ufshcd_verify_dev_init(hba); |
2498 | if (ret) | 2732 | if (ret) |
2499 | goto out; | 2733 | goto out; |
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index 59c9c4848be1..577679a2d189 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h | |||
@@ -175,6 +175,7 @@ struct ufs_dev_cmd { | |||
175 | * @active_uic_cmd: handle of active UIC command | 175 | * @active_uic_cmd: handle of active UIC command |
176 | * @uic_cmd_mutex: mutex for uic command | 176 | * @uic_cmd_mutex: mutex for uic command |
177 | * @ufshcd_tm_wait_queue: wait queue for task management | 177 | * @ufshcd_tm_wait_queue: wait queue for task management |
178 | * @pwr_done: completion for power mode change | ||
178 | * @tm_condition: condition variable for task management | 179 | * @tm_condition: condition variable for task management |
179 | * @ufshcd_state: UFSHCD states | 180 | * @ufshcd_state: UFSHCD states |
180 | * @intr_mask: Interrupt Mask Bits | 181 | * @intr_mask: Interrupt Mask Bits |
@@ -219,6 +220,8 @@ struct ufs_hba { | |||
219 | wait_queue_head_t ufshcd_tm_wait_queue; | 220 | wait_queue_head_t ufshcd_tm_wait_queue; |
220 | unsigned long tm_condition; | 221 | unsigned long tm_condition; |
221 | 222 | ||
223 | struct completion *pwr_done; | ||
224 | |||
222 | u32 ufshcd_state; | 225 | u32 ufshcd_state; |
223 | u32 intr_mask; | 226 | u32 intr_mask; |
224 | u16 ee_ctrl_mask; | 227 | u16 ee_ctrl_mask; |
@@ -263,4 +266,55 @@ static inline void check_upiu_size(void) | |||
263 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); | 266 | extern int ufshcd_runtime_suspend(struct ufs_hba *hba); |
264 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); | 267 | extern int ufshcd_runtime_resume(struct ufs_hba *hba); |
265 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); | 268 | extern int ufshcd_runtime_idle(struct ufs_hba *hba); |
269 | extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel, | ||
270 | u8 attr_set, u32 mib_val, u8 peer); | ||
271 | extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel, | ||
272 | u32 *mib_val, u8 peer); | ||
273 | |||
274 | /* UIC command interfaces for DME primitives */ | ||
275 | #define DME_LOCAL 0 | ||
276 | #define DME_PEER 1 | ||
277 | #define ATTR_SET_NOR 0 /* NORMAL */ | ||
278 | #define ATTR_SET_ST 1 /* STATIC */ | ||
279 | |||
280 | static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel, | ||
281 | u32 mib_val) | ||
282 | { | ||
283 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | ||
284 | mib_val, DME_LOCAL); | ||
285 | } | ||
286 | |||
287 | static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel, | ||
288 | u32 mib_val) | ||
289 | { | ||
290 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | ||
291 | mib_val, DME_LOCAL); | ||
292 | } | ||
293 | |||
294 | static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel, | ||
295 | u32 mib_val) | ||
296 | { | ||
297 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR, | ||
298 | mib_val, DME_PEER); | ||
299 | } | ||
300 | |||
301 | static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel, | ||
302 | u32 mib_val) | ||
303 | { | ||
304 | return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST, | ||
305 | mib_val, DME_PEER); | ||
306 | } | ||
307 | |||
308 | static inline int ufshcd_dme_get(struct ufs_hba *hba, | ||
309 | u32 attr_sel, u32 *mib_val) | ||
310 | { | ||
311 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL); | ||
312 | } | ||
313 | |||
314 | static inline int ufshcd_dme_peer_get(struct ufs_hba *hba, | ||
315 | u32 attr_sel, u32 *mib_val) | ||
316 | { | ||
317 | return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER); | ||
318 | } | ||
319 | |||
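Taken together, these wrappers reduce most call sites to a single line. An illustrative sketch, assuming a hypothetical caller that mirrors the peer's PA_TACTIVATE locally (not something this patch does):

	/* Sketch: read an attribute from the peer and write it locally. */
	static inline int example_mirror_tactivate(struct ufs_hba *hba)
	{
		u32 peer_tactivate;
		int ret;

		ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE),
					  &peer_tactivate);
		if (ret)
			return ret;

		return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
				      peer_tactivate);
	}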
266 | #endif /* End of Header */ | 320 | #endif /* End of Header */ |
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h index f1e1b7459107..0475c6619a68 100644 --- a/drivers/scsi/ufs/ufshci.h +++ b/drivers/scsi/ufs/ufshci.h | |||
@@ -124,6 +124,9 @@ enum { | |||
124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) | 124 | #define CONTROLLER_FATAL_ERROR UFS_BIT(16) |
125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) | 125 | #define SYSTEM_BUS_FATAL_ERROR UFS_BIT(17) |
126 | 126 | ||
127 | #define UFSHCD_UIC_MASK (UIC_COMMAND_COMPL |\ | ||
128 | UIC_POWER_MODE) | ||
129 | |||
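With this mask the interrupt service path needs only a single test for both UIC events, as the ufshcd_sl_intr() hunk above shows:

	/* From the ufshcd_sl_intr() hunk above:
	 *
	 *	if (intr_status & UFSHCD_UIC_MASK)
	 *		ufshcd_uic_cmd_compl(hba, intr_status);
	 *
	 * ufshcd_uic_cmd_compl() then separates UIC_COMMAND_COMPL handling
	 * from the new UIC_POWER_MODE completion.
	 */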
127 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ | 130 | #define UFSHCD_ERROR_MASK (UIC_ERROR |\ |
128 | DEVICE_FATAL_ERROR |\ | 131 | DEVICE_FATAL_ERROR |\ |
129 | CONTROLLER_FATAL_ERROR |\ | 132 | CONTROLLER_FATAL_ERROR |\ |
@@ -142,6 +145,15 @@ enum { | |||
142 | #define DEVICE_ERROR_INDICATOR UFS_BIT(5) | 145 | #define DEVICE_ERROR_INDICATOR UFS_BIT(5) |
143 | #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) | 146 | #define UIC_POWER_MODE_CHANGE_REQ_STATUS_MASK UFS_MASK(0x7, 8) |
144 | 147 | ||
148 | enum { | ||
149 | PWR_OK = 0x0, | ||
150 | PWR_LOCAL = 0x01, | ||
151 | PWR_REMOTE = 0x02, | ||
152 | PWR_BUSY = 0x03, | ||
153 | PWR_ERROR_CAP = 0x04, | ||
154 | PWR_FATAL_ERROR = 0x05, | ||
155 | }; | ||
156 | |||
145 | /* HCE - Host Controller Enable 34h */ | 157 | /* HCE - Host Controller Enable 34h */ |
146 | #define CONTROLLER_ENABLE UFS_BIT(0) | 158 | #define CONTROLLER_ENABLE UFS_BIT(0) |
147 | #define CONTROLLER_DISABLE 0x0 | 159 | #define CONTROLLER_DISABLE 0x0 |
@@ -191,6 +203,12 @@ enum { | |||
191 | #define CONFIG_RESULT_CODE_MASK 0xFF | 203 | #define CONFIG_RESULT_CODE_MASK 0xFF |
192 | #define GENERIC_ERROR_CODE_MASK 0xFF | 204 | #define GENERIC_ERROR_CODE_MASK 0xFF |
193 | 205 | ||
206 | #define UIC_ARG_MIB_SEL(attr, sel) ((((attr) & 0xFFFF) << 16) |\ | ||
207 | ((sel) & 0xFFFF)) | ||
208 | #define UIC_ARG_MIB(attr) UIC_ARG_MIB_SEL(attr, 0) | ||
209 | #define UIC_ARG_ATTR_TYPE(t) (((t) & 0xFF) << 16) | ||
210 | #define UIC_GET_ATTR_ID(v) (((v) >> 16) & 0xFFFF) | ||
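The packing is easy to verify by hand; a worked example using PA_PWRMODE (0x1571) from the new unipro.h:

	/* UIC_ARG_MIB_SEL(0x1571, 0)  = ((0x1571 & 0xFFFF) << 16) | 0 = 0x15710000
	 * UIC_ARG_MIB(0x1571)         = UIC_ARG_MIB_SEL(0x1571, 0)   = 0x15710000
	 * UIC_GET_ATTR_ID(0x15710000) = (0x15710000 >> 16) & 0xFFFF  = 0x1571
	 */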
211 | |||
194 | /* UIC Commands */ | 212 | /* UIC Commands */ |
195 | enum { | 213 | enum { |
196 | UIC_CMD_DME_GET = 0x01, | 214 | UIC_CMD_DME_GET = 0x01, |
@@ -226,8 +244,8 @@ enum { | |||
226 | 244 | ||
227 | #define MASK_UIC_COMMAND_RESULT 0xFF | 245 | #define MASK_UIC_COMMAND_RESULT 0xFF |
228 | 246 | ||
229 | #define INT_AGGR_COUNTER_THRESHOLD_VALUE (0x1F << 8) | 247 | #define INT_AGGR_COUNTER_THLD_VAL(c) (((c) & 0x1F) << 8) |
230 | #define INT_AGGR_TIMEOUT_VALUE (0x02) | 248 | #define INT_AGGR_TIMEOUT_VAL(t) (((t) & 0xFF) << 0) |
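The parameterized forms compose the aggregation register value directly. A worked example matching the ufshcd_make_hba_operational() hunk above, assuming hba->nutrs - 1 = 31 and INT_AGGR_DEF_TO = 0x02 (the latter is defined in a companion hunk not shown here, so treat it as an assumption):

	/* INT_AGGR_COUNTER_THLD_VAL(31) = (31 & 0x1F) << 8 = 0x1F00
	 * INT_AGGR_TIMEOUT_VAL(0x02)    = (0x02 & 0xFF)    = 0x0002
	 * combined register value       = 0x1F00 | 0x0002  = 0x1F02
	 */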
231 | 249 | ||
232 | /* Interrupt disable masks */ | 250 | /* Interrupt disable masks */ |
233 | enum { | 251 | enum { |
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h new file mode 100644 index 000000000000..0bb8041c047a --- /dev/null +++ b/drivers/scsi/ufs/unipro.h | |||
@@ -0,0 +1,151 @@ | |||
1 | /* | ||
2 | * drivers/scsi/ufs/unipro.h | ||
3 | * | ||
4 | * Copyright (C) 2013 Samsung Electronics Co., Ltd. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _UNIPRO_H_ | ||
13 | #define _UNIPRO_H_ | ||
14 | |||
15 | /* | ||
16 | * PHY Adapter attributes | ||
17 | */ | ||
18 | #define PA_ACTIVETXDATALANES 0x1560 | ||
19 | #define PA_ACTIVERXDATALANES 0x1580 | ||
20 | #define PA_TXTRAILINGCLOCKS 0x1564 | ||
21 | #define PA_PHY_TYPE 0x1500 | ||
22 | #define PA_AVAILTXDATALANES 0x1520 | ||
23 | #define PA_AVAILRXDATALANES 0x1540 | ||
24 | #define PA_MINRXTRAILINGCLOCKS 0x1543 | ||
25 | #define PA_TXPWRSTATUS 0x1567 | ||
26 | #define PA_RXPWRSTATUS 0x1582 | ||
27 | #define PA_TXFORCECLOCK 0x1562 | ||
28 | #define PA_TXPWRMODE 0x1563 | ||
29 | #define PA_LEGACYDPHYESCDL 0x1570 | ||
30 | #define PA_MAXTXSPEEDFAST 0x1521 | ||
31 | #define PA_MAXTXSPEEDSLOW 0x1522 | ||
32 | #define PA_MAXRXSPEEDFAST 0x1541 | ||
33 | #define PA_MAXRXSPEEDSLOW 0x1542 | ||
34 | #define PA_TXLINKSTARTUPHS 0x1544 | ||
35 | #define PA_TXSPEEDFAST 0x1565 | ||
36 | #define PA_TXSPEEDSLOW 0x1566 | ||
37 | #define PA_REMOTEVERINFO 0x15A0 | ||
38 | #define PA_TXGEAR 0x1568 | ||
39 | #define PA_TXTERMINATION 0x1569 | ||
40 | #define PA_HSSERIES 0x156A | ||
41 | #define PA_PWRMODE 0x1571 | ||
42 | #define PA_RXGEAR 0x1583 | ||
43 | #define PA_RXTERMINATION 0x1584 | ||
44 | #define PA_MAXRXPWMGEAR 0x1586 | ||
45 | #define PA_MAXRXHSGEAR 0x1587 | ||
46 | #define PA_RXHSUNTERMCAP 0x15A5 | ||
47 | #define PA_RXLSTERMCAP 0x15A6 | ||
48 | #define PA_PACPREQTIMEOUT 0x1590 | ||
49 | #define PA_PACPREQEOBTIMEOUT 0x1591 | ||
50 | #define PA_HIBERN8TIME 0x15A7 | ||
51 | #define PA_LOCALVERINFO 0x15A9 | ||
52 | #define PA_TACTIVATE 0x15A8 | ||
53 | #define PA_PACPFRAMECOUNT 0x15C0 | ||
54 | #define PA_PACPERRORCOUNT 0x15C1 | ||
55 | #define PA_PHYTESTCONTROL 0x15C2 | ||
56 | #define PA_PWRMODEUSERDATA0 0x15B0 | ||
57 | #define PA_PWRMODEUSERDATA1 0x15B1 | ||
58 | #define PA_PWRMODEUSERDATA2 0x15B2 | ||
59 | #define PA_PWRMODEUSERDATA3 0x15B3 | ||
60 | #define PA_PWRMODEUSERDATA4 0x15B4 | ||
61 | #define PA_PWRMODEUSERDATA5 0x15B5 | ||
62 | #define PA_PWRMODEUSERDATA6 0x15B6 | ||
63 | #define PA_PWRMODEUSERDATA7 0x15B7 | ||
64 | #define PA_PWRMODEUSERDATA8 0x15B8 | ||
65 | #define PA_PWRMODEUSERDATA9 0x15B9 | ||
66 | #define PA_PWRMODEUSERDATA10 0x15BA | ||
67 | #define PA_PWRMODEUSERDATA11 0x15BB | ||
68 | #define PA_CONNECTEDTXDATALANES 0x1561 | ||
69 | #define PA_CONNECTEDRXDATALANES 0x1581 | ||
70 | #define PA_LOGICALLANEMAP 0x15A1 | ||
71 | #define PA_SLEEPNOCONFIGTIME 0x15A2 | ||
72 | #define PA_STALLNOCONFIGTIME 0x15A3 | ||
73 | #define PA_SAVECONFIGTIME 0x15A4 | ||
74 | |||
75 | /* PA power modes */ | ||
76 | enum { | ||
77 | FAST_MODE = 1, | ||
78 | SLOW_MODE = 2, | ||
79 | FASTAUTO_MODE = 4, | ||
80 | SLOWAUTO_MODE = 5, | ||
81 | UNCHANGED = 7, | ||
82 | }; | ||
83 | |||
84 | /* PA TX/RX Frequency Series */ | ||
85 | enum { | ||
86 | PA_HS_MODE_A = 1, | ||
87 | PA_HS_MODE_B = 2, | ||
88 | }; | ||
89 | |||
90 | /* | ||
91 | * Data Link Layer Attributes | ||
92 | */ | ||
93 | #define DL_TC0TXFCTHRESHOLD 0x2040 | ||
94 | #define DL_FC0PROTTIMEOUTVAL 0x2041 | ||
95 | #define DL_TC0REPLAYTIMEOUTVAL 0x2042 | ||
96 | #define DL_AFC0REQTIMEOUTVAL 0x2043 | ||
97 | #define DL_AFC0CREDITTHRESHOLD 0x2044 | ||
98 | #define DL_TC0OUTACKTHRESHOLD 0x2045 | ||
99 | #define DL_TC1TXFCTHRESHOLD 0x2060 | ||
100 | #define DL_FC1PROTTIMEOUTVAL 0x2061 | ||
101 | #define DL_TC1REPLAYTIMEOUTVAL 0x2062 | ||
102 | #define DL_AFC1REQTIMEOUTVAL 0x2063 | ||
103 | #define DL_AFC1CREDITTHRESHOLD 0x2064 | ||
104 | #define DL_TC1OUTACKTHRESHOLD 0x2065 | ||
105 | #define DL_TXPREEMPTIONCAP 0x2000 | ||
106 | #define DL_TC0TXMAXSDUSIZE 0x2001 | ||
107 | #define DL_TC0RXINITCREDITVAL 0x2002 | ||
108 | #define DL_TC0TXBUFFERSIZE 0x2005 | ||
109 | #define DL_PEERTC0PRESENT 0x2046 | ||
110 | #define DL_PEERTC0RXINITCREVAL 0x2047 | ||
111 | #define DL_TC1TXMAXSDUSIZE 0x2003 | ||
112 | #define DL_TC1RXINITCREDITVAL 0x2004 | ||
113 | #define DL_TC1TXBUFFERSIZE 0x2006 | ||
114 | #define DL_PEERTC1PRESENT 0x2066 | ||
115 | #define DL_PEERTC1RXINITCREVAL 0x2067 | ||
116 | |||
117 | /* | ||
118 | * Network Layer Attributes | ||
119 | */ | ||
120 | #define N_DEVICEID 0x3000 | ||
121 | #define N_DEVICEID_VALID 0x3001 | ||
122 | #define N_TC0TXMAXSDUSIZE 0x3020 | ||
123 | #define N_TC1TXMAXSDUSIZE 0x3021 | ||
124 | |||
125 | /* | ||
126 | * Transport Layer Attributes | ||
127 | */ | ||
128 | #define T_NUMCPORTS 0x4000 | ||
129 | #define T_NUMTESTFEATURES 0x4001 | ||
130 | #define T_CONNECTIONSTATE 0x4020 | ||
131 | #define T_PEERDEVICEID 0x4021 | ||
132 | #define T_PEERCPORTID 0x4022 | ||
133 | #define T_TRAFFICCLASS 0x4023 | ||
134 | #define T_PROTOCOLID 0x4024 | ||
135 | #define T_CPORTFLAGS 0x4025 | ||
136 | #define T_TXTOKENVALUE 0x4026 | ||
137 | #define T_RXTOKENVALUE 0x4027 | ||
138 | #define T_LOCALBUFFERSPACE 0x4028 | ||
139 | #define T_PEERBUFFERSPACE 0x4029 | ||
140 | #define T_CREDITSTOSEND 0x402A | ||
141 | #define T_CPORTMODE 0x402B | ||
142 | #define T_TC0TXMAXSDUSIZE 0x4060 | ||
143 | #define T_TC1TXMAXSDUSIZE 0x4061 | ||
144 | |||
145 | /* Boolean attribute values */ | ||
146 | enum { | ||
147 | FALSE = 0, | ||
148 | TRUE, | ||
149 | }; | ||
150 | |||
151 | #endif /* _UNIPRO_H_ */ | ||
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index a8c344422a77..d42f5785f098 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c | |||
@@ -481,7 +481,7 @@ static ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
481 | header.sec = now.tv_sec; | 481 | header.sec = now.tv_sec; |
482 | header.nsec = now.tv_nsec; | 482 | header.nsec = now.tv_nsec; |
483 | header.euid = current_euid(); | 483 | header.euid = current_euid(); |
484 | header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); | 484 | header.len = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD); |
485 | header.hdr_size = sizeof(struct logger_entry); | 485 | header.hdr_size = sizeof(struct logger_entry); |
486 | 486 | ||
487 | /* null writes succeed, return zero */ | 487 | /* null writes succeed, return zero */ |
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c index 253f02688f4f..bc534db12431 100644 --- a/drivers/staging/lustre/lustre/llite/file.c +++ b/drivers/staging/lustre/lustre/llite/file.c | |||
@@ -1009,7 +1009,7 @@ static ssize_t ll_file_read(struct file *file, char *buf, size_t count, | |||
1009 | local_iov->iov_len = count; | 1009 | local_iov->iov_len = count; |
1010 | init_sync_kiocb(kiocb, file); | 1010 | init_sync_kiocb(kiocb, file); |
1011 | kiocb->ki_pos = *ppos; | 1011 | kiocb->ki_pos = *ppos; |
1012 | kiocb->ki_left = count; | 1012 | kiocb->ki_nbytes = count; |
1013 | 1013 | ||
1014 | result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos); | 1014 | result = ll_file_aio_read(kiocb, local_iov, 1, kiocb->ki_pos); |
1015 | *ppos = kiocb->ki_pos; | 1015 | *ppos = kiocb->ki_pos; |
@@ -1068,7 +1068,7 @@ static ssize_t ll_file_write(struct file *file, const char *buf, size_t count, | |||
1068 | local_iov->iov_len = count; | 1068 | local_iov->iov_len = count; |
1069 | init_sync_kiocb(kiocb, file); | 1069 | init_sync_kiocb(kiocb, file); |
1070 | kiocb->ki_pos = *ppos; | 1070 | kiocb->ki_pos = *ppos; |
1071 | kiocb->ki_left = count; | 1071 | kiocb->ki_nbytes = count; |
1072 | 1072 | ||
1073 | result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos); | 1073 | result = ll_file_aio_write(kiocb, local_iov, 1, kiocb->ki_pos); |
1074 | *ppos = kiocb->ki_pos; | 1074 | *ppos = kiocb->ki_pos; |
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c index 465ef8e2cc91..b94c049ab0d0 100644 --- a/drivers/usb/gadget/inode.c +++ b/drivers/usb/gadget/inode.c | |||
@@ -524,7 +524,7 @@ struct kiocb_priv { | |||
524 | unsigned actual; | 524 | unsigned actual; |
525 | }; | 525 | }; |
526 | 526 | ||
527 | static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e) | 527 | static int ep_aio_cancel(struct kiocb *iocb) |
528 | { | 528 | { |
529 | struct kiocb_priv *priv = iocb->private; | 529 | struct kiocb_priv *priv = iocb->private; |
530 | struct ep_data *epdata; | 530 | struct ep_data *epdata; |
@@ -540,7 +540,6 @@ static int ep_aio_cancel(struct kiocb *iocb, struct io_event *e) | |||
540 | // spin_unlock(&epdata->dev->lock); | 540 | // spin_unlock(&epdata->dev->lock); |
541 | local_irq_enable(); | 541 | local_irq_enable(); |
542 | 542 | ||
543 | aio_put_req(iocb); | ||
544 | return value; | 543 | return value; |
545 | } | 544 | } |
546 | 545 | ||
@@ -709,11 +708,11 @@ ep_aio_read(struct kiocb *iocb, const struct iovec *iov, | |||
709 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) | 708 | if (unlikely(usb_endpoint_dir_in(&epdata->desc))) |
710 | return -EINVAL; | 709 | return -EINVAL; |
711 | 710 | ||
712 | buf = kmalloc(iocb->ki_left, GFP_KERNEL); | 711 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); |
713 | if (unlikely(!buf)) | 712 | if (unlikely(!buf)) |
714 | return -ENOMEM; | 713 | return -ENOMEM; |
715 | 714 | ||
716 | return ep_aio_rwtail(iocb, buf, iocb->ki_left, epdata, iov, nr_segs); | 715 | return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); |
717 | } | 716 | } |
718 | 717 | ||
719 | static ssize_t | 718 | static ssize_t |
@@ -728,7 +727,7 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
728 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) | 727 | if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) |
729 | return -EINVAL; | 728 | return -EINVAL; |
730 | 729 | ||
731 | buf = kmalloc(iocb->ki_left, GFP_KERNEL); | 730 | buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); |
732 | if (unlikely(!buf)) | 731 | if (unlikely(!buf)) |
733 | return -ENOMEM; | 732 | return -ENOMEM; |
734 | 733 | ||
diff --git a/fs/aio.c b/fs/aio.c --- a/fs/aio.c +++ b/fs/aio.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
27 | #include <linux/mman.h> | 27 | #include <linux/mman.h> |
28 | #include <linux/mmu_context.h> | 28 | #include <linux/mmu_context.h> |
29 | #include <linux/percpu.h> | ||
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
30 | #include <linux/timer.h> | 31 | #include <linux/timer.h> |
31 | #include <linux/aio.h> | 32 | #include <linux/aio.h> |
@@ -35,6 +36,10 @@ | |||
35 | #include <linux/eventfd.h> | 36 | #include <linux/eventfd.h> |
36 | #include <linux/blkdev.h> | 37 | #include <linux/blkdev.h> |
37 | #include <linux/compat.h> | 38 | #include <linux/compat.h> |
39 | #include <linux/anon_inodes.h> | ||
40 | #include <linux/migrate.h> | ||
41 | #include <linux/ramfs.h> | ||
42 | #include <linux/percpu-refcount.h> | ||
38 | 43 | ||
39 | #include <asm/kmap_types.h> | 44 | #include <asm/kmap_types.h> |
40 | #include <asm/uaccess.h> | 45 | #include <asm/uaccess.h> |
@@ -61,14 +66,29 @@ struct aio_ring { | |||
61 | 66 | ||
62 | #define AIO_RING_PAGES 8 | 67 | #define AIO_RING_PAGES 8 |
63 | 68 | ||
69 | struct kioctx_table { | ||
70 | struct rcu_head rcu; | ||
71 | unsigned nr; | ||
72 | struct kioctx *table[]; | ||
73 | }; | ||
74 | |||
75 | struct kioctx_cpu { | ||
76 | unsigned reqs_available; | ||
77 | }; | ||
78 | |||
64 | struct kioctx { | 79 | struct kioctx { |
65 | atomic_t users; | 80 | struct percpu_ref users; |
66 | atomic_t dead; | 81 | atomic_t dead; |
67 | 82 | ||
68 | /* This needs improving */ | ||
69 | unsigned long user_id; | 83 | unsigned long user_id; |
70 | struct hlist_node list; | ||
71 | 84 | ||
85 | struct kioctx_cpu __percpu *cpu; | ||
86 | |||
87 | /* | ||
88 | * For percpu reqs_available, number of slots we move to/from global | ||
89 | * counter at a time: | ||
90 | */ | ||
91 | unsigned req_batch; | ||
72 | /* | 92 | /* |
73 | * This is what userspace passed to io_setup(), it's not used for | 93 | * This is what userspace passed to io_setup(), it's not used for |
74 | * anything but counting against the global max_reqs quota. | 94 | * anything but counting against the global max_reqs quota. |
@@ -88,10 +108,18 @@ struct kioctx { | |||
88 | long nr_pages; | 108 | long nr_pages; |
89 | 109 | ||
90 | struct rcu_head rcu_head; | 110 | struct rcu_head rcu_head; |
91 | struct work_struct rcu_work; | 111 | struct work_struct free_work; |
92 | 112 | ||
93 | struct { | 113 | struct { |
94 | atomic_t reqs_active; | 114 | /* |
115 | * This counts the number of available slots in the ringbuffer, | ||
116 | * so we avoid overflowing it: it's decremented (if positive) | ||
117 | * when allocating a kiocb and incremented when the resulting | ||
118 | * io_event is pulled off the ringbuffer. | ||
119 | * | ||
120 | * We batch accesses to it with a percpu version. | ||
121 | */ | ||
122 | atomic_t reqs_available; | ||
95 | } ____cacheline_aligned_in_smp; | 123 | } ____cacheline_aligned_in_smp; |
96 | 124 | ||
97 | struct { | 125 | struct { |
@@ -110,6 +138,9 @@ struct kioctx { | |||
110 | } ____cacheline_aligned_in_smp; | 138 | } ____cacheline_aligned_in_smp; |
111 | 139 | ||
112 | struct page *internal_pages[AIO_RING_PAGES]; | 140 | struct page *internal_pages[AIO_RING_PAGES]; |
141 | struct file *aio_ring_file; | ||
142 | |||
143 | unsigned id; | ||
113 | }; | 144 | }; |
114 | 145 | ||
115 | /*------ sysctl variables----*/ | 146 | /*------ sysctl variables----*/ |
@@ -138,15 +169,77 @@ __initcall(aio_setup); | |||
138 | 169 | ||
139 | static void aio_free_ring(struct kioctx *ctx) | 170 | static void aio_free_ring(struct kioctx *ctx) |
140 | { | 171 | { |
141 | long i; | 172 | int i; |
173 | struct file *aio_ring_file = ctx->aio_ring_file; | ||
142 | 174 | ||
143 | for (i = 0; i < ctx->nr_pages; i++) | 175 | for (i = 0; i < ctx->nr_pages; i++) { |
176 | pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i, | ||
177 | page_count(ctx->ring_pages[i])); | ||
144 | put_page(ctx->ring_pages[i]); | 178 | put_page(ctx->ring_pages[i]); |
179 | } | ||
145 | 180 | ||
146 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) | 181 | if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) |
147 | kfree(ctx->ring_pages); | 182 | kfree(ctx->ring_pages); |
183 | |||
184 | if (aio_ring_file) { | ||
185 | truncate_setsize(aio_ring_file->f_inode, 0); | ||
186 | fput(aio_ring_file); | ||
187 | ctx->aio_ring_file = NULL; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | static int aio_ring_mmap(struct file *file, struct vm_area_struct *vma) | ||
192 | { | ||
193 | vma->vm_ops = &generic_file_vm_ops; | ||
194 | return 0; | ||
148 | } | 195 | } |
149 | 196 | ||
197 | static const struct file_operations aio_ring_fops = { | ||
198 | .mmap = aio_ring_mmap, | ||
199 | }; | ||
200 | |||
201 | static int aio_set_page_dirty(struct page *page) | ||
202 | { | ||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | #if IS_ENABLED(CONFIG_MIGRATION) | ||
207 | static int aio_migratepage(struct address_space *mapping, struct page *new, | ||
208 | struct page *old, enum migrate_mode mode) | ||
209 | { | ||
210 | struct kioctx *ctx = mapping->private_data; | ||
211 | unsigned long flags; | ||
212 | unsigned idx = old->index; | ||
213 | int rc; | ||
214 | |||
215 | /* Writeback must be complete */ | ||
216 | BUG_ON(PageWriteback(old)); | ||
217 | put_page(old); | ||
218 | |||
219 | rc = migrate_page_move_mapping(mapping, new, old, NULL, mode); | ||
220 | if (rc != MIGRATEPAGE_SUCCESS) { | ||
221 | get_page(old); | ||
222 | return rc; | ||
223 | } | ||
224 | |||
225 | get_page(new); | ||
226 | |||
227 | spin_lock_irqsave(&ctx->completion_lock, flags); | ||
228 | migrate_page_copy(new, old); | ||
229 | ctx->ring_pages[idx] = new; | ||
230 | spin_unlock_irqrestore(&ctx->completion_lock, flags); | ||
231 | |||
232 | return rc; | ||
233 | } | ||
234 | #endif | ||
235 | |||
236 | static const struct address_space_operations aio_ctx_aops = { | ||
237 | .set_page_dirty = aio_set_page_dirty, | ||
238 | #if IS_ENABLED(CONFIG_MIGRATION) | ||
239 | .migratepage = aio_migratepage, | ||
240 | #endif | ||
241 | }; | ||
242 | |||
150 | static int aio_setup_ring(struct kioctx *ctx) | 243 | static int aio_setup_ring(struct kioctx *ctx) |
151 | { | 244 | { |
152 | struct aio_ring *ring; | 245 | struct aio_ring *ring; |
@@ -154,20 +247,45 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
154 | struct mm_struct *mm = current->mm; | 247 | struct mm_struct *mm = current->mm; |
155 | unsigned long size, populate; | 248 | unsigned long size, populate; |
156 | int nr_pages; | 249 | int nr_pages; |
250 | int i; | ||
251 | struct file *file; | ||
157 | 252 | ||
158 | /* Compensate for the ring buffer's head/tail overlap entry */ | 253 | /* Compensate for the ring buffer's head/tail overlap entry */ |
159 | nr_events += 2; /* 1 is required, 2 for good luck */ | 254 | nr_events += 2; /* 1 is required, 2 for good luck */ |
160 | 255 | ||
161 | size = sizeof(struct aio_ring); | 256 | size = sizeof(struct aio_ring); |
162 | size += sizeof(struct io_event) * nr_events; | 257 | size += sizeof(struct io_event) * nr_events; |
163 | nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; | ||
164 | 258 | ||
259 | nr_pages = PFN_UP(size); | ||
165 | if (nr_pages < 0) | 260 | if (nr_pages < 0) |
166 | return -EINVAL; | 261 | return -EINVAL; |
167 | 262 | ||
168 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); | 263 | file = anon_inode_getfile_private("[aio]", &aio_ring_fops, ctx, O_RDWR); |
264 | if (IS_ERR(file)) { | ||
265 | ctx->aio_ring_file = NULL; | ||
266 | return -EAGAIN; | ||
267 | } | ||
268 | |||
269 | file->f_inode->i_mapping->a_ops = &aio_ctx_aops; | ||
270 | file->f_inode->i_mapping->private_data = ctx; | ||
271 | file->f_inode->i_size = PAGE_SIZE * (loff_t)nr_pages; | ||
272 | |||
273 | for (i = 0; i < nr_pages; i++) { | ||
274 | struct page *page; | ||
275 | page = find_or_create_page(file->f_inode->i_mapping, | ||
276 | i, GFP_HIGHUSER | __GFP_ZERO); | ||
277 | if (!page) | ||
278 | break; | ||
279 | pr_debug("pid(%d) page[%d]->count=%d\n", | ||
280 | current->pid, i, page_count(page)); | ||
281 | SetPageUptodate(page); | ||
282 | SetPageDirty(page); | ||
283 | unlock_page(page); | ||
284 | } | ||
285 | ctx->aio_ring_file = file; | ||
286 | nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) | ||
287 | / sizeof(struct io_event); | ||
169 | 288 | ||
170 | ctx->nr_events = 0; | ||
171 | ctx->ring_pages = ctx->internal_pages; | 289 | ctx->ring_pages = ctx->internal_pages; |
172 | if (nr_pages > AIO_RING_PAGES) { | 290 | if (nr_pages > AIO_RING_PAGES) { |
173 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), | 291 | ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), |
@@ -178,10 +296,11 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
178 | 296 | ||
179 | ctx->mmap_size = nr_pages * PAGE_SIZE; | 297 | ctx->mmap_size = nr_pages * PAGE_SIZE; |
180 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); | 298 | pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); |
299 | |||
181 | down_write(&mm->mmap_sem); | 300 | down_write(&mm->mmap_sem); |
182 | ctx->mmap_base = do_mmap_pgoff(NULL, 0, ctx->mmap_size, | 301 | ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, |
183 | PROT_READ|PROT_WRITE, | 302 | PROT_READ | PROT_WRITE, |
184 | MAP_ANONYMOUS|MAP_PRIVATE, 0, &populate); | 303 | MAP_SHARED | MAP_POPULATE, 0, &populate); |
185 | if (IS_ERR((void *)ctx->mmap_base)) { | 304 | if (IS_ERR((void *)ctx->mmap_base)) { |
186 | up_write(&mm->mmap_sem); | 305 | up_write(&mm->mmap_sem); |
187 | ctx->mmap_size = 0; | 306 | ctx->mmap_size = 0; |
@@ -190,23 +309,34 @@ static int aio_setup_ring(struct kioctx *ctx) | |||
190 | } | 309 | } |
191 | 310 | ||
192 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); | 311 | pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); |
312 | |||
313 | /* We must do this while still holding mmap_sem for write, as we | ||
314 | * need to be protected against userspace attempting to mremap() | ||
315 | * or munmap() the ring buffer. | ||
316 | */ | ||
193 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, | 317 | ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages, |
194 | 1, 0, ctx->ring_pages, NULL); | 318 | 1, 0, ctx->ring_pages, NULL); |
319 | |||
320 | /* Dropping the reference here is safe as the page cache will hold | ||
321 | * onto the pages for us. It is also required so that page migration | ||
322 | * can unmap the pages and get the right reference count. | ||
323 | */ | ||
324 | for (i = 0; i < ctx->nr_pages; i++) | ||
325 | put_page(ctx->ring_pages[i]); | ||
326 | |||
195 | up_write(&mm->mmap_sem); | 327 | up_write(&mm->mmap_sem); |
196 | 328 | ||
197 | if (unlikely(ctx->nr_pages != nr_pages)) { | 329 | if (unlikely(ctx->nr_pages != nr_pages)) { |
198 | aio_free_ring(ctx); | 330 | aio_free_ring(ctx); |
199 | return -EAGAIN; | 331 | return -EAGAIN; |
200 | } | 332 | } |
201 | if (populate) | ||
202 | mm_populate(ctx->mmap_base, populate); | ||
203 | 333 | ||
204 | ctx->user_id = ctx->mmap_base; | 334 | ctx->user_id = ctx->mmap_base; |
205 | ctx->nr_events = nr_events; /* trusted copy */ | 335 | ctx->nr_events = nr_events; /* trusted copy */ |
206 | 336 | ||
207 | ring = kmap_atomic(ctx->ring_pages[0]); | 337 | ring = kmap_atomic(ctx->ring_pages[0]); |
208 | ring->nr = nr_events; /* user copy */ | 338 | ring->nr = nr_events; /* user copy */ |
209 | ring->id = ctx->user_id; | 339 | ring->id = ~0U; |
210 | ring->head = ring->tail = 0; | 340 | ring->head = ring->tail = 0; |
211 | ring->magic = AIO_RING_MAGIC; | 341 | ring->magic = AIO_RING_MAGIC; |
212 | ring->compat_features = AIO_RING_COMPAT_FEATURES; | 342 | ring->compat_features = AIO_RING_COMPAT_FEATURES; |
@@ -238,11 +368,9 @@ void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel) | |||
238 | } | 368 | } |
239 | EXPORT_SYMBOL(kiocb_set_cancel_fn); | 369 | EXPORT_SYMBOL(kiocb_set_cancel_fn); |
240 | 370 | ||
241 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, | 371 | static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb) |
242 | struct io_event *res) | ||
243 | { | 372 | { |
244 | kiocb_cancel_fn *old, *cancel; | 373 | kiocb_cancel_fn *old, *cancel; |
245 | int ret = -EINVAL; | ||
246 | 374 | ||
247 | /* | 375 | /* |
248 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it | 376 | * Don't want to set kiocb->ki_cancel = KIOCB_CANCELLED unless it |
@@ -252,28 +380,20 @@ static int kiocb_cancel(struct kioctx *ctx, struct kiocb *kiocb, | |||
252 | cancel = ACCESS_ONCE(kiocb->ki_cancel); | 380 | cancel = ACCESS_ONCE(kiocb->ki_cancel); |
253 | do { | 381 | do { |
254 | if (!cancel || cancel == KIOCB_CANCELLED) | 382 | if (!cancel || cancel == KIOCB_CANCELLED) |
255 | return ret; | 383 | return -EINVAL; |
256 | 384 | ||
257 | old = cancel; | 385 | old = cancel; |
258 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); | 386 | cancel = cmpxchg(&kiocb->ki_cancel, old, KIOCB_CANCELLED); |
259 | } while (cancel != old); | 387 | } while (cancel != old); |
260 | 388 | ||
261 | atomic_inc(&kiocb->ki_users); | 389 | return cancel(kiocb); |
262 | spin_unlock_irq(&ctx->ctx_lock); | ||
263 | |||
264 | memset(res, 0, sizeof(*res)); | ||
265 | res->obj = (u64)(unsigned long)kiocb->ki_obj.user; | ||
266 | res->data = kiocb->ki_user_data; | ||
267 | ret = cancel(kiocb, res); | ||
268 | |||
269 | spin_lock_irq(&ctx->ctx_lock); | ||
270 | |||
271 | return ret; | ||
272 | } | 390 | } |
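The cmpxchg() loop is the usual claim-exactly-once idiom: only the thread that transitions ki_cancel from a live callback to KIOCB_CANCELLED gets to invoke it, so a cancel function can never run twice. A distilled sketch of the pattern with hypothetical names:

	typedef int (*callback_fn)(void *arg);

	#define SLOT_CLAIMED	((callback_fn)~0UL)	/* sentinel, like KIOCB_CANCELLED */

	/* Sketch: atomically claim a callback slot; only the winner calls it. */
	static int claim_and_call(callback_fn *slot, void *arg)
	{
		callback_fn old, cur = ACCESS_ONCE(*slot);

		do {
			if (!cur || cur == SLOT_CLAIMED)
				return -EINVAL;		/* empty, or someone beat us */
			old = cur;
			cur = cmpxchg(slot, old, SLOT_CLAIMED);
		} while (cur != old);

		return old(arg);
	}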
273 | 391 | ||
274 | static void free_ioctx_rcu(struct rcu_head *head) | 392 | static void free_ioctx_rcu(struct rcu_head *head) |
275 | { | 393 | { |
276 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | 394 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); |
395 | |||
396 | free_percpu(ctx->cpu); | ||
277 | kmem_cache_free(kioctx_cachep, ctx); | 397 | kmem_cache_free(kioctx_cachep, ctx); |
278 | } | 398 | } |
279 | 399 | ||
@@ -282,12 +402,13 @@ static void free_ioctx_rcu(struct rcu_head *head) | |||
282 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - | 402 | * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - |
283 | * now it's safe to cancel any that need to be. | 403 | * now it's safe to cancel any that need to be. |
284 | */ | 404 | */ |
285 | static void free_ioctx(struct kioctx *ctx) | 405 | static void free_ioctx(struct work_struct *work) |
286 | { | 406 | { |
407 | struct kioctx *ctx = container_of(work, struct kioctx, free_work); | ||
287 | struct aio_ring *ring; | 408 | struct aio_ring *ring; |
288 | struct io_event res; | ||
289 | struct kiocb *req; | 409 | struct kiocb *req; |
290 | unsigned head, avail; | 410 | unsigned cpu, avail; |
411 | DEFINE_WAIT(wait); | ||
291 | 412 | ||
292 | spin_lock_irq(&ctx->ctx_lock); | 413 | spin_lock_irq(&ctx->ctx_lock); |
293 | 414 | ||
@@ -296,28 +417,38 @@ static void free_ioctx(struct kioctx *ctx) | |||
296 | struct kiocb, ki_list); | 417 | struct kiocb, ki_list); |
297 | 418 | ||
298 | list_del_init(&req->ki_list); | 419 | list_del_init(&req->ki_list); |
299 | kiocb_cancel(ctx, req, &res); | 420 | kiocb_cancel(ctx, req); |
300 | } | 421 | } |
301 | 422 | ||
302 | spin_unlock_irq(&ctx->ctx_lock); | 423 | spin_unlock_irq(&ctx->ctx_lock); |
303 | 424 | ||
304 | ring = kmap_atomic(ctx->ring_pages[0]); | 425 | for_each_possible_cpu(cpu) { |
305 | head = ring->head; | 426 | struct kioctx_cpu *kcpu = per_cpu_ptr(ctx->cpu, cpu); |
306 | kunmap_atomic(ring); | ||
307 | 427 | ||
308 | while (atomic_read(&ctx->reqs_active) > 0) { | 428 | atomic_add(kcpu->reqs_available, &ctx->reqs_available); |
309 | wait_event(ctx->wait, | 429 | kcpu->reqs_available = 0; |
310 | head != ctx->tail || | 430 | } |
311 | atomic_read(&ctx->reqs_active) <= 0); | ||
312 | 431 | ||
313 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; | 432 | while (1) { |
433 | prepare_to_wait(&ctx->wait, &wait, TASK_UNINTERRUPTIBLE); | ||
314 | 434 | ||
315 | atomic_sub(avail, &ctx->reqs_active); | 435 | ring = kmap_atomic(ctx->ring_pages[0]); |
316 | head += avail; | 436 | avail = (ring->head <= ring->tail) |
317 | head %= ctx->nr_events; | 437 | ? ring->tail - ring->head |
438 | : ctx->nr_events - ring->head + ring->tail; | ||
439 | |||
440 | atomic_add(avail, &ctx->reqs_available); | ||
441 | ring->head = ring->tail; | ||
442 | kunmap_atomic(ring); | ||
443 | |||
444 | if (atomic_read(&ctx->reqs_available) >= ctx->nr_events - 1) | ||
445 | break; | ||
446 | |||
447 | schedule(); | ||
318 | } | 448 | } |
449 | finish_wait(&ctx->wait, &wait); | ||
319 | 450 | ||
320 | WARN_ON(atomic_read(&ctx->reqs_active) < 0); | 451 | WARN_ON(atomic_read(&ctx->reqs_available) > ctx->nr_events - 1); |
321 | 452 | ||
322 | aio_free_ring(ctx); | 453 | aio_free_ring(ctx); |
323 | 454 | ||
@@ -333,10 +464,68 @@ static void free_ioctx(struct kioctx *ctx) | |||
333 | call_rcu(&ctx->rcu_head, free_ioctx_rcu); | 464 | call_rcu(&ctx->rcu_head, free_ioctx_rcu); |
334 | } | 465 | } |
335 | 466 | ||
336 | static void put_ioctx(struct kioctx *ctx) | 467 | static void free_ioctx_ref(struct percpu_ref *ref) |
337 | { | 468 | { |
338 | if (unlikely(atomic_dec_and_test(&ctx->users))) | 469 | struct kioctx *ctx = container_of(ref, struct kioctx, users); |
339 | free_ioctx(ctx); | 470 | |
471 | INIT_WORK(&ctx->free_work, free_ioctx); | ||
472 | schedule_work(&ctx->free_work); | ||
473 | } | ||
474 | |||
475 | static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) | ||
476 | { | ||
477 | unsigned i, new_nr; | ||
478 | struct kioctx_table *table, *old; | ||
479 | struct aio_ring *ring; | ||
480 | |||
481 | spin_lock(&mm->ioctx_lock); | ||
482 | rcu_read_lock(); | ||
483 | table = rcu_dereference(mm->ioctx_table); | ||
484 | |||
485 | while (1) { | ||
486 | if (table) | ||
487 | for (i = 0; i < table->nr; i++) | ||
488 | if (!table->table[i]) { | ||
489 | ctx->id = i; | ||
490 | table->table[i] = ctx; | ||
491 | rcu_read_unlock(); | ||
492 | spin_unlock(&mm->ioctx_lock); | ||
493 | |||
494 | ring = kmap_atomic(ctx->ring_pages[0]); | ||
495 | ring->id = ctx->id; | ||
496 | kunmap_atomic(ring); | ||
497 | return 0; | ||
498 | } | ||
499 | |||
500 | new_nr = (table ? table->nr : 1) * 4; | ||
501 | |||
502 | rcu_read_unlock(); | ||
503 | spin_unlock(&mm->ioctx_lock); | ||
504 | |||
505 | table = kzalloc(sizeof(*table) + sizeof(struct kioctx *) * | ||
506 | new_nr, GFP_KERNEL); | ||
507 | if (!table) | ||
508 | return -ENOMEM; | ||
509 | |||
510 | table->nr = new_nr; | ||
511 | |||
512 | spin_lock(&mm->ioctx_lock); | ||
513 | rcu_read_lock(); | ||
514 | old = rcu_dereference(mm->ioctx_table); | ||
515 | |||
516 | if (!old) { | ||
517 | rcu_assign_pointer(mm->ioctx_table, table); | ||
518 | } else if (table->nr > old->nr) { | ||
519 | memcpy(table->table, old->table, | ||
520 | old->nr * sizeof(struct kioctx *)); | ||
521 | |||
522 | rcu_assign_pointer(mm->ioctx_table, table); | ||
523 | kfree_rcu(old, rcu); | ||
524 | } else { | ||
525 | kfree(table); | ||
526 | table = old; | ||
527 | } | ||
528 | } | ||
340 | } | 529 | } |
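The grow-by-4x loop is the standard RCU resize pattern: readers always observe either the old or the new table, and the old one is freed only after a grace period. A distilled sketch of the same pattern with hypothetical types, assuming the caller serializes writers (ioctx_add_table() uses mm->ioctx_lock for that):

	struct ptr_table {
		struct rcu_head rcu;
		unsigned nr;
		void *slot[];
	};

	/* Sketch: copy-on-grow table published with RCU; single writer. */
	static int table_grow(struct ptr_table __rcu **loc)
	{
		struct ptr_table *old = rcu_dereference_protected(*loc, true);
		unsigned new_nr = (old ? old->nr : 1) * 4;
		struct ptr_table *new;

		new = kzalloc(sizeof(*new) + new_nr * sizeof(void *), GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		new->nr = new_nr;
		if (old)
			memcpy(new->slot, old->slot, old->nr * sizeof(void *));

		rcu_assign_pointer(*loc, new);		/* readers switch over here */
		if (old)
			kfree_rcu(old, rcu);		/* freed after a grace period */
		return 0;
	}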
341 | 530 | ||
342 | /* ioctx_alloc | 531 | /* ioctx_alloc |
@@ -348,6 +537,18 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
348 | struct kioctx *ctx; | 537 | struct kioctx *ctx; |
349 | int err = -ENOMEM; | 538 | int err = -ENOMEM; |
350 | 539 | ||
540 | /* | ||
541 | * We keep track of the number of available ringbuffer slots, to prevent | ||
542 | * overflow (reqs_available), and we also use percpu counters for this. | ||
543 | * | ||
544 | * So since up to half the slots might be on other CPUs' percpu counters | ||
545 | * and unavailable, double nr_events so userspace sees what they | ||
546 | * expected: additionally, we move req_batch slots to/from percpu | ||
547 | * counters at a time, so make sure that isn't 0: | ||
548 | */ | ||
549 | nr_events = max(nr_events, num_possible_cpus() * 4); | ||
550 | nr_events *= 2; | ||
551 | |||
351 | /* Prevent overflows */ | 552 | /* Prevent overflows */ |
352 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || | 553 | if ((nr_events > (0x10000000U / sizeof(struct io_event))) || |
353 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { | 554 | (nr_events > (0x10000000U / sizeof(struct kiocb)))) { |
@@ -355,7 +556,7 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
355 | return ERR_PTR(-EINVAL); | 556 | return ERR_PTR(-EINVAL); |
356 | } | 557 | } |
357 | 558 | ||
358 | if (!nr_events || (unsigned long)nr_events > aio_max_nr) | 559 | if (!nr_events || (unsigned long)nr_events > (aio_max_nr * 2UL)) |
359 | return ERR_PTR(-EAGAIN); | 560 | return ERR_PTR(-EAGAIN); |
360 | 561 | ||
361 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); | 562 | ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); |
@@ -364,8 +565,9 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
364 | 565 | ||
365 | ctx->max_reqs = nr_events; | 566 | ctx->max_reqs = nr_events; |
366 | 567 | ||
367 | atomic_set(&ctx->users, 2); | 568 | if (percpu_ref_init(&ctx->users, free_ioctx_ref)) |
368 | atomic_set(&ctx->dead, 0); | 569 | goto out_freectx; |
570 | |||
369 | spin_lock_init(&ctx->ctx_lock); | 571 | spin_lock_init(&ctx->ctx_lock); |
370 | spin_lock_init(&ctx->completion_lock); | 572 | spin_lock_init(&ctx->completion_lock); |
371 | mutex_init(&ctx->ring_lock); | 573 | mutex_init(&ctx->ring_lock); |
@@ -373,12 +575,21 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
373 | 575 | ||
374 | INIT_LIST_HEAD(&ctx->active_reqs); | 576 | INIT_LIST_HEAD(&ctx->active_reqs); |
375 | 577 | ||
578 | ctx->cpu = alloc_percpu(struct kioctx_cpu); | ||
579 | if (!ctx->cpu) | ||
580 | goto out_freeref; | ||
581 | |||
376 | if (aio_setup_ring(ctx) < 0) | 582 | if (aio_setup_ring(ctx) < 0) |
377 | goto out_freectx; | 583 | goto out_freepcpu; |
584 | |||
585 | atomic_set(&ctx->reqs_available, ctx->nr_events - 1); | ||
586 | ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); | ||
587 | if (ctx->req_batch < 1) | ||
588 | ctx->req_batch = 1; | ||
378 | 589 | ||
379 | /* limit the number of system wide aios */ | 590 | /* limit the number of system wide aios */ |
380 | spin_lock(&aio_nr_lock); | 591 | spin_lock(&aio_nr_lock); |
381 | if (aio_nr + nr_events > aio_max_nr || | 592 | if (aio_nr + nr_events > (aio_max_nr * 2UL) || |
382 | aio_nr + nr_events < aio_nr) { | 593 | aio_nr + nr_events < aio_nr) { |
383 | spin_unlock(&aio_nr_lock); | 594 | spin_unlock(&aio_nr_lock); |
384 | goto out_cleanup; | 595 | goto out_cleanup; |
@@ -386,49 +597,54 @@ static struct kioctx *ioctx_alloc(unsigned nr_events) | |||
386 | aio_nr += ctx->max_reqs; | 597 | aio_nr += ctx->max_reqs; |
387 | spin_unlock(&aio_nr_lock); | 598 | spin_unlock(&aio_nr_lock); |
388 | 599 | ||
389 | /* now link into global list. */ | 600 | percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ |
390 | spin_lock(&mm->ioctx_lock); | 601 | |
391 | hlist_add_head_rcu(&ctx->list, &mm->ioctx_list); | 602 | err = ioctx_add_table(ctx, mm); |
392 | spin_unlock(&mm->ioctx_lock); | 603 | if (err) |
604 | goto out_cleanup_put; | ||
393 | 605 | ||
394 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", | 606 | pr_debug("allocated ioctx %p[%ld]: mm=%p mask=0x%x\n", |
395 | ctx, ctx->user_id, mm, ctx->nr_events); | 607 | ctx, ctx->user_id, mm, ctx->nr_events); |
396 | return ctx; | 608 | return ctx; |
397 | 609 | ||
610 | out_cleanup_put: | ||
611 | percpu_ref_put(&ctx->users); | ||
398 | out_cleanup: | 612 | out_cleanup: |
399 | err = -EAGAIN; | 613 | err = -EAGAIN; |
400 | aio_free_ring(ctx); | 614 | aio_free_ring(ctx); |
615 | out_freepcpu: | ||
616 | free_percpu(ctx->cpu); | ||
617 | out_freeref: | ||
618 | free_percpu(ctx->users.pcpu_count); | ||
401 | out_freectx: | 619 | out_freectx: |
620 | if (ctx->aio_ring_file) | ||
621 | fput(ctx->aio_ring_file); | ||
402 | kmem_cache_free(kioctx_cachep, ctx); | 622 | kmem_cache_free(kioctx_cachep, ctx); |
403 | pr_debug("error allocating ioctx %d\n", err); | 623 | pr_debug("error allocating ioctx %d\n", err); |
404 | return ERR_PTR(err); | 624 | return ERR_PTR(err); |
405 | } | 625 | } |
406 | 626 | ||
407 | static void kill_ioctx_work(struct work_struct *work) | ||
408 | { | ||
409 | struct kioctx *ctx = container_of(work, struct kioctx, rcu_work); | ||
410 | |||
411 | wake_up_all(&ctx->wait); | ||
412 | put_ioctx(ctx); | ||
413 | } | ||
414 | |||
415 | static void kill_ioctx_rcu(struct rcu_head *head) | ||
416 | { | ||
417 | struct kioctx *ctx = container_of(head, struct kioctx, rcu_head); | ||
418 | |||
419 | INIT_WORK(&ctx->rcu_work, kill_ioctx_work); | ||
420 | schedule_work(&ctx->rcu_work); | ||
421 | } | ||
422 | |||
423 | /* kill_ioctx | 627 | /* kill_ioctx |
424 | * Cancels all outstanding aio requests on an aio context. Used | 628 | * Cancels all outstanding aio requests on an aio context. Used |
425 | * when the processes owning a context have all exited to encourage | 629 | * when the processes owning a context have all exited to encourage |
426 | * the rapid destruction of the kioctx. | 630 | * the rapid destruction of the kioctx. |
427 | */ | 631 | */ |
428 | static void kill_ioctx(struct kioctx *ctx) | 632 | static void kill_ioctx(struct mm_struct *mm, struct kioctx *ctx) |
429 | { | 633 | { |
430 | if (!atomic_xchg(&ctx->dead, 1)) { | 634 | if (!atomic_xchg(&ctx->dead, 1)) { |
431 | hlist_del_rcu(&ctx->list); | 635 | struct kioctx_table *table; |
636 | |||
637 | spin_lock(&mm->ioctx_lock); | ||
638 | rcu_read_lock(); | ||
639 | table = rcu_dereference(mm->ioctx_table); | ||
640 | |||
641 | WARN_ON(ctx != table->table[ctx->id]); | ||
642 | table->table[ctx->id] = NULL; | ||
643 | rcu_read_unlock(); | ||
644 | spin_unlock(&mm->ioctx_lock); | ||
645 | |||
646 | /* percpu_ref_kill() will do the necessary call_rcu() */ | ||
647 | wake_up_all(&ctx->wait); | ||
432 | 648 | ||
433 | /* | 649 | /* |
434 | * It'd be more correct to do this in free_ioctx(), after all | 650 | * It'd be more correct to do this in free_ioctx(), after all |
@@ -445,24 +661,23 @@ static void kill_ioctx(struct kioctx *ctx) | |||
445 | if (ctx->mmap_size) | 661 | if (ctx->mmap_size) |
446 | vm_munmap(ctx->mmap_base, ctx->mmap_size); | 662 | vm_munmap(ctx->mmap_base, ctx->mmap_size); |
447 | 663 | ||
448 | /* Between hlist_del_rcu() and dropping the initial ref */ | 664 | percpu_ref_kill(&ctx->users); |
449 | call_rcu(&ctx->rcu_head, kill_ioctx_rcu); | ||
450 | } | 665 | } |
451 | } | 666 | } |
452 | 667 | ||
453 | /* wait_on_sync_kiocb: | 668 | /* wait_on_sync_kiocb: |
454 | * Waits on the given sync kiocb to complete. | 669 | * Waits on the given sync kiocb to complete. |
455 | */ | 670 | */ |
456 | ssize_t wait_on_sync_kiocb(struct kiocb *iocb) | 671 | ssize_t wait_on_sync_kiocb(struct kiocb *req) |
457 | { | 672 | { |
458 | while (atomic_read(&iocb->ki_users)) { | 673 | while (!req->ki_ctx) { |
459 | set_current_state(TASK_UNINTERRUPTIBLE); | 674 | set_current_state(TASK_UNINTERRUPTIBLE); |
460 | if (!atomic_read(&iocb->ki_users)) | 675 | if (req->ki_ctx) |
461 | break; | 676 | break; |
462 | io_schedule(); | 677 | io_schedule(); |
463 | } | 678 | } |
464 | __set_current_state(TASK_RUNNING); | 679 | __set_current_state(TASK_RUNNING); |
465 | return iocb->ki_user_data; | 680 | return req->ki_user_data; |
466 | } | 681 | } |
467 | EXPORT_SYMBOL(wait_on_sync_kiocb); | 682 | EXPORT_SYMBOL(wait_on_sync_kiocb); |
468 | 683 | ||
@@ -476,16 +691,28 @@ EXPORT_SYMBOL(wait_on_sync_kiocb); | |||
476 | */ | 691 | */ |
477 | void exit_aio(struct mm_struct *mm) | 692 | void exit_aio(struct mm_struct *mm) |
478 | { | 693 | { |
694 | struct kioctx_table *table; | ||
479 | struct kioctx *ctx; | 695 | struct kioctx *ctx; |
480 | struct hlist_node *n; | 696 | unsigned i = 0; |
481 | 697 | ||
482 | hlist_for_each_entry_safe(ctx, n, &mm->ioctx_list, list) { | 698 | while (1) { |
483 | if (1 != atomic_read(&ctx->users)) | 699 | rcu_read_lock(); |
484 | printk(KERN_DEBUG | 700 | table = rcu_dereference(mm->ioctx_table); |
485 | "exit_aio:ioctx still alive: %d %d %d\n", | 701 | |
486 | atomic_read(&ctx->users), | 702 | do { |
487 | atomic_read(&ctx->dead), | 703 | if (!table || i >= table->nr) { |
488 | atomic_read(&ctx->reqs_active)); | 704 | rcu_read_unlock(); |
705 | rcu_assign_pointer(mm->ioctx_table, NULL); | ||
706 | if (table) | ||
707 | kfree(table); | ||
708 | return; | ||
709 | } | ||
710 | |||
711 | ctx = table->table[i++]; | ||
712 | } while (!ctx); | ||
713 | |||
714 | rcu_read_unlock(); | ||
715 | |||
489 | /* | 716 | /* |
490 | * We don't need to bother with munmap() here - | 717 | * We don't need to bother with munmap() here - |
491 | * exit_mmap(mm) is coming and it'll unmap everything. | 718 | * exit_mmap(mm) is coming and it'll unmap everything. |
@@ -496,40 +723,75 @@ void exit_aio(struct mm_struct *mm) | |||
496 | */ | 723 | */ |
497 | ctx->mmap_size = 0; | 724 | ctx->mmap_size = 0; |
498 | 725 | ||
499 | kill_ioctx(ctx); | 726 | kill_ioctx(mm, ctx); |
727 | } | ||
728 | } | ||
729 | |||
730 | static void put_reqs_available(struct kioctx *ctx, unsigned nr) | ||
731 | { | ||
732 | struct kioctx_cpu *kcpu; | ||
733 | |||
734 | preempt_disable(); | ||
735 | kcpu = this_cpu_ptr(ctx->cpu); | ||
736 | |||
737 | kcpu->reqs_available += nr; | ||
738 | while (kcpu->reqs_available >= ctx->req_batch * 2) { | ||
739 | kcpu->reqs_available -= ctx->req_batch; | ||
740 | atomic_add(ctx->req_batch, &ctx->reqs_available); | ||
741 | } | ||
742 | |||
743 | preempt_enable(); | ||
744 | } | ||
745 | |||
746 | static bool get_reqs_available(struct kioctx *ctx) | ||
747 | { | ||
748 | struct kioctx_cpu *kcpu; | ||
749 | bool ret = false; | ||
750 | |||
751 | preempt_disable(); | ||
752 | kcpu = this_cpu_ptr(ctx->cpu); | ||
753 | |||
754 | if (!kcpu->reqs_available) { | ||
755 | int old, avail = atomic_read(&ctx->reqs_available); | ||
756 | |||
757 | do { | ||
758 | if (avail < ctx->req_batch) | ||
759 | goto out; | ||
760 | |||
761 | old = avail; | ||
762 | avail = atomic_cmpxchg(&ctx->reqs_available, | ||
763 | avail, avail - ctx->req_batch); | ||
764 | } while (avail != old); | ||
765 | |||
766 | kcpu->reqs_available += ctx->req_batch; | ||
500 | } | 767 | } |
768 | |||
769 | ret = true; | ||
770 | kcpu->reqs_available--; | ||
771 | out: | ||
772 | preempt_enable(); | ||
773 | return ret; | ||
501 | } | 774 | } |
502 | 775 | ||
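put_reqs_available()/get_reqs_available() above split the request-slot counter into one global atomic plus small per-CPU caches, drained and refilled in units of ctx->req_batch so the shared atomic is touched once per batch rather than once per request. A worked example of the flow (req_batch == 32 is assumed purely for illustration):

	get_reqs_available(ctx);
		/* kcpu->reqs_available == 0, global reqs_available == 100  */
		/* cmpxchg moves one batch: global 100 -> 68, local 0 -> 32 */
		/* take one slot: local 32 -> 31, return true               */

	put_reqs_available(ctx, 1);
		/* local 31 -> 32; still below 2 * req_batch == 64, so the  */
		/* slot stays cached on this CPU with no atomic op at all   */

The 2 * req_batch high-water mark in put_reqs_available() keeps completions that all land on one CPU from hoarding every slot there while submitters on other CPUs see an empty global pool.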
503 | /* aio_get_req | 776 | /* aio_get_req |
504 | * Allocate a slot for an aio request. Increments the ki_users count | 777 | * Allocate a slot for an aio request. |
505 | * of the kioctx so that the kioctx stays around until all requests are | 778 | * Returns NULL if no requests are free. |
506 | * complete. Returns NULL if no requests are free. | ||
507 | * | ||
508 | * Returns with kiocb->ki_users set to 2. The io submit code path holds | ||
509 | * an extra reference while submitting the i/o. | ||
510 | * This prevents races between the aio code path referencing the | ||
511 | * req (after submitting it) and aio_complete() freeing the req. | ||
512 | */ | 779 | */ |
513 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) | 780 | static inline struct kiocb *aio_get_req(struct kioctx *ctx) |
514 | { | 781 | { |
515 | struct kiocb *req; | 782 | struct kiocb *req; |
516 | 783 | ||
517 | if (atomic_read(&ctx->reqs_active) >= ctx->nr_events) | 784 | if (!get_reqs_available(ctx)) |
518 | return NULL; | 785 | return NULL; |
519 | 786 | ||
520 | if (atomic_inc_return(&ctx->reqs_active) > ctx->nr_events - 1) | ||
521 | goto out_put; | ||
522 | |||
523 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); | 787 | req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL|__GFP_ZERO); |
524 | if (unlikely(!req)) | 788 | if (unlikely(!req)) |
525 | goto out_put; | 789 | goto out_put; |
526 | 790 | ||
527 | atomic_set(&req->ki_users, 2); | ||
528 | req->ki_ctx = ctx; | 791 | req->ki_ctx = ctx; |
529 | |||
530 | return req; | 792 | return req; |
531 | out_put: | 793 | out_put: |
532 | atomic_dec(&ctx->reqs_active); | 794 | put_reqs_available(ctx, 1); |
533 | return NULL; | 795 | return NULL; |
534 | } | 796 | } |
535 | 797 | ||
@@ -539,35 +801,32 @@ static void kiocb_free(struct kiocb *req) | |||
539 | fput(req->ki_filp); | 801 | fput(req->ki_filp); |
540 | if (req->ki_eventfd != NULL) | 802 | if (req->ki_eventfd != NULL) |
541 | eventfd_ctx_put(req->ki_eventfd); | 803 | eventfd_ctx_put(req->ki_eventfd); |
542 | if (req->ki_dtor) | ||
543 | req->ki_dtor(req); | ||
544 | if (req->ki_iovec != &req->ki_inline_vec) | ||
545 | kfree(req->ki_iovec); | ||
546 | kmem_cache_free(kiocb_cachep, req); | 804 | kmem_cache_free(kiocb_cachep, req); |
547 | } | 805 | } |
548 | 806 | ||
549 | void aio_put_req(struct kiocb *req) | ||
550 | { | ||
551 | if (atomic_dec_and_test(&req->ki_users)) | ||
552 | kiocb_free(req); | ||
553 | } | ||
554 | EXPORT_SYMBOL(aio_put_req); | ||
555 | |||
556 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) | 807 | static struct kioctx *lookup_ioctx(unsigned long ctx_id) |
557 | { | 808 | { |
809 | struct aio_ring __user *ring = (void __user *)ctx_id; | ||
558 | struct mm_struct *mm = current->mm; | 810 | struct mm_struct *mm = current->mm; |
559 | struct kioctx *ctx, *ret = NULL; | 811 | struct kioctx *ctx, *ret = NULL; |
812 | struct kioctx_table *table; | ||
813 | unsigned id; | ||
814 | |||
815 | if (get_user(id, &ring->id)) | ||
816 | return NULL; | ||
560 | 817 | ||
561 | rcu_read_lock(); | 818 | rcu_read_lock(); |
819 | table = rcu_dereference(mm->ioctx_table); | ||
562 | 820 | ||
563 | hlist_for_each_entry_rcu(ctx, &mm->ioctx_list, list) { | 821 | if (!table || id >= table->nr) |
564 | if (ctx->user_id == ctx_id) { | 822 | goto out; |
565 | atomic_inc(&ctx->users); | ||
566 | ret = ctx; | ||
567 | break; | ||
568 | } | ||
569 | } | ||
570 | 823 | ||
824 | ctx = table->table[id]; | ||
825 | if (ctx && ctx->user_id == ctx_id) { | ||
826 | percpu_ref_get(&ctx->users); | ||
827 | ret = ctx; | ||
828 | } | ||
829 | out: | ||
571 | rcu_read_unlock(); | 830 | rcu_read_unlock(); |
572 | return ret; | 831 | return ret; |
573 | } | 832 | } |
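lookup_ioctx() now treats the aio_context_t handle as the userspace address of the ring header and reads the table index back out of it, instead of walking an hlist keyed by user_id. Sketch of the data flow (only the id field matters here; the rest of the layout is assumed from this series):

	struct aio_ring {
		unsigned	id;	/* index into mm->ioctx_table */
		/* ... nr, head, tail and the io_event array follow ... */
	};

	/* ctx_id is what io_setup() handed to userspace: the mmap'ed
	 * address of this header. One get_user() fetch of ring->id, a
	 * bounds check against table->nr, and an RCU dereference replace
	 * the old O(n) list walk. The final ctx->user_id comparison keeps
	 * a corrupted or stale id from matching the wrong context. */

Because the id lives in user-mapped memory it is untrusted input, which is why both the bounds check and the user_id double-check are required.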
@@ -591,16 +850,16 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
591 | * - the sync task helpfully left a reference to itself in the iocb | 850 | * - the sync task helpfully left a reference to itself in the iocb |
592 | */ | 851 | */ |
593 | if (is_sync_kiocb(iocb)) { | 852 | if (is_sync_kiocb(iocb)) { |
594 | BUG_ON(atomic_read(&iocb->ki_users) != 1); | ||
595 | iocb->ki_user_data = res; | 853 | iocb->ki_user_data = res; |
596 | atomic_set(&iocb->ki_users, 0); | 854 | smp_wmb(); |
855 | iocb->ki_ctx = ERR_PTR(-EXDEV); | ||
597 | wake_up_process(iocb->ki_obj.tsk); | 856 | wake_up_process(iocb->ki_obj.tsk); |
598 | return; | 857 | return; |
599 | } | 858 | } |
600 | 859 | ||
601 | /* | 860 | /* |
602 | * Take rcu_read_lock() in case the kioctx is being destroyed, as we | 861 | * Take rcu_read_lock() in case the kioctx is being destroyed, as we |
603 | * need to issue a wakeup after decrementing reqs_active. | 862 | * need to issue a wakeup after incrementing reqs_available. |
604 | */ | 863 | */ |
605 | rcu_read_lock(); | 864 | rcu_read_lock(); |
606 | 865 | ||
@@ -613,17 +872,6 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
613 | } | 872 | } |
614 | 873 | ||
615 | /* | 874 | /* |
616 | * cancelled requests don't get events, userland was given one | ||
617 | * when the event got cancelled. | ||
618 | */ | ||
619 | if (unlikely(xchg(&iocb->ki_cancel, | ||
620 | KIOCB_CANCELLED) == KIOCB_CANCELLED)) { | ||
621 | atomic_dec(&ctx->reqs_active); | ||
622 | /* Still need the wake_up in case free_ioctx is waiting */ | ||
623 | goto put_rq; | ||
624 | } | ||
625 | |||
626 | /* | ||
627 | * Add a completion event to the ring buffer. Must be done holding | 875 | * Add a completion event to the ring buffer. Must be done holding |
628 | * ctx->completion_lock to prevent other code from messing with the tail | 876 | * ctx->completion_lock to prevent other code from messing with the tail |
629 | * pointer since we might be called from irq context. | 877 | * pointer since we might be called from irq context. |
@@ -675,9 +923,8 @@ void aio_complete(struct kiocb *iocb, long res, long res2) | |||
675 | if (iocb->ki_eventfd != NULL) | 923 | if (iocb->ki_eventfd != NULL) |
676 | eventfd_signal(iocb->ki_eventfd, 1); | 924 | eventfd_signal(iocb->ki_eventfd, 1); |
677 | 925 | ||
678 | put_rq: | ||
679 | /* everything turned out well, dispose of the aiocb. */ | 926 | /* everything turned out well, dispose of the aiocb. */ |
680 | aio_put_req(iocb); | 927 | kiocb_free(iocb); |
681 | 928 | ||
682 | /* | 929 | /* |
683 | * We have to order our ring_info tail store above and test | 930 | * We have to order our ring_info tail store above and test |
@@ -702,7 +949,7 @@ static long aio_read_events_ring(struct kioctx *ctx, | |||
702 | struct io_event __user *event, long nr) | 949 | struct io_event __user *event, long nr) |
703 | { | 950 | { |
704 | struct aio_ring *ring; | 951 | struct aio_ring *ring; |
705 | unsigned head, pos; | 952 | unsigned head, tail, pos; |
706 | long ret = 0; | 953 | long ret = 0; |
707 | int copy_ret; | 954 | int copy_ret; |
708 | 955 | ||
@@ -710,11 +957,12 @@ static long aio_read_events_ring(struct kioctx *ctx, | |||
710 | 957 | ||
711 | ring = kmap_atomic(ctx->ring_pages[0]); | 958 | ring = kmap_atomic(ctx->ring_pages[0]); |
712 | head = ring->head; | 959 | head = ring->head; |
960 | tail = ring->tail; | ||
713 | kunmap_atomic(ring); | 961 | kunmap_atomic(ring); |
714 | 962 | ||
715 | pr_debug("h%u t%u m%u\n", head, ctx->tail, ctx->nr_events); | 963 | pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); |
716 | 964 | ||
717 | if (head == ctx->tail) | 965 | if (head == tail) |
718 | goto out; | 966 | goto out; |
719 | 967 | ||
720 | while (ret < nr) { | 968 | while (ret < nr) { |
@@ -722,8 +970,8 @@ static long aio_read_events_ring(struct kioctx *ctx, | |||
722 | struct io_event *ev; | 970 | struct io_event *ev; |
723 | struct page *page; | 971 | struct page *page; |
724 | 972 | ||
725 | avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head; | 973 | avail = (head <= tail ? tail : ctx->nr_events) - head; |
726 | if (head == ctx->tail) | 974 | if (head == tail) |
727 | break; | 975 | break; |
728 | 976 | ||
729 | avail = min(avail, nr - ret); | 977 | avail = min(avail, nr - ret); |
@@ -754,9 +1002,9 @@ static long aio_read_events_ring(struct kioctx *ctx, | |||
754 | kunmap_atomic(ring); | 1002 | kunmap_atomic(ring); |
755 | flush_dcache_page(ctx->ring_pages[0]); | 1003 | flush_dcache_page(ctx->ring_pages[0]); |
756 | 1004 | ||
757 | pr_debug("%li h%u t%u\n", ret, head, ctx->tail); | 1005 | pr_debug("%li h%u t%u\n", ret, head, tail); |
758 | 1006 | ||
759 | atomic_sub(ret, &ctx->reqs_active); | 1007 | put_reqs_available(ctx, ret); |
760 | out: | 1008 | out: |
761 | mutex_unlock(&ctx->ring_lock); | 1009 | mutex_unlock(&ctx->ring_lock); |
762 | 1010 | ||
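aio_read_events_ring() now snapshots head and tail once, under the same kmap_atomic(), and runs the copy loop on the local values. The point of the change, in miniature (illustrative):

	ring = kmap_atomic(ctx->ring_pages[0]);
	head = ring->head;
	tail = ring->tail;	/* one consistent view of the ring */
	kunmap_atomic(ring);

	/* every later "is there an event" test uses the local tail; a
	 * completion that bumps ring->tail concurrently is picked up by
	 * the next call instead of being half-seen mid-loop */

The old code mixed ctx->tail, the producer's kernel-side copy, into the consumer loop, so the two indices could be observed at different times; the snapshot keeps the read side self-contained.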
@@ -854,8 +1102,8 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp) | |||
854 | if (!IS_ERR(ioctx)) { | 1102 | if (!IS_ERR(ioctx)) { |
855 | ret = put_user(ioctx->user_id, ctxp); | 1103 | ret = put_user(ioctx->user_id, ctxp); |
856 | if (ret) | 1104 | if (ret) |
857 | kill_ioctx(ioctx); | 1105 | kill_ioctx(current->mm, ioctx); |
858 | put_ioctx(ioctx); | 1106 | percpu_ref_put(&ioctx->users); |
859 | } | 1107 | } |
860 | 1108 | ||
861 | out: | 1109 | out: |
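Every lookup/put pair on ctx->users now goes through the percpu_ref API instead of a bare atomic and put_ioctx(). The calling convention, as the converted call sites here and below use it (sketch):

	struct kioctx *ctx = lookup_ioctx(ctx_id);	/* does percpu_ref_get(&ctx->users) */
	if (ctx) {
		/* ... use the context ... */
		percpu_ref_put(&ctx->users);		/* release the lookup reference */
	}

percpu_ref keeps the get/put fast path per-CPU and only collapses to a shared atomic once the ref is marked for teardown, which is what the kill_ioctx() path is expected to trigger.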
@@ -872,101 +1120,37 @@ SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) | |||
872 | { | 1120 | { |
873 | struct kioctx *ioctx = lookup_ioctx(ctx); | 1121 | struct kioctx *ioctx = lookup_ioctx(ctx); |
874 | if (likely(NULL != ioctx)) { | 1122 | if (likely(NULL != ioctx)) { |
875 | kill_ioctx(ioctx); | 1123 | kill_ioctx(current->mm, ioctx); |
876 | put_ioctx(ioctx); | 1124 | percpu_ref_put(&ioctx->users); |
877 | return 0; | 1125 | return 0; |
878 | } | 1126 | } |
879 | pr_debug("EINVAL: io_destroy: invalid context id\n"); | 1127 | pr_debug("EINVAL: io_destroy: invalid context id\n"); |
880 | return -EINVAL; | 1128 | return -EINVAL; |
881 | } | 1129 | } |
882 | 1130 | ||
883 | static void aio_advance_iovec(struct kiocb *iocb, ssize_t ret) | ||
884 | { | ||
885 | struct iovec *iov = &iocb->ki_iovec[iocb->ki_cur_seg]; | ||
886 | |||
887 | BUG_ON(ret <= 0); | ||
888 | |||
889 | while (iocb->ki_cur_seg < iocb->ki_nr_segs && ret > 0) { | ||
890 | ssize_t this = min((ssize_t)iov->iov_len, ret); | ||
891 | iov->iov_base += this; | ||
892 | iov->iov_len -= this; | ||
893 | iocb->ki_left -= this; | ||
894 | ret -= this; | ||
895 | if (iov->iov_len == 0) { | ||
896 | iocb->ki_cur_seg++; | ||
897 | iov++; | ||
898 | } | ||
899 | } | ||
900 | |||
901 | /* the caller should not have done more io than what fit in | ||
902 | * the remaining iovecs */ | ||
903 | BUG_ON(ret > 0 && iocb->ki_left == 0); | ||
904 | } | ||
905 | |||
906 | typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, | 1131 | typedef ssize_t (aio_rw_op)(struct kiocb *, const struct iovec *, |
907 | unsigned long, loff_t); | 1132 | unsigned long, loff_t); |
908 | 1133 | ||
909 | static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op) | 1134 | static ssize_t aio_setup_vectored_rw(struct kiocb *kiocb, |
910 | { | 1135 | int rw, char __user *buf, |
911 | struct file *file = iocb->ki_filp; | 1136 | unsigned long *nr_segs, |
912 | struct address_space *mapping = file->f_mapping; | 1137 | struct iovec **iovec, |
913 | struct inode *inode = mapping->host; | 1138 | bool compat) |
914 | ssize_t ret = 0; | ||
915 | |||
916 | /* This matches the pread()/pwrite() logic */ | ||
917 | if (iocb->ki_pos < 0) | ||
918 | return -EINVAL; | ||
919 | |||
920 | if (rw == WRITE) | ||
921 | file_start_write(file); | ||
922 | do { | ||
923 | ret = rw_op(iocb, &iocb->ki_iovec[iocb->ki_cur_seg], | ||
924 | iocb->ki_nr_segs - iocb->ki_cur_seg, | ||
925 | iocb->ki_pos); | ||
926 | if (ret > 0) | ||
927 | aio_advance_iovec(iocb, ret); | ||
928 | |||
929 | /* retry all partial writes. retry partial reads as long as its a | ||
930 | * regular file. */ | ||
931 | } while (ret > 0 && iocb->ki_left > 0 && | ||
932 | (rw == WRITE || | ||
933 | (!S_ISFIFO(inode->i_mode) && !S_ISSOCK(inode->i_mode)))); | ||
934 | if (rw == WRITE) | ||
935 | file_end_write(file); | ||
936 | |||
937 | /* This means we must have transferred all that we could */ | ||
938 | /* No need to retry anymore */ | ||
939 | if ((ret == 0) || (iocb->ki_left == 0)) | ||
940 | ret = iocb->ki_nbytes - iocb->ki_left; | ||
941 | |||
942 | /* If we managed to write some out we return that, rather than | ||
943 | * the eventual error. */ | ||
944 | if (rw == WRITE | ||
945 | && ret < 0 && ret != -EIOCBQUEUED | ||
946 | && iocb->ki_nbytes - iocb->ki_left) | ||
947 | ret = iocb->ki_nbytes - iocb->ki_left; | ||
948 | |||
949 | return ret; | ||
950 | } | ||
951 | |||
952 | static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat) | ||
953 | { | 1139 | { |
954 | ssize_t ret; | 1140 | ssize_t ret; |
955 | 1141 | ||
956 | kiocb->ki_nr_segs = kiocb->ki_nbytes; | 1142 | *nr_segs = kiocb->ki_nbytes; |
957 | 1143 | ||
958 | #ifdef CONFIG_COMPAT | 1144 | #ifdef CONFIG_COMPAT |
959 | if (compat) | 1145 | if (compat) |
960 | ret = compat_rw_copy_check_uvector(rw, | 1146 | ret = compat_rw_copy_check_uvector(rw, |
961 | (struct compat_iovec __user *)kiocb->ki_buf, | 1147 | (struct compat_iovec __user *)buf, |
962 | kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, | 1148 | *nr_segs, 1, *iovec, iovec); |
963 | &kiocb->ki_iovec); | ||
964 | else | 1149 | else |
965 | #endif | 1150 | #endif |
966 | ret = rw_copy_check_uvector(rw, | 1151 | ret = rw_copy_check_uvector(rw, |
967 | (struct iovec __user *)kiocb->ki_buf, | 1152 | (struct iovec __user *)buf, |
968 | kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec, | 1153 | *nr_segs, 1, *iovec, iovec); |
969 | &kiocb->ki_iovec); | ||
970 | if (ret < 0) | 1154 | if (ret < 0) |
971 | return ret; | 1155 | return ret; |
972 | 1156 | ||
@@ -975,15 +1159,17 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat) | |||
975 | return 0; | 1159 | return 0; |
976 | } | 1160 | } |
977 | 1161 | ||
978 | static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb) | 1162 | static ssize_t aio_setup_single_vector(struct kiocb *kiocb, |
1163 | int rw, char __user *buf, | ||
1164 | unsigned long *nr_segs, | ||
1165 | struct iovec *iovec) | ||
979 | { | 1166 | { |
980 | if (unlikely(!access_ok(!rw, kiocb->ki_buf, kiocb->ki_nbytes))) | 1167 | if (unlikely(!access_ok(!rw, buf, kiocb->ki_nbytes))) |
981 | return -EFAULT; | 1168 | return -EFAULT; |
982 | 1169 | ||
983 | kiocb->ki_iovec = &kiocb->ki_inline_vec; | 1170 | iovec->iov_base = buf; |
984 | kiocb->ki_iovec->iov_base = kiocb->ki_buf; | 1171 | iovec->iov_len = kiocb->ki_nbytes; |
985 | kiocb->ki_iovec->iov_len = kiocb->ki_nbytes; | 1172 | *nr_segs = 1; |
986 | kiocb->ki_nr_segs = 1; | ||
987 | return 0; | 1173 | return 0; |
988 | } | 1174 | } |
989 | 1175 | ||
@@ -992,15 +1178,18 @@ static ssize_t aio_setup_single_vector(int rw, struct kiocb *kiocb) | |||
992 | * Performs the initial checks and aio retry method | 1178 | * Performs the initial checks and aio retry method |
993 | * setup for the kiocb at the time of io submission. | 1179 | * setup for the kiocb at the time of io submission. |
994 | */ | 1180 | */ |
995 | static ssize_t aio_run_iocb(struct kiocb *req, bool compat) | 1181 | static ssize_t aio_run_iocb(struct kiocb *req, unsigned opcode, |
1182 | char __user *buf, bool compat) | ||
996 | { | 1183 | { |
997 | struct file *file = req->ki_filp; | 1184 | struct file *file = req->ki_filp; |
998 | ssize_t ret; | 1185 | ssize_t ret; |
1186 | unsigned long nr_segs; | ||
999 | int rw; | 1187 | int rw; |
1000 | fmode_t mode; | 1188 | fmode_t mode; |
1001 | aio_rw_op *rw_op; | 1189 | aio_rw_op *rw_op; |
1190 | struct iovec inline_vec, *iovec = &inline_vec; | ||
1002 | 1191 | ||
1003 | switch (req->ki_opcode) { | 1192 | switch (opcode) { |
1004 | case IOCB_CMD_PREAD: | 1193 | case IOCB_CMD_PREAD: |
1005 | case IOCB_CMD_PREADV: | 1194 | case IOCB_CMD_PREADV: |
1006 | mode = FMODE_READ; | 1195 | mode = FMODE_READ; |
@@ -1021,21 +1210,38 @@ rw_common: | |||
1021 | if (!rw_op) | 1210 | if (!rw_op) |
1022 | return -EINVAL; | 1211 | return -EINVAL; |
1023 | 1212 | ||
1024 | ret = (req->ki_opcode == IOCB_CMD_PREADV || | 1213 | ret = (opcode == IOCB_CMD_PREADV || |
1025 | req->ki_opcode == IOCB_CMD_PWRITEV) | 1214 | opcode == IOCB_CMD_PWRITEV) |
1026 | ? aio_setup_vectored_rw(rw, req, compat) | 1215 | ? aio_setup_vectored_rw(req, rw, buf, &nr_segs, |
1027 | : aio_setup_single_vector(rw, req); | 1216 | &iovec, compat) |
1217 | : aio_setup_single_vector(req, rw, buf, &nr_segs, | ||
1218 | iovec); | ||
1028 | if (ret) | 1219 | if (ret) |
1029 | return ret; | 1220 | return ret; |
1030 | 1221 | ||
1031 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); | 1222 | ret = rw_verify_area(rw, file, &req->ki_pos, req->ki_nbytes); |
1032 | if (ret < 0) | 1223 | if (ret < 0) { |
1224 | if (iovec != &inline_vec) | ||
1225 | kfree(iovec); | ||
1033 | return ret; | 1226 | return ret; |
1227 | } | ||
1034 | 1228 | ||
1035 | req->ki_nbytes = ret; | 1229 | req->ki_nbytes = ret; |
1036 | req->ki_left = ret; | ||
1037 | 1230 | ||
1038 | ret = aio_rw_vect_retry(req, rw, rw_op); | 1231 | /* XXX: move/kill - rw_verify_area()? */ |
1232 | /* This matches the pread()/pwrite() logic */ | ||
1233 | if (req->ki_pos < 0) { | ||
1234 | ret = -EINVAL; | ||
1235 | break; | ||
1236 | } | ||
1237 | |||
1238 | if (rw == WRITE) | ||
1239 | file_start_write(file); | ||
1240 | |||
1241 | ret = rw_op(req, iovec, nr_segs, req->ki_pos); | ||
1242 | |||
1243 | if (rw == WRITE) | ||
1244 | file_end_write(file); | ||
1039 | break; | 1245 | break; |
1040 | 1246 | ||
1041 | case IOCB_CMD_FDSYNC: | 1247 | case IOCB_CMD_FDSYNC: |
@@ -1057,6 +1263,9 @@ rw_common: | |||
1057 | return -EINVAL; | 1263 | return -EINVAL; |
1058 | } | 1264 | } |
1059 | 1265 | ||
1266 | if (iovec != &inline_vec) | ||
1267 | kfree(iovec); | ||
1268 | |||
1060 | if (ret != -EIOCBQUEUED) { | 1269 | if (ret != -EIOCBQUEUED) { |
1061 | /* | 1270 | /* |
1062 | * There's no easy way to restart the syscall since other AIO's | 1271 | * There's no easy way to restart the syscall since other AIO's |
@@ -1128,21 +1337,18 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, | |||
1128 | req->ki_obj.user = user_iocb; | 1337 | req->ki_obj.user = user_iocb; |
1129 | req->ki_user_data = iocb->aio_data; | 1338 | req->ki_user_data = iocb->aio_data; |
1130 | req->ki_pos = iocb->aio_offset; | 1339 | req->ki_pos = iocb->aio_offset; |
1340 | req->ki_nbytes = iocb->aio_nbytes; | ||
1131 | 1341 | ||
1132 | req->ki_buf = (char __user *)(unsigned long)iocb->aio_buf; | 1342 | ret = aio_run_iocb(req, iocb->aio_lio_opcode, |
1133 | req->ki_left = req->ki_nbytes = iocb->aio_nbytes; | 1343 | (char __user *)(unsigned long)iocb->aio_buf, |
1134 | req->ki_opcode = iocb->aio_lio_opcode; | 1344 | compat); |
1135 | |||
1136 | ret = aio_run_iocb(req, compat); | ||
1137 | if (ret) | 1345 | if (ret) |
1138 | goto out_put_req; | 1346 | goto out_put_req; |
1139 | 1347 | ||
1140 | aio_put_req(req); /* drop extra ref to req */ | ||
1141 | return 0; | 1348 | return 0; |
1142 | out_put_req: | 1349 | out_put_req: |
1143 | atomic_dec(&ctx->reqs_active); | 1350 | put_reqs_available(ctx, 1); |
1144 | aio_put_req(req); /* drop extra ref to req */ | 1351 | kiocb_free(req); |
1145 | aio_put_req(req); /* drop i/o ref to req */ | ||
1146 | return ret; | 1352 | return ret; |
1147 | } | 1353 | } |
1148 | 1354 | ||
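io_submit_one() loses the juggling of two kiocb references. Before, aio_get_req() handed back a request with ki_users == 2 and both the success and error paths had to drop the extra submission reference; with no refcount at all, the paths collapse (sketch of the before/after shape, condensed from the hunk above):

	/* before: aio_get_req() returned ki_users == 2 */
	ret = aio_run_iocb(req, compat);
	if (ret)
		goto out_put_req;
	aio_put_req(req);		/* drop the extra submission ref */
	return 0;

	/* after: no refcount; completion owns the free */
	ret = aio_run_iocb(req, opcode, buf, compat);
	if (ret)
		goto out_put_req;
	return 0;
out_put_req:
	put_reqs_available(ctx, 1);
	kiocb_free(req);
	return ret;

The iovec bookkeeping moved the same way: aio_run_iocb() now owns a stack iovec with a kmalloc'ed fallback and frees it on every exit path, instead of parking it in the kiocb for kiocb_free() to release later.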
@@ -1195,7 +1401,7 @@ long do_io_submit(aio_context_t ctx_id, long nr, | |||
1195 | } | 1401 | } |
1196 | blk_finish_plug(&plug); | 1402 | blk_finish_plug(&plug); |
1197 | 1403 | ||
1198 | put_ioctx(ctx); | 1404 | percpu_ref_put(&ctx->users); |
1199 | return i ? i : ret; | 1405 | return i ? i : ret; |
1200 | } | 1406 | } |
1201 | 1407 | ||
@@ -1252,7 +1458,6 @@ static struct kiocb *lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, | |||
1252 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | 1458 | SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, |
1253 | struct io_event __user *, result) | 1459 | struct io_event __user *, result) |
1254 | { | 1460 | { |
1255 | struct io_event res; | ||
1256 | struct kioctx *ctx; | 1461 | struct kioctx *ctx; |
1257 | struct kiocb *kiocb; | 1462 | struct kiocb *kiocb; |
1258 | u32 key; | 1463 | u32 key; |
@@ -1270,21 +1475,22 @@ SYSCALL_DEFINE3(io_cancel, aio_context_t, ctx_id, struct iocb __user *, iocb, | |||
1270 | 1475 | ||
1271 | kiocb = lookup_kiocb(ctx, iocb, key); | 1476 | kiocb = lookup_kiocb(ctx, iocb, key); |
1272 | if (kiocb) | 1477 | if (kiocb) |
1273 | ret = kiocb_cancel(ctx, kiocb, &res); | 1478 | ret = kiocb_cancel(ctx, kiocb); |
1274 | else | 1479 | else |
1275 | ret = -EINVAL; | 1480 | ret = -EINVAL; |
1276 | 1481 | ||
1277 | spin_unlock_irq(&ctx->ctx_lock); | 1482 | spin_unlock_irq(&ctx->ctx_lock); |
1278 | 1483 | ||
1279 | if (!ret) { | 1484 | if (!ret) { |
1280 | /* Cancellation succeeded -- copy the result | 1485 | /* |
1281 | * into the user's buffer. | 1486 | * The result argument is no longer used - the io_event is |
1487 | * always delivered via the ring buffer. -EINPROGRESS indicates | ||
1488 | * cancellation is in progress: | ||
1282 | */ | 1489 | */ |
1283 | if (copy_to_user(result, &res, sizeof(res))) | 1490 | ret = -EINPROGRESS; |
1284 | ret = -EFAULT; | ||
1285 | } | 1491 | } |
1286 | 1492 | ||
1287 | put_ioctx(ctx); | 1493 | percpu_ref_put(&ctx->users); |
1288 | 1494 | ||
1289 | return ret; | 1495 | return ret; |
1290 | } | 1496 | } |
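The io_cancel(2) contract changes here: on a successful cancellation the syscall no longer copies an io_event into 'result' but returns -EINPROGRESS, and the completion event for the cancelled iocb is delivered through the ring like any other. A hedged sketch of a caller after this change (assuming the libaio-style prototypes; error handling elided):

	ret = io_cancel(ctx, &iocb, &result);	/* result is no longer filled in */
	if (ret == -EINPROGRESS) {
		/* cancellation started; reap the final status normally */
		struct io_event ev;
		io_getevents(ctx, 1, 1, &ev, NULL);
	} else if (ret == -EINVAL) {
		/* no such request: already completed, or a bad iocb */
	}

Callers that relied on 'result' being written need updating; the argument survives only so the syscall signature stays unchanged.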
@@ -1313,7 +1519,7 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id, | |||
1313 | if (likely(ioctx)) { | 1519 | if (likely(ioctx)) { |
1314 | if (likely(min_nr <= nr && min_nr >= 0)) | 1520 | if (likely(min_nr <= nr && min_nr >= 0)) |
1315 | ret = read_events(ioctx, min_nr, nr, events, timeout); | 1521 | ret = read_events(ioctx, min_nr, nr, events, timeout); |
1316 | put_ioctx(ioctx); | 1522 | percpu_ref_put(&ioctx->users); |
1317 | } | 1523 | } |
1318 | return ret; | 1524 | return ret; |
1319 | } | 1525 | } |
diff --git a/fs/anon_inodes.c b/fs/anon_inodes.c index 47a65df8c871..85c961849953 100644 --- a/fs/anon_inodes.c +++ b/fs/anon_inodes.c | |||
@@ -109,6 +109,72 @@ static struct file_system_type anon_inode_fs_type = { | |||
109 | }; | 109 | }; |
110 | 110 | ||
111 | /** | 111 | /** |
112 | * anon_inode_getfile_private - creates a new file instance by hooking it up to an | ||
113 | * anonymous inode, and a dentry that describes the "class" | ||
114 | * of the file | ||
115 | * | ||
116 | * @name: [in] name of the "class" of the new file | ||
117 | * @fops: [in] file operations for the new file | ||
118 | * @priv: [in] private data for the new file (will be file's private_data) | ||
119 | * @flags: [in] flags | ||
120 | * | ||
121 | * | ||
122 | * Similar to anon_inode_getfile, but each file holds a single inode. | ||
123 | * | ||
124 | */ | ||
125 | struct file *anon_inode_getfile_private(const char *name, | ||
126 | const struct file_operations *fops, | ||
127 | void *priv, int flags) | ||
128 | { | ||
129 | struct qstr this; | ||
130 | struct path path; | ||
131 | struct file *file; | ||
132 | struct inode *inode; | ||
133 | |||
134 | if (fops->owner && !try_module_get(fops->owner)) | ||
135 | return ERR_PTR(-ENOENT); | ||
136 | |||
137 | inode = anon_inode_mkinode(anon_inode_mnt->mnt_sb); | ||
138 | if (IS_ERR(inode)) { | ||
139 | file = ERR_PTR(-ENOMEM); | ||
140 | goto err_module; | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * Link the inode to a directory entry by creating a unique name | ||
145 | * using the inode sequence number. | ||
146 | */ | ||
147 | file = ERR_PTR(-ENOMEM); | ||
148 | this.name = name; | ||
149 | this.len = strlen(name); | ||
150 | this.hash = 0; | ||
151 | path.dentry = d_alloc_pseudo(anon_inode_mnt->mnt_sb, &this); | ||
152 | if (!path.dentry) | ||
153 | goto err_module; | ||
154 | |||
155 | path.mnt = mntget(anon_inode_mnt); | ||
156 | |||
157 | d_instantiate(path.dentry, inode); | ||
158 | |||
159 | file = alloc_file(&path, OPEN_FMODE(flags), fops); | ||
160 | if (IS_ERR(file)) | ||
161 | goto err_dput; | ||
162 | |||
163 | file->f_mapping = inode->i_mapping; | ||
164 | file->f_flags = flags & (O_ACCMODE | O_NONBLOCK); | ||
165 | file->private_data = priv; | ||
166 | |||
167 | return file; | ||
168 | |||
169 | err_dput: | ||
170 | path_put(&path); | ||
171 | err_module: | ||
172 | module_put(fops->owner); | ||
173 | return file; | ||
174 | } | ||
175 | EXPORT_SYMBOL_GPL(anon_inode_getfile_private); | ||
176 | |||
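anon_inode_getfile_private() differs from anon_inode_getfile() in that every caller gets a fresh inode, and therefore a private i_mapping, instead of sharing the single anonymous inode. A hedged usage sketch for a hypothetical driver (all names invented for illustration):

	static const struct file_operations foo_fops = {
		.owner = THIS_MODULE,
		/* .read, .mmap, ... */
	};

	struct file *filp = anon_inode_getfile_private("[foo]", &foo_fops,
						       foo_priv, O_RDWR);
	if (IS_ERR(filp))
		return PTR_ERR(filp);
	/* filp->f_mapping now points at the new inode's own mapping, so
	 * address_space state (page cache, flags) is not shared with
	 * every other anonymous-inode user in the system */

A caller that wants per-file page-cache state needs exactly this; the aio ring buffer in this same series appears to be the motivating user.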
177 | /** | ||
112 | * anon_inode_getfile - creates a new file instance by hooking it up to an | 178 | * anon_inode_getfile - creates a new file instance by hooking it up to an |
113 | * anonymous inode, and a dentry that describes the "class" | 179 | * anonymous inode, and a dentry that describes the "class" |
114 | * of the file | 180 | * of the file |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 1173a4ee0830..1e86823a9cbd 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
@@ -592,7 +592,7 @@ static struct block_device *bd_acquire(struct inode *inode) | |||
592 | return bdev; | 592 | return bdev; |
593 | } | 593 | } |
594 | 594 | ||
595 | static inline int sb_is_blkdev_sb(struct super_block *sb) | 595 | int sb_is_blkdev_sb(struct super_block *sb) |
596 | { | 596 | { |
597 | return sb == blockdev_superblock; | 597 | return sb == blockdev_superblock; |
598 | } | 598 | } |
@@ -1542,7 +1542,7 @@ static ssize_t blkdev_aio_read(struct kiocb *iocb, const struct iovec *iov, | |||
1542 | return 0; | 1542 | return 0; |
1543 | 1543 | ||
1544 | size -= pos; | 1544 | size -= pos; |
1545 | if (size < iocb->ki_left) | 1545 | if (size < iocb->ki_nbytes) |
1546 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size); | 1546 | nr_segs = iov_shorten((struct iovec *)iov, nr_segs, size); |
1547 | return generic_file_aio_read(iocb, iov, nr_segs, pos); | 1547 | return generic_file_aio_read(iocb, iov, nr_segs, pos); |
1548 | } | 1548 | } |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index d044b35ce228..eb955b525e55 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -3379,6 +3379,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
3379 | return rc; | 3379 | return rc; |
3380 | } | 3380 | } |
3381 | 3381 | ||
3382 | /* | ||
3383 | * cifs_readpage_worker must be called with the page pinned | ||
3384 | */ | ||
3382 | static int cifs_readpage_worker(struct file *file, struct page *page, | 3385 | static int cifs_readpage_worker(struct file *file, struct page *page, |
3383 | loff_t *poffset) | 3386 | loff_t *poffset) |
3384 | { | 3387 | { |
@@ -3390,7 +3393,6 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
3390 | if (rc == 0) | 3393 | if (rc == 0) |
3391 | goto read_complete; | 3394 | goto read_complete; |
3392 | 3395 | ||
3393 | page_cache_get(page); | ||
3394 | read_data = kmap(page); | 3396 | read_data = kmap(page); |
3395 | /* for reads over a certain size could initiate async read ahead */ | 3397 | /* for reads over a certain size could initiate async read ahead */ |
3396 | 3398 | ||
@@ -3417,7 +3419,7 @@ static int cifs_readpage_worker(struct file *file, struct page *page, | |||
3417 | 3419 | ||
3418 | io_error: | 3420 | io_error: |
3419 | kunmap(page); | 3421 | kunmap(page); |
3420 | page_cache_release(page); | 3422 | unlock_page(page); |
3421 | 3423 | ||
3422 | read_complete: | 3424 | read_complete: |
3423 | return rc; | 3425 | return rc; |
@@ -3442,8 +3444,6 @@ static int cifs_readpage(struct file *file, struct page *page) | |||
3442 | 3444 | ||
3443 | rc = cifs_readpage_worker(file, page, &offset); | 3445 | rc = cifs_readpage_worker(file, page, &offset); |
3444 | 3446 | ||
3445 | unlock_page(page); | ||
3446 | |||
3447 | free_xid(xid); | 3447 | free_xid(xid); |
3448 | return rc; | 3448 | return rc; |
3449 | } | 3449 | } |
@@ -3497,6 +3497,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, | |||
3497 | loff_t pos, unsigned len, unsigned flags, | 3497 | loff_t pos, unsigned len, unsigned flags, |
3498 | struct page **pagep, void **fsdata) | 3498 | struct page **pagep, void **fsdata) |
3499 | { | 3499 | { |
3500 | int oncethru = 0; | ||
3500 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; | 3501 | pgoff_t index = pos >> PAGE_CACHE_SHIFT; |
3501 | loff_t offset = pos & (PAGE_CACHE_SIZE - 1); | 3502 | loff_t offset = pos & (PAGE_CACHE_SIZE - 1); |
3502 | loff_t page_start = pos & PAGE_MASK; | 3503 | loff_t page_start = pos & PAGE_MASK; |
@@ -3506,6 +3507,7 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, | |||
3506 | 3507 | ||
3507 | cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); | 3508 | cifs_dbg(FYI, "write_begin from %lld len %d\n", (long long)pos, len); |
3508 | 3509 | ||
3510 | start: | ||
3509 | page = grab_cache_page_write_begin(mapping, index, flags); | 3511 | page = grab_cache_page_write_begin(mapping, index, flags); |
3510 | if (!page) { | 3512 | if (!page) { |
3511 | rc = -ENOMEM; | 3513 | rc = -ENOMEM; |
@@ -3547,13 +3549,16 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping, | |||
3547 | } | 3549 | } |
3548 | } | 3550 | } |
3549 | 3551 | ||
3550 | if ((file->f_flags & O_ACCMODE) != O_WRONLY) { | 3552 | if ((file->f_flags & O_ACCMODE) != O_WRONLY && !oncethru) { |
3551 | /* | 3553 | /* |
3552 | * might as well read a page, it is fast enough. If we get | 3554 | * might as well read a page, it is fast enough. If we get |
3553 | * an error, we don't need to return it. cifs_write_end will | 3555 | * an error, we don't need to return it. cifs_write_end will |
3554 | * do a sync write instead since PG_uptodate isn't set. | 3556 | * do a sync write instead since PG_uptodate isn't set. |
3555 | */ | 3557 | */ |
3556 | cifs_readpage_worker(file, page, &page_start); | 3558 | cifs_readpage_worker(file, page, &page_start); |
3559 | page_cache_release(page); | ||
3560 | oncethru = 1; | ||
3561 | goto start; | ||
3557 | } else { | 3562 | } else { |
3558 | /* we could try using another file handle if there is one - | 3563 | /* we could try using another file handle if there is one - |
3559 | but how would we lock it to prevent close of that handle | 3564 | but how would we lock it to prevent close of that handle |
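cifs_readpage_worker() now expects a pinned page from its caller and drops the page lock itself, so cifs_write_begin() can no longer keep using the page it passed in. The control flow it adopts, in outline (sketch of the hunk above):

	start:
		page = grab_cache_page_write_begin(mapping, index, flags);
		/* page is locked and pinned here */
		...
		if (/* not up to date, opened for read+write */ !oncethru) {
			cifs_readpage_worker(file, page, &page_start);
			/* worker unlocked the page; drop our pin and retry */
			page_cache_release(page);
			oncethru = 1;
			goto start;
		}

The oncethru flag bounds the retry at a single read pass: a page that still is not up to date after the read falls through, and cifs_write_end() does a sync write since PG_uptodate is unset, rather than looping here.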
diff --git a/fs/dcache.c b/fs/dcache.c index 1bd4614ce93b..41000305d716 100644 --- a/fs/dcache.c +++ b/fs/dcache.c | |||
@@ -357,15 +357,80 @@ static void dentry_unlink_inode(struct dentry * dentry) | |||
357 | } | 357 | } |
358 | 358 | ||
359 | /* | 359 | /* |
360 | * The DCACHE_LRU_LIST bit is set whenever the 'd_lru' entry | ||
361 | * is in use - which includes both the "real" per-superblock | ||
362 | * LRU list _and_ the DCACHE_SHRINK_LIST use. | ||
363 | * | ||
364 | * The DCACHE_SHRINK_LIST bit is set whenever the dentry is | ||
365 | * on the shrink list (ie not on the superblock LRU list). | ||
366 | * | ||
367 | * The per-cpu "nr_dentry_unused" counters are updated together with | ||
368 | * the DCACHE_LRU_LIST bit. | ||
369 | * | ||
370 | * These helper functions make sure we always follow the | ||
371 | * rules. d_lock must be held by the caller. | ||
372 | */ | ||
373 | #define D_FLAG_VERIFY(dentry,x) WARN_ON_ONCE(((dentry)->d_flags & (DCACHE_LRU_LIST | DCACHE_SHRINK_LIST)) != (x)) | ||
374 | static void d_lru_add(struct dentry *dentry) | ||
375 | { | ||
376 | D_FLAG_VERIFY(dentry, 0); | ||
377 | dentry->d_flags |= DCACHE_LRU_LIST; | ||
378 | this_cpu_inc(nr_dentry_unused); | ||
379 | WARN_ON_ONCE(!list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); | ||
380 | } | ||
381 | |||
382 | static void d_lru_del(struct dentry *dentry) | ||
383 | { | ||
384 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | ||
385 | dentry->d_flags &= ~DCACHE_LRU_LIST; | ||
386 | this_cpu_dec(nr_dentry_unused); | ||
387 | WARN_ON_ONCE(!list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)); | ||
388 | } | ||
389 | |||
390 | static void d_shrink_del(struct dentry *dentry) | ||
391 | { | ||
392 | D_FLAG_VERIFY(dentry, DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); | ||
393 | list_del_init(&dentry->d_lru); | ||
394 | dentry->d_flags &= ~(DCACHE_SHRINK_LIST | DCACHE_LRU_LIST); | ||
395 | this_cpu_dec(nr_dentry_unused); | ||
396 | } | ||
397 | |||
398 | static void d_shrink_add(struct dentry *dentry, struct list_head *list) | ||
399 | { | ||
400 | D_FLAG_VERIFY(dentry, 0); | ||
401 | list_add(&dentry->d_lru, list); | ||
402 | dentry->d_flags |= DCACHE_SHRINK_LIST | DCACHE_LRU_LIST; | ||
403 | this_cpu_inc(nr_dentry_unused); | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * These can only be called under the global LRU lock, ie during the | ||
408 | * callback for freeing the LRU list. "isolate" removes it from the | ||
409 | * LRU lists entirely, while shrink_move moves it to the indicated | ||
410 | * private list. | ||
411 | */ | ||
412 | static void d_lru_isolate(struct dentry *dentry) | ||
413 | { | ||
414 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | ||
415 | dentry->d_flags &= ~DCACHE_LRU_LIST; | ||
416 | this_cpu_dec(nr_dentry_unused); | ||
417 | list_del_init(&dentry->d_lru); | ||
418 | } | ||
419 | |||
420 | static void d_lru_shrink_move(struct dentry *dentry, struct list_head *list) | ||
421 | { | ||
422 | D_FLAG_VERIFY(dentry, DCACHE_LRU_LIST); | ||
423 | dentry->d_flags |= DCACHE_SHRINK_LIST; | ||
424 | list_move_tail(&dentry->d_lru, list); | ||
425 | } | ||
426 | |||
427 | /* | ||
360 | * dentry_lru_(add|del)_list) must be called with d_lock held. | 428 | * dentry_lru_(add|del)_list) must be called with d_lock held. |
361 | */ | 429 | */ |
362 | static void dentry_lru_add(struct dentry *dentry) | 430 | static void dentry_lru_add(struct dentry *dentry) |
363 | { | 431 | { |
364 | if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) { | 432 | if (unlikely(!(dentry->d_flags & DCACHE_LRU_LIST))) |
365 | if (list_lru_add(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)) | 433 | d_lru_add(dentry); |
366 | this_cpu_inc(nr_dentry_unused); | ||
367 | dentry->d_flags |= DCACHE_LRU_LIST; | ||
368 | } | ||
369 | } | 434 | } |
370 | 435 | ||
371 | /* | 436 | /* |
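The six helpers introduced above make every d_lru transition update the flags, the list, and the nr_dentry_unused counter together. The legal states, summarized from the comment block and the D_FLAG_VERIFY() assertions:

	/* DCACHE_LRU_LIST  DCACHE_SHRINK_LIST   d_lru is on           counted?
	 *       0                 0             nothing               no
	 *       1                 0             sb->s_dentry_lru      yes
	 *       1                 1             a private shrink list yes
	 *
	 * Shrink-list entries stay counted: d_shrink_add() does
	 * this_cpu_inc() and d_shrink_del() does this_cpu_dec(), while
	 * d_lru_shrink_move() moves between lists without touching the
	 * counter because the dentry was already counted. */

D_FLAG_VERIFY() turns a helper called from the wrong state into a one-shot WARN rather than silent list corruption.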
@@ -377,15 +442,11 @@ static void dentry_lru_add(struct dentry *dentry) | |||
377 | */ | 442 | */ |
378 | static void dentry_lru_del(struct dentry *dentry) | 443 | static void dentry_lru_del(struct dentry *dentry) |
379 | { | 444 | { |
380 | if (dentry->d_flags & DCACHE_SHRINK_LIST) { | 445 | if (dentry->d_flags & DCACHE_LRU_LIST) { |
381 | list_del_init(&dentry->d_lru); | 446 | if (dentry->d_flags & DCACHE_SHRINK_LIST) |
382 | dentry->d_flags &= ~DCACHE_SHRINK_LIST; | 447 | return d_shrink_del(dentry); |
383 | return; | 448 | d_lru_del(dentry); |
384 | } | 449 | } |
385 | |||
386 | if (list_lru_del(&dentry->d_sb->s_dentry_lru, &dentry->d_lru)) | ||
387 | this_cpu_dec(nr_dentry_unused); | ||
388 | dentry->d_flags &= ~DCACHE_LRU_LIST; | ||
389 | } | 450 | } |
390 | 451 | ||
391 | /** | 452 | /** |
@@ -837,6 +898,12 @@ static void shrink_dentry_list(struct list_head *list) | |||
837 | dentry = list_entry_rcu(list->prev, struct dentry, d_lru); | 898 | dentry = list_entry_rcu(list->prev, struct dentry, d_lru); |
838 | if (&dentry->d_lru == list) | 899 | if (&dentry->d_lru == list) |
839 | break; /* empty */ | 900 | break; /* empty */ |
901 | |||
902 | /* | ||
903 | * Get the dentry lock, and re-verify that the dentry is | ||
904 | * still on the shrink list. If it is, we know that | ||
905 | * DCACHE_SHRINK_LIST and DCACHE_LRU_LIST are set. | ||
906 | */ | ||
840 | spin_lock(&dentry->d_lock); | 907 | spin_lock(&dentry->d_lock); |
841 | if (dentry != list_entry(list->prev, struct dentry, d_lru)) { | 908 | if (dentry != list_entry(list->prev, struct dentry, d_lru)) { |
842 | spin_unlock(&dentry->d_lock); | 909 | spin_unlock(&dentry->d_lock); |
@@ -848,8 +915,7 @@ static void shrink_dentry_list(struct list_head *list) | |||
848 | * to the LRU here, so we can simply remove it from the list | 915 | * to the LRU here, so we can simply remove it from the list |
849 | * here regardless of whether it is referenced or not. | 916 | * here regardless of whether it is referenced or not. |
850 | */ | 917 | */ |
851 | list_del_init(&dentry->d_lru); | 918 | d_shrink_del(dentry); |
852 | dentry->d_flags &= ~DCACHE_SHRINK_LIST; | ||
853 | 919 | ||
854 | /* | 920 | /* |
855 | * We found an inuse dentry which was not removed from | 921 | * We found an inuse dentry which was not removed from |
@@ -861,12 +927,20 @@ static void shrink_dentry_list(struct list_head *list) | |||
861 | } | 927 | } |
862 | rcu_read_unlock(); | 928 | rcu_read_unlock(); |
863 | 929 | ||
930 | /* | ||
931 | * If 'try_prune_one_dentry()' returns a dentry, it will | ||
932 | * be the same one we passed in, and d_lock will | ||
933 | * have been held the whole time, so it will not | ||
934 | * have been added to any other lists. We failed | ||
935 | * to get the inode lock. | ||
936 | * | ||
937 | * We just add it back to the shrink list. | ||
938 | */ | ||
864 | dentry = try_prune_one_dentry(dentry); | 939 | dentry = try_prune_one_dentry(dentry); |
865 | 940 | ||
866 | rcu_read_lock(); | 941 | rcu_read_lock(); |
867 | if (dentry) { | 942 | if (dentry) { |
868 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 943 | d_shrink_add(dentry, list); |
869 | list_add(&dentry->d_lru, list); | ||
870 | spin_unlock(&dentry->d_lock); | 944 | spin_unlock(&dentry->d_lock); |
871 | } | 945 | } |
872 | } | 946 | } |
@@ -894,7 +968,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) | |||
894 | * another pass through the LRU. | 968 | * another pass through the LRU. |
895 | */ | 969 | */ |
896 | if (dentry->d_lockref.count) { | 970 | if (dentry->d_lockref.count) { |
897 | list_del_init(&dentry->d_lru); | 971 | d_lru_isolate(dentry); |
898 | spin_unlock(&dentry->d_lock); | 972 | spin_unlock(&dentry->d_lock); |
899 | return LRU_REMOVED; | 973 | return LRU_REMOVED; |
900 | } | 974 | } |
@@ -925,9 +999,7 @@ dentry_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg) | |||
925 | return LRU_ROTATE; | 999 | return LRU_ROTATE; |
926 | } | 1000 | } |
927 | 1001 | ||
928 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 1002 | d_lru_shrink_move(dentry, freeable); |
929 | list_move_tail(&dentry->d_lru, freeable); | ||
930 | this_cpu_dec(nr_dentry_unused); | ||
931 | spin_unlock(&dentry->d_lock); | 1003 | spin_unlock(&dentry->d_lock); |
932 | 1004 | ||
933 | return LRU_REMOVED; | 1005 | return LRU_REMOVED; |
@@ -972,9 +1044,7 @@ static enum lru_status dentry_lru_isolate_shrink(struct list_head *item, | |||
972 | if (!spin_trylock(&dentry->d_lock)) | 1044 | if (!spin_trylock(&dentry->d_lock)) |
973 | return LRU_SKIP; | 1045 | return LRU_SKIP; |
974 | 1046 | ||
975 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 1047 | d_lru_shrink_move(dentry, freeable); |
976 | list_move_tail(&dentry->d_lru, freeable); | ||
977 | this_cpu_dec(nr_dentry_unused); | ||
978 | spin_unlock(&dentry->d_lock); | 1048 | spin_unlock(&dentry->d_lock); |
979 | 1049 | ||
980 | return LRU_REMOVED; | 1050 | return LRU_REMOVED; |
@@ -1362,9 +1432,13 @@ static enum d_walk_ret select_collect(void *_data, struct dentry *dentry) | |||
1362 | if (dentry->d_lockref.count) { | 1432 | if (dentry->d_lockref.count) { |
1363 | dentry_lru_del(dentry); | 1433 | dentry_lru_del(dentry); |
1364 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { | 1434 | } else if (!(dentry->d_flags & DCACHE_SHRINK_LIST)) { |
1365 | dentry_lru_del(dentry); | 1435 | /* |
1366 | list_add_tail(&dentry->d_lru, &data->dispose); | 1436 | * We can't use d_lru_shrink_move() because we |
1367 | dentry->d_flags |= DCACHE_SHRINK_LIST; | 1437 | * need to get the global LRU lock and do the |
1438 | * LRU accounting. | ||
1439 | */ | ||
1440 | d_lru_del(dentry); | ||
1441 | d_shrink_add(dentry, &data->dispose); | ||
1368 | data->found++; | 1442 | data->found++; |
1369 | ret = D_WALK_NORETRY; | 1443 | ret = D_WALK_NORETRY; |
1370 | } | 1444 | } |
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 30f6f27d5a59..9f4935b8f208 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
@@ -69,7 +69,7 @@ static inline struct backing_dev_info *inode_to_bdi(struct inode *inode) | |||
69 | { | 69 | { |
70 | struct super_block *sb = inode->i_sb; | 70 | struct super_block *sb = inode->i_sb; |
71 | 71 | ||
72 | if (strcmp(sb->s_type->name, "bdev") == 0) | 72 | if (sb_is_blkdev_sb(sb)) |
73 | return inode->i_mapping->backing_dev_info; | 73 | return inode->i_mapping->backing_dev_info; |
74 | 74 | ||
75 | return sb->s_bdi; | 75 | return sb->s_bdi; |
@@ -251,11 +251,13 @@ static int move_expired_inodes(struct list_head *delaying_queue, | |||
251 | if (work->older_than_this && | 251 | if (work->older_than_this && |
252 | inode_dirtied_after(inode, *work->older_than_this)) | 252 | inode_dirtied_after(inode, *work->older_than_this)) |
253 | break; | 253 | break; |
254 | list_move(&inode->i_wb_list, &tmp); | ||
255 | moved++; | ||
256 | if (sb_is_blkdev_sb(inode->i_sb)) | ||
257 | continue; | ||
254 | if (sb && sb != inode->i_sb) | 258 | if (sb && sb != inode->i_sb) |
255 | do_sb_sort = 1; | 259 | do_sb_sort = 1; |
256 | sb = inode->i_sb; | 260 | sb = inode->i_sb; |
257 | list_move(&inode->i_wb_list, &tmp); | ||
258 | moved++; | ||
259 | } | 261 | } |
260 | 262 | ||
261 | /* just one sb in list, splice to dispatch_queue and we're done */ | 263 | /* just one sb in list, splice to dispatch_queue and we're done */ |
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c index 0bd7a55a5f07..91ff089d3412 100644 --- a/fs/nfs/direct.c +++ b/fs/nfs/direct.c | |||
@@ -130,7 +130,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_ | |||
130 | 130 | ||
131 | return -EINVAL; | 131 | return -EINVAL; |
132 | #else | 132 | #else |
133 | VM_BUG_ON(iocb->ki_left != PAGE_SIZE); | ||
134 | VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE); | 133 | VM_BUG_ON(iocb->ki_nbytes != PAGE_SIZE); |
135 | 134 | ||
136 | if (rw == READ || rw == KERNEL_READ) | 135 | if (rw == READ || rw == KERNEL_READ) |
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 4f8197caa487..d71903c6068b 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -2242,7 +2242,7 @@ static ssize_t ocfs2_file_aio_write(struct kiocb *iocb, | |||
2242 | file->f_path.dentry->d_name.name, | 2242 | file->f_path.dentry->d_name.name, |
2243 | (unsigned int)nr_segs); | 2243 | (unsigned int)nr_segs); |
2244 | 2244 | ||
2245 | if (iocb->ki_left == 0) | 2245 | if (iocb->ki_nbytes == 0) |
2246 | return 0; | 2246 | return 0; |
2247 | 2247 | ||
2248 | appending = file->f_flags & O_APPEND ? 1 : 0; | 2248 | appending = file->f_flags & O_APPEND ? 1 : 0; |
@@ -2293,7 +2293,7 @@ relock: | |||
2293 | 2293 | ||
2294 | can_do_direct = direct_io; | 2294 | can_do_direct = direct_io; |
2295 | ret = ocfs2_prepare_inode_for_write(file, ppos, | 2295 | ret = ocfs2_prepare_inode_for_write(file, ppos, |
2296 | iocb->ki_left, appending, | 2296 | iocb->ki_nbytes, appending, |
2297 | &can_do_direct, &has_refcount); | 2297 | &can_do_direct, &has_refcount); |
2298 | if (ret < 0) { | 2298 | if (ret < 0) { |
2299 | mlog_errno(ret); | 2299 | mlog_errno(ret); |
@@ -2301,7 +2301,7 @@ relock: | |||
2301 | } | 2301 | } |
2302 | 2302 | ||
2303 | if (direct_io && !is_sync_kiocb(iocb)) | 2303 | if (direct_io && !is_sync_kiocb(iocb)) |
2304 | unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_left, | 2304 | unaligned_dio = ocfs2_is_io_unaligned(inode, iocb->ki_nbytes, |
2305 | *ppos); | 2305 | *ppos); |
2306 | 2306 | ||
2307 | /* | 2307 | /* |
diff --git a/fs/read_write.c b/fs/read_write.c index 122a3846d9e1..e3cd280b158c 100644 --- a/fs/read_write.c +++ b/fs/read_write.c | |||
@@ -367,7 +367,6 @@ ssize_t do_sync_read(struct file *filp, char __user *buf, size_t len, loff_t *pp | |||
367 | 367 | ||
368 | init_sync_kiocb(&kiocb, filp); | 368 | init_sync_kiocb(&kiocb, filp); |
369 | kiocb.ki_pos = *ppos; | 369 | kiocb.ki_pos = *ppos; |
370 | kiocb.ki_left = len; | ||
371 | kiocb.ki_nbytes = len; | 370 | kiocb.ki_nbytes = len; |
372 | 371 | ||
373 | ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos); | 372 | ret = filp->f_op->aio_read(&kiocb, &iov, 1, kiocb.ki_pos); |
@@ -417,7 +416,6 @@ ssize_t do_sync_write(struct file *filp, const char __user *buf, size_t len, lof | |||
417 | 416 | ||
418 | init_sync_kiocb(&kiocb, filp); | 417 | init_sync_kiocb(&kiocb, filp); |
419 | kiocb.ki_pos = *ppos; | 418 | kiocb.ki_pos = *ppos; |
420 | kiocb.ki_left = len; | ||
421 | kiocb.ki_nbytes = len; | 419 | kiocb.ki_nbytes = len; |
422 | 420 | ||
423 | ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos); | 421 | ret = filp->f_op->aio_write(&kiocb, &iov, 1, kiocb.ki_pos); |
@@ -599,7 +597,6 @@ static ssize_t do_sync_readv_writev(struct file *filp, const struct iovec *iov, | |||
599 | 597 | ||
600 | init_sync_kiocb(&kiocb, filp); | 598 | init_sync_kiocb(&kiocb, filp); |
601 | kiocb.ki_pos = *ppos; | 599 | kiocb.ki_pos = *ppos; |
602 | kiocb.ki_left = len; | ||
603 | kiocb.ki_nbytes = len; | 600 | kiocb.ki_nbytes = len; |
604 | 601 | ||
605 | ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos); | 602 | ret = fn(&kiocb, iov, nr_segs, kiocb.ki_pos); |
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c index 7f60e900edff..6e025e02ffde 100644 --- a/fs/ubifs/debug.c +++ b/fs/ubifs/debug.c | |||
@@ -2587,10 +2587,11 @@ int dbg_leb_write(struct ubifs_info *c, int lnum, const void *buf, | |||
2587 | return -EROFS; | 2587 | return -EROFS; |
2588 | 2588 | ||
2589 | failing = power_cut_emulated(c, lnum, 1); | 2589 | failing = power_cut_emulated(c, lnum, 1); |
2590 | if (failing) | 2590 | if (failing) { |
2591 | len = corrupt_data(c, buf, len); | 2591 | len = corrupt_data(c, buf, len); |
2592 | ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)", | 2592 | ubifs_warn("actually write %d bytes to LEB %d:%d (the buffer was corrupted)", |
2593 | len, lnum, offs); | 2593 | len, lnum, offs); |
2594 | } | ||
2594 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len); | 2595 | err = ubi_leb_write(c->ubi, lnum, buf, offs, len); |
2595 | if (err) | 2596 | if (err) |
2596 | return err; | 2597 | return err; |
diff --git a/fs/udf/file.c b/fs/udf/file.c index 29569dd08168..c02a27a19c6d 100644 --- a/fs/udf/file.c +++ b/fs/udf/file.c | |||
@@ -141,7 +141,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | |||
141 | struct file *file = iocb->ki_filp; | 141 | struct file *file = iocb->ki_filp; |
142 | struct inode *inode = file_inode(file); | 142 | struct inode *inode = file_inode(file); |
143 | int err, pos; | 143 | int err, pos; |
144 | size_t count = iocb->ki_left; | 144 | size_t count = iocb->ki_nbytes; |
145 | struct udf_inode_info *iinfo = UDF_I(inode); | 145 | struct udf_inode_info *iinfo = UDF_I(inode); |
146 | 146 | ||
147 | down_write(&iinfo->i_data_sem); | 147 | down_write(&iinfo->i_data_sem); |
diff --git a/include/linux/aio.h b/include/linux/aio.h index 1bdf965339f9..d9c92daa3944 100644 --- a/include/linux/aio.h +++ b/include/linux/aio.h | |||
@@ -27,15 +27,13 @@ struct kiocb; | |||
27 | */ | 27 | */ |
28 | #define KIOCB_CANCELLED ((void *) (~0ULL)) | 28 | #define KIOCB_CANCELLED ((void *) (~0ULL)) |
29 | 29 | ||
30 | typedef int (kiocb_cancel_fn)(struct kiocb *, struct io_event *); | 30 | typedef int (kiocb_cancel_fn)(struct kiocb *); |
31 | 31 | ||
32 | struct kiocb { | 32 | struct kiocb { |
33 | atomic_t ki_users; | ||
34 | |||
35 | struct file *ki_filp; | 33 | struct file *ki_filp; |
36 | struct kioctx *ki_ctx; /* NULL for sync ops */ | 34 | struct kioctx *ki_ctx; /* NULL for sync ops */ |
37 | kiocb_cancel_fn *ki_cancel; | 35 | kiocb_cancel_fn *ki_cancel; |
38 | void (*ki_dtor)(struct kiocb *); | 36 | void *private; |
39 | 37 | ||
40 | union { | 38 | union { |
41 | void __user *user; | 39 | void __user *user; |
@@ -44,17 +42,7 @@ struct kiocb { | |||
44 | 42 | ||
45 | __u64 ki_user_data; /* user's data for completion */ | 43 | __u64 ki_user_data; /* user's data for completion */ |
46 | loff_t ki_pos; | 44 | loff_t ki_pos; |
47 | 45 | size_t ki_nbytes; /* copy of iocb->aio_nbytes */ | |
48 | void *private; | ||
49 | /* State that we remember to be able to restart/retry */ | ||
50 | unsigned short ki_opcode; | ||
51 | size_t ki_nbytes; /* copy of iocb->aio_nbytes */ | ||
52 | char __user *ki_buf; /* remaining iocb->aio_buf */ | ||
53 | size_t ki_left; /* remaining bytes */ | ||
54 | struct iovec ki_inline_vec; /* inline vector */ | ||
55 | struct iovec *ki_iovec; | ||
56 | unsigned long ki_nr_segs; | ||
57 | unsigned long ki_cur_seg; | ||
58 | 46 | ||
59 | struct list_head ki_list; /* the aio core uses this | 47 | struct list_head ki_list; /* the aio core uses this |
60 | * for cancellation */ | 48 | * for cancellation */ |
@@ -74,7 +62,6 @@ static inline bool is_sync_kiocb(struct kiocb *kiocb) | |||
74 | static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) | 62 | static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) |
75 | { | 63 | { |
76 | *kiocb = (struct kiocb) { | 64 | *kiocb = (struct kiocb) { |
77 | .ki_users = ATOMIC_INIT(1), | ||
78 | .ki_ctx = NULL, | 65 | .ki_ctx = NULL, |
79 | .ki_filp = filp, | 66 | .ki_filp = filp, |
80 | .ki_obj.tsk = current, | 67 | .ki_obj.tsk = current, |
@@ -84,7 +71,6 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) | |||
84 | /* prototypes */ | 71 | /* prototypes */ |
85 | #ifdef CONFIG_AIO | 72 | #ifdef CONFIG_AIO |
86 | extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); | 73 | extern ssize_t wait_on_sync_kiocb(struct kiocb *iocb); |
87 | extern void aio_put_req(struct kiocb *iocb); | ||
88 | extern void aio_complete(struct kiocb *iocb, long res, long res2); | 74 | extern void aio_complete(struct kiocb *iocb, long res, long res2); |
89 | struct mm_struct; | 75 | struct mm_struct; |
90 | extern void exit_aio(struct mm_struct *mm); | 76 | extern void exit_aio(struct mm_struct *mm); |
@@ -93,7 +79,6 @@ extern long do_io_submit(aio_context_t ctx_id, long nr, | |||
93 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); | 79 | void kiocb_set_cancel_fn(struct kiocb *req, kiocb_cancel_fn *cancel); |
94 | #else | 80 | #else |
95 | static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } | 81 | static inline ssize_t wait_on_sync_kiocb(struct kiocb *iocb) { return 0; } |
96 | static inline void aio_put_req(struct kiocb *iocb) { } | ||
97 | static inline void aio_complete(struct kiocb *iocb, long res, long res2) { } | 82 | static inline void aio_complete(struct kiocb *iocb, long res, long res2) { } |
98 | struct mm_struct; | 83 | struct mm_struct; |
99 | static inline void exit_aio(struct mm_struct *mm) { } | 84 | static inline void exit_aio(struct mm_struct *mm) { } |
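After the deletions above, the kiocb shrinks to roughly the following shape (reassembled from the hunks in this file for readability; fields past ki_list are unchanged and elided):

	struct kiocb {
		struct file		*ki_filp;
		struct kioctx		*ki_ctx;	/* NULL for sync ops */
		kiocb_cancel_fn		*ki_cancel;
		void			*private;

		union {
			void __user		*user;
			struct task_struct	*tsk;
		} ki_obj;

		__u64			ki_user_data;	/* user's data for completion */
		loff_t			ki_pos;
		size_t			ki_nbytes;	/* copy of iocb->aio_nbytes */

		struct list_head	ki_list;	/* the aio core uses this
							 * for cancellation */
		/* ... */
	};

Everything that tracked retry progress (ki_opcode, ki_buf, ki_left, the inline/current iovec and the segment cursors) is gone, because aio_run_iocb() now performs the I/O in one shot with stack-local state.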
diff --git a/include/linux/anon_inodes.h b/include/linux/anon_inodes.h index 8013a45242fe..cf573c22b81e 100644 --- a/include/linux/anon_inodes.h +++ b/include/linux/anon_inodes.h | |||
@@ -13,6 +13,9 @@ struct file_operations; | |||
13 | struct file *anon_inode_getfile(const char *name, | 13 | struct file *anon_inode_getfile(const char *name, |
14 | const struct file_operations *fops, | 14 | const struct file_operations *fops, |
15 | void *priv, int flags); | 15 | void *priv, int flags); |
16 | struct file *anon_inode_getfile_private(const char *name, | ||
17 | const struct file_operations *fops, | ||
18 | void *priv, int flags); | ||
16 | int anon_inode_getfd(const char *name, const struct file_operations *fops, | 19 | int anon_inode_getfd(const char *name, const struct file_operations *fops, |
17 | void *priv, int flags); | 20 | void *priv, int flags); |
18 | 21 | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index a4acd3c61190..3f40547ba191 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -2069,6 +2069,7 @@ extern struct super_block *freeze_bdev(struct block_device *); | |||
2069 | extern void emergency_thaw_all(void); | 2069 | extern void emergency_thaw_all(void); |
2070 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); | 2070 | extern int thaw_bdev(struct block_device *bdev, struct super_block *sb); |
2071 | extern int fsync_bdev(struct block_device *); | 2071 | extern int fsync_bdev(struct block_device *); |
2072 | extern int sb_is_blkdev_sb(struct super_block *sb); | ||
2072 | #else | 2073 | #else |
2073 | static inline void bd_forget(struct inode *inode) {} | 2074 | static inline void bd_forget(struct inode *inode) {} |
2074 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } | 2075 | static inline int sync_blockdev(struct block_device *bdev) { return 0; } |
@@ -2088,6 +2089,11 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb) | |||
2088 | static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) | 2089 | static inline void iterate_bdevs(void (*f)(struct block_device *, void *), void *arg) |
2089 | { | 2090 | { |
2090 | } | 2091 | } |
2092 | |||
2093 | static inline int sb_is_blkdev_sb(struct super_block *sb) | ||
2094 | { | ||
2095 | return 0; | ||
2096 | } | ||
2091 | #endif | 2097 | #endif |
2092 | extern int sync_filesystem(struct super_block *); | 2098 | extern int sync_filesystem(struct super_block *); |
2093 | extern const struct file_operations def_blk_fops; | 2099 | extern const struct file_operations def_blk_fops; |
diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 6fe521420631..8d3c57fdf221 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h | |||
@@ -53,6 +53,9 @@ extern int migrate_vmas(struct mm_struct *mm, | |||
53 | extern void migrate_page_copy(struct page *newpage, struct page *page); | 53 | extern void migrate_page_copy(struct page *newpage, struct page *page); |
54 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, | 54 | extern int migrate_huge_page_move_mapping(struct address_space *mapping, |
55 | struct page *newpage, struct page *page); | 55 | struct page *newpage, struct page *page); |
56 | extern int migrate_page_move_mapping(struct address_space *mapping, | ||
57 | struct page *newpage, struct page *page, | ||
58 | struct buffer_head *head, enum migrate_mode mode); | ||
56 | #else | 59 | #else |
57 | 60 | ||
58 | static inline void putback_lru_pages(struct list_head *l) {} | 61 | static inline void putback_lru_pages(struct list_head *l) {} |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index faf4b7c1ad12..d9851eeb6e1d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -322,6 +322,7 @@ struct mm_rss_stat { | |||
322 | atomic_long_t count[NR_MM_COUNTERS]; | 322 | atomic_long_t count[NR_MM_COUNTERS]; |
323 | }; | 323 | }; |
324 | 324 | ||
325 | struct kioctx_table; | ||
325 | struct mm_struct { | 326 | struct mm_struct { |
326 | struct vm_area_struct * mmap; /* list of VMAs */ | 327 | struct vm_area_struct * mmap; /* list of VMAs */ |
327 | struct rb_root mm_rb; | 328 | struct rb_root mm_rb; |
@@ -383,8 +384,8 @@ struct mm_struct { | |||
383 | 384 | ||
384 | struct core_state *core_state; /* coredumping support */ | 385 | struct core_state *core_state; /* coredumping support */ |
385 | #ifdef CONFIG_AIO | 386 | #ifdef CONFIG_AIO |
386 | spinlock_t ioctx_lock; | 387 | spinlock_t ioctx_lock; |
387 | struct hlist_head ioctx_list; | 388 | struct kioctx_table __rcu *ioctx_table; |
388 | #endif | 389 | #endif |
389 | #ifdef CONFIG_MM_OWNER | 390 | #ifdef CONFIG_MM_OWNER |
390 | /* | 391 | /* |
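The mm now points at an RCU-managed, index-addressable table of contexts instead of an hlist. The table type itself is defined in fs/aio.c by this series; its shape, as a sketch of that definition:

	struct kioctx_table {
		struct rcu_head		rcu;
		unsigned		nr;		/* size of table[] */
		struct kioctx		*table[];	/* flexible array */
	};

Readers do rcu_dereference(mm->ioctx_table) and index straight into table[] (see lookup_ioctx() and exit_aio() earlier in this diff); writers replace the whole table under ioctx_lock and free the old one after a grace period, which is what the rcu_head above exists for. That is what lets the ring header's id field act as a stable O(1) handle.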
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index bc95b2b391bf..97fbecdd7a40 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -758,6 +758,7 @@ | |||
758 | #define PCI_DEVICE_ID_HP_CISSE 0x323a | 758 | #define PCI_DEVICE_ID_HP_CISSE 0x323a |
759 | #define PCI_DEVICE_ID_HP_CISSF 0x323b | 759 | #define PCI_DEVICE_ID_HP_CISSF 0x323b |
760 | #define PCI_DEVICE_ID_HP_CISSH 0x323c | 760 | #define PCI_DEVICE_ID_HP_CISSH 0x323c |
761 | #define PCI_DEVICE_ID_HP_CISSI 0x3239 | ||
761 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 | 762 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 |
762 | 763 | ||
763 | #define PCI_VENDOR_ID_PCTECH 0x1042 | 764 | #define PCI_VENDOR_ID_PCTECH 0x1042 |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 6c5cc0ea8713..74f105847d13 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -4,6 +4,8 @@ | |||
4 | * (C) SGI 2006, Christoph Lameter | 4 | * (C) SGI 2006, Christoph Lameter |
5 | * Cleaned up and restructured to ease the addition of alternative | 5 | * Cleaned up and restructured to ease the addition of alternative |
6 | * implementations of SLAB allocators. | 6 | * implementations of SLAB allocators. |
7 | * (C) Linux Foundation 2008-2013 | ||
8 | * Unified interface for all slab allocators | ||
7 | */ | 9 | */ |
8 | 10 | ||
9 | #ifndef _LINUX_SLAB_H | 11 | #ifndef _LINUX_SLAB_H |
@@ -94,6 +96,7 @@ | |||
94 | #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ | 96 | #define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ |
95 | (unsigned long)ZERO_SIZE_PTR) | 97 | (unsigned long)ZERO_SIZE_PTR) |
96 | 98 | ||
99 | #include <linux/kmemleak.h> | ||
97 | 100 | ||
98 | struct mem_cgroup; | 101 | struct mem_cgroup; |
99 | /* | 102 | /* |
@@ -289,6 +292,57 @@ static __always_inline int kmalloc_index(size_t size) | |||
289 | } | 292 | } |
290 | #endif /* !CONFIG_SLOB */ | 293 | #endif /* !CONFIG_SLOB */ |
291 | 294 | ||
295 | void *__kmalloc(size_t size, gfp_t flags); | ||
296 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t flags); | ||
297 | |||
298 | #ifdef CONFIG_NUMA | ||
299 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | ||
300 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | ||
301 | #else | ||
302 | static __always_inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | ||
303 | { | ||
304 | return __kmalloc(size, flags); | ||
305 | } | ||
306 | |||
307 | static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) | ||
308 | { | ||
309 | return kmem_cache_alloc(s, flags); | ||
310 | } | ||
311 | #endif | ||
312 | |||
313 | #ifdef CONFIG_TRACING | ||
314 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | ||
315 | |||
316 | #ifdef CONFIG_NUMA | ||
317 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||
318 | gfp_t gfpflags, | ||
319 | int node, size_t size); | ||
320 | #else | ||
321 | static __always_inline void * | ||
322 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||
323 | gfp_t gfpflags, | ||
324 | int node, size_t size) | ||
325 | { | ||
326 | return kmem_cache_alloc_trace(s, gfpflags, size); | ||
327 | } | ||
328 | #endif /* CONFIG_NUMA */ | ||
329 | |||
330 | #else /* CONFIG_TRACING */ | ||
331 | static __always_inline void *kmem_cache_alloc_trace(struct kmem_cache *s, | ||
332 | gfp_t flags, size_t size) | ||
333 | { | ||
334 | return kmem_cache_alloc(s, flags); | ||
335 | } | ||
336 | |||
337 | static __always_inline void * | ||
338 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||
339 | gfp_t gfpflags, | ||
340 | int node, size_t size) | ||
341 | { | ||
342 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
343 | } | ||
344 | #endif /* CONFIG_TRACING */ | ||
345 | |||
292 | #ifdef CONFIG_SLAB | 346 | #ifdef CONFIG_SLAB |
293 | #include <linux/slab_def.h> | 347 | #include <linux/slab_def.h> |
294 | #endif | 348 | #endif |
@@ -297,9 +351,60 @@ static __always_inline int kmalloc_index(size_t size) | |||
297 | #include <linux/slub_def.h> | 351 | #include <linux/slub_def.h> |
298 | #endif | 352 | #endif |
299 | 353 | ||
300 | #ifdef CONFIG_SLOB | 354 | static __always_inline void * |
301 | #include <linux/slob_def.h> | 355 | kmalloc_order(size_t size, gfp_t flags, unsigned int order) |
356 | { | ||
357 | void *ret; | ||
358 | |||
359 | flags |= (__GFP_COMP | __GFP_KMEMCG); | ||
360 | ret = (void *) __get_free_pages(flags, order); | ||
361 | kmemleak_alloc(ret, size, 1, flags); | ||
362 | return ret; | ||
363 | } | ||
364 | |||
365 | #ifdef CONFIG_TRACING | ||
366 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | ||
367 | #else | ||
368 | static __always_inline void * | ||
369 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
370 | { | ||
371 | return kmalloc_order(size, flags, order); | ||
372 | } | ||
373 | #endif | ||
374 | |||
375 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | ||
376 | { | ||
377 | unsigned int order = get_order(size); | ||
378 | return kmalloc_order_trace(size, flags, order); | ||
379 | } | ||
380 | |||
381 | /** | ||
382 | * kmalloc - allocate memory | ||
383 | * @size: how many bytes of memory are required. | ||
384 | * @flags: the type of memory to allocate (see kcalloc). | ||
385 | * | ||
386 | * kmalloc is the normal method of allocating memory | ||
387 | * for objects smaller than page size in the kernel. | ||
388 | */ | ||
389 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
390 | { | ||
391 | if (__builtin_constant_p(size)) { | ||
392 | if (size > KMALLOC_MAX_CACHE_SIZE) | ||
393 | return kmalloc_large(size, flags); | ||
394 | #ifndef CONFIG_SLOB | ||
395 | if (!(flags & GFP_DMA)) { | ||
396 | int index = kmalloc_index(size); | ||
397 | |||
398 | if (!index) | ||
399 | return ZERO_SIZE_PTR; | ||
400 | |||
401 | return kmem_cache_alloc_trace(kmalloc_caches[index], | ||
402 | flags, size); | ||
403 | } | ||
302 | #endif | 404 | #endif |
405 | } | ||
406 | return __kmalloc(size, flags); | ||
407 | } | ||
303 | 408 | ||
304 | /* | 409 | /* |
305 | * Determine size used for the nth kmalloc cache. | 410 | * Determine size used for the nth kmalloc cache. |
@@ -321,6 +426,23 @@ static __always_inline int kmalloc_size(int n) | |||
321 | return 0; | 426 | return 0; |
322 | } | 427 | } |
323 | 428 | ||
429 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
430 | { | ||
431 | #ifndef CONFIG_SLOB | ||
432 | if (__builtin_constant_p(size) && | ||
433 | size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { | ||
434 | int i = kmalloc_index(size); | ||
435 | |||
436 | if (!i) | ||
437 | return ZERO_SIZE_PTR; | ||
438 | |||
439 | return kmem_cache_alloc_node_trace(kmalloc_caches[i], | ||
440 | flags, node, size); | ||
441 | } | ||
442 | #endif | ||
443 | return __kmalloc_node(size, flags, node); | ||
444 | } | ||
445 | |||
324 | /* | 446 | /* |
325 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. | 447 | * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment. |
326 | * Intended for arches that get misalignment faults even for 64 bit integer | 448 | * Intended for arches that get misalignment faults even for 64 bit integer |
@@ -451,36 +573,6 @@ static inline void *kcalloc(size_t n, size_t size, gfp_t flags) | |||
451 | return kmalloc_array(n, size, flags | __GFP_ZERO); | 573 | return kmalloc_array(n, size, flags | __GFP_ZERO); |
452 | } | 574 | } |
453 | 575 | ||
454 | #if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB) | ||
455 | /** | ||
456 | * kmalloc_node - allocate memory from a specific node | ||
457 | * @size: how many bytes of memory are required. | ||
458 | * @flags: the type of memory to allocate (see kmalloc). | ||
459 | * @node: node to allocate from. | ||
460 | * | ||
461 | * kmalloc() for non-local nodes, used to allocate from a specific node | ||
462 | * if available. Equivalent to kmalloc() in the non-NUMA single-node | ||
463 | * case. | ||
464 | */ | ||
465 | static inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
466 | { | ||
467 | return kmalloc(size, flags); | ||
468 | } | ||
469 | |||
470 | static inline void *__kmalloc_node(size_t size, gfp_t flags, int node) | ||
471 | { | ||
472 | return __kmalloc(size, flags); | ||
473 | } | ||
474 | |||
475 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | ||
476 | |||
477 | static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep, | ||
478 | gfp_t flags, int node) | ||
479 | { | ||
480 | return kmem_cache_alloc(cachep, flags); | ||
481 | } | ||
482 | #endif /* !CONFIG_NUMA && !CONFIG_SLOB */ | ||
483 | |||
484 | /* | 576 | /* |
485 | * kmalloc_track_caller is a special version of kmalloc that records the | 577 | * kmalloc_track_caller is a special version of kmalloc that records the |
486 | * calling function of the routine calling it for slab leak tracking instead | 578 | * calling function of the routine calling it for slab leak tracking instead |
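With the per-allocator definitions folded into slab.h (and deleted from slab_def.h/slub_def.h below), kmalloc() dispatch is now identical for SLAB and SLUB: a compile-time-constant size is mapped to a kmalloc cache index at build time, oversized requests go to the page allocator, and everything else falls through to __kmalloc(). Illustrative use of the inline added above (the size thresholds quoted assume a 4K-page SLUB build, where KMALLOC_MAX_CACHE_SIZE is two pages):

	/* Constant size: kmalloc_index(64) folds at compile time, so this
	 * becomes kmem_cache_alloc_trace(kmalloc_caches[i], flags, 64). */
	void *a = kmalloc(64, GFP_KERNEL);

	/* Constant but oversized: handled by kmalloc_large() ->
	 * kmalloc_order_trace() straight from the page allocator. */
	void *b = kmalloc(16384, GFP_KERNEL);

	/* Runtime size: no folding possible, plain __kmalloc() call. */
	void *c = kmalloc(len, GFP_KERNEL);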
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index cd401580bdd3..e9346b4f1ef4 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -3,20 +3,6 @@ | |||
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Definitions unique to the original Linux SLAB allocator. | 5 | * Definitions unique to the original Linux SLAB allocator. |
6 | * | ||
7 | * What we provide here is a way to optimize the frequent kmalloc | ||
8 | * calls in the kernel by selecting the appropriate general cache | ||
9 | * if kmalloc was called with a size that can be established at | ||
10 | * compile time. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | #include <linux/compiler.h> | ||
15 | |||
16 | /* | ||
17 | * struct kmem_cache | ||
18 | * | ||
19 | * manages a cache. | ||
20 | */ | 6 | */ |
21 | 7 | ||
22 | struct kmem_cache { | 8 | struct kmem_cache { |
@@ -102,96 +88,4 @@ struct kmem_cache { | |||
102 | */ | 88 | */ |
103 | }; | 89 | }; |
104 | 90 | ||
105 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | ||
106 | void *__kmalloc(size_t size, gfp_t flags); | ||
107 | |||
108 | #ifdef CONFIG_TRACING | ||
109 | extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t); | ||
110 | #else | ||
111 | static __always_inline void * | ||
112 | kmem_cache_alloc_trace(struct kmem_cache *cachep, gfp_t flags, size_t size) | ||
113 | { | ||
114 | return kmem_cache_alloc(cachep, flags); | ||
115 | } | ||
116 | #endif | ||
117 | |||
118 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
119 | { | ||
120 | struct kmem_cache *cachep; | ||
121 | void *ret; | ||
122 | |||
123 | if (__builtin_constant_p(size)) { | ||
124 | int i; | ||
125 | |||
126 | if (!size) | ||
127 | return ZERO_SIZE_PTR; | ||
128 | |||
129 | if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE)) | ||
130 | return NULL; | ||
131 | |||
132 | i = kmalloc_index(size); | ||
133 | |||
134 | #ifdef CONFIG_ZONE_DMA | ||
135 | if (flags & GFP_DMA) | ||
136 | cachep = kmalloc_dma_caches[i]; | ||
137 | else | ||
138 | #endif | ||
139 | cachep = kmalloc_caches[i]; | ||
140 | |||
141 | ret = kmem_cache_alloc_trace(cachep, flags, size); | ||
142 | |||
143 | return ret; | ||
144 | } | ||
145 | return __kmalloc(size, flags); | ||
146 | } | ||
147 | |||
148 | #ifdef CONFIG_NUMA | ||
149 | extern void *__kmalloc_node(size_t size, gfp_t flags, int node); | ||
150 | extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | ||
151 | |||
152 | #ifdef CONFIG_TRACING | ||
153 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *cachep, | ||
154 | gfp_t flags, | ||
155 | int nodeid, | ||
156 | size_t size); | ||
157 | #else | ||
158 | static __always_inline void * | ||
159 | kmem_cache_alloc_node_trace(struct kmem_cache *cachep, | ||
160 | gfp_t flags, | ||
161 | int nodeid, | ||
162 | size_t size) | ||
163 | { | ||
164 | return kmem_cache_alloc_node(cachep, flags, nodeid); | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
169 | { | ||
170 | struct kmem_cache *cachep; | ||
171 | |||
172 | if (__builtin_constant_p(size)) { | ||
173 | int i; | ||
174 | |||
175 | if (!size) | ||
176 | return ZERO_SIZE_PTR; | ||
177 | |||
178 | if (WARN_ON_ONCE(size > KMALLOC_MAX_SIZE)) | ||
179 | return NULL; | ||
180 | |||
181 | i = kmalloc_index(size); | ||
182 | |||
183 | #ifdef CONFIG_ZONE_DMA | ||
184 | if (flags & GFP_DMA) | ||
185 | cachep = kmalloc_dma_caches[i]; | ||
186 | else | ||
187 | #endif | ||
188 | cachep = kmalloc_caches[i]; | ||
189 | |||
190 | return kmem_cache_alloc_node_trace(cachep, flags, node, size); | ||
191 | } | ||
192 | return __kmalloc_node(size, flags, node); | ||
193 | } | ||
194 | |||
195 | #endif /* CONFIG_NUMA */ | ||
196 | |||
197 | #endif /* _LINUX_SLAB_DEF_H */ | 91 | #endif /* _LINUX_SLAB_DEF_H */ |
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h deleted file mode 100644 index 095a5a4a8516..000000000000 --- a/include/linux/slob_def.h +++ /dev/null | |||
@@ -1,31 +0,0 @@ | |||
1 | #ifndef __LINUX_SLOB_DEF_H | ||
2 | #define __LINUX_SLOB_DEF_H | ||
3 | |||
4 | #include <linux/numa.h> | ||
5 | |||
6 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | ||
7 | |||
8 | static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep, | ||
9 | gfp_t flags) | ||
10 | { | ||
11 | return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE); | ||
12 | } | ||
13 | |||
14 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | ||
15 | |||
16 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
17 | { | ||
18 | return __kmalloc_node(size, flags, node); | ||
19 | } | ||
20 | |||
21 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
22 | { | ||
23 | return __kmalloc_node(size, flags, NUMA_NO_NODE); | ||
24 | } | ||
25 | |||
26 | static __always_inline void *__kmalloc(size_t size, gfp_t flags) | ||
27 | { | ||
28 | return kmalloc(size, flags); | ||
29 | } | ||
30 | |||
31 | #endif /* __LINUX_SLOB_DEF_H */ | ||
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 027276fa8713..cc0b67eada42 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -6,14 +6,8 @@ | |||
6 | * | 6 | * |
7 | * (C) 2007 SGI, Christoph Lameter | 7 | * (C) 2007 SGI, Christoph Lameter |
8 | */ | 8 | */ |
9 | #include <linux/types.h> | ||
10 | #include <linux/gfp.h> | ||
11 | #include <linux/bug.h> | ||
12 | #include <linux/workqueue.h> | ||
13 | #include <linux/kobject.h> | 9 | #include <linux/kobject.h> |
14 | 10 | ||
15 | #include <linux/kmemleak.h> | ||
16 | |||
17 | enum stat_item { | 11 | enum stat_item { |
18 | ALLOC_FASTPATH, /* Allocation from cpu slab */ | 12 | ALLOC_FASTPATH, /* Allocation from cpu slab */ |
19 | ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ | 13 | ALLOC_SLOWPATH, /* Allocation by getting a new cpu slab */ |
@@ -104,108 +98,4 @@ struct kmem_cache { | |||
104 | struct kmem_cache_node *node[MAX_NUMNODES]; | 98 | struct kmem_cache_node *node[MAX_NUMNODES]; |
105 | }; | 99 | }; |
106 | 100 | ||
107 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | ||
108 | void *__kmalloc(size_t size, gfp_t flags); | ||
109 | |||
110 | static __always_inline void * | ||
111 | kmalloc_order(size_t size, gfp_t flags, unsigned int order) | ||
112 | { | ||
113 | void *ret; | ||
114 | |||
115 | flags |= (__GFP_COMP | __GFP_KMEMCG); | ||
116 | ret = (void *) __get_free_pages(flags, order); | ||
117 | kmemleak_alloc(ret, size, 1, flags); | ||
118 | return ret; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * Calling this on allocated memory will check that the memory | ||
123 | * is expected to be in use, and print warnings if not. | ||
124 | */ | ||
125 | #ifdef CONFIG_SLUB_DEBUG | ||
126 | extern bool verify_mem_not_deleted(const void *x); | ||
127 | #else | ||
128 | static inline bool verify_mem_not_deleted(const void *x) | ||
129 | { | ||
130 | return true; | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | #ifdef CONFIG_TRACING | ||
135 | extern void * | ||
136 | kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size); | ||
137 | extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order); | ||
138 | #else | ||
139 | static __always_inline void * | ||
140 | kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | ||
141 | { | ||
142 | return kmem_cache_alloc(s, gfpflags); | ||
143 | } | ||
144 | |||
145 | static __always_inline void * | ||
146 | kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
147 | { | ||
148 | return kmalloc_order(size, flags, order); | ||
149 | } | ||
150 | #endif | ||
151 | |||
152 | static __always_inline void *kmalloc_large(size_t size, gfp_t flags) | ||
153 | { | ||
154 | unsigned int order = get_order(size); | ||
155 | return kmalloc_order_trace(size, flags, order); | ||
156 | } | ||
157 | |||
158 | static __always_inline void *kmalloc(size_t size, gfp_t flags) | ||
159 | { | ||
160 | if (__builtin_constant_p(size)) { | ||
161 | if (size > KMALLOC_MAX_CACHE_SIZE) | ||
162 | return kmalloc_large(size, flags); | ||
163 | |||
164 | if (!(flags & GFP_DMA)) { | ||
165 | int index = kmalloc_index(size); | ||
166 | |||
167 | if (!index) | ||
168 | return ZERO_SIZE_PTR; | ||
169 | |||
170 | return kmem_cache_alloc_trace(kmalloc_caches[index], | ||
171 | flags, size); | ||
172 | } | ||
173 | } | ||
174 | return __kmalloc(size, flags); | ||
175 | } | ||
176 | |||
177 | #ifdef CONFIG_NUMA | ||
178 | void *__kmalloc_node(size_t size, gfp_t flags, int node); | ||
179 | void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node); | ||
180 | |||
181 | #ifdef CONFIG_TRACING | ||
182 | extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||
183 | gfp_t gfpflags, | ||
184 | int node, size_t size); | ||
185 | #else | ||
186 | static __always_inline void * | ||
187 | kmem_cache_alloc_node_trace(struct kmem_cache *s, | ||
188 | gfp_t gfpflags, | ||
189 | int node, size_t size) | ||
190 | { | ||
191 | return kmem_cache_alloc_node(s, gfpflags, node); | ||
192 | } | ||
193 | #endif | ||
194 | |||
195 | static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node) | ||
196 | { | ||
197 | if (__builtin_constant_p(size) && | ||
198 | size <= KMALLOC_MAX_CACHE_SIZE && !(flags & GFP_DMA)) { | ||
199 | int index = kmalloc_index(size); | ||
200 | |||
201 | if (!index) | ||
202 | return ZERO_SIZE_PTR; | ||
203 | |||
204 | return kmem_cache_alloc_node_trace(kmalloc_caches[index], | ||
205 | flags, node, size); | ||
206 | } | ||
207 | return __kmalloc_node(size, flags, node); | ||
208 | } | ||
209 | #endif | ||
210 | |||
211 | #endif /* _LINUX_SLUB_DEF_H */ | 101 | #endif /* _LINUX_SLUB_DEF_H */ |
diff --git a/include/linux/time-armada-370-xp.h b/include/linux/time-armada-370-xp.h deleted file mode 100644 index 6fb0856b9405..000000000000 --- a/include/linux/time-armada-370-xp.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | /* | ||
2 | * Marvell Armada 370/XP SoC timer handling. | ||
3 | * | ||
4 | * Copyright (C) 2012 Marvell | ||
5 | * | ||
6 | * Lior Amsalem <alior@marvell.com> | ||
7 | * Gregory CLEMENT <gregory.clement@free-electrons.com> | ||
8 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> | ||
9 | * | ||
10 | */ | ||
11 | #ifndef __TIME_ARMADA_370_XPPRCMU_H | ||
12 | #define __TIME_ARMADA_370_XPPRCMU_H | ||
13 | |||
14 | void armada_370_xp_timer_init(void); | ||
15 | |||
16 | #endif | ||
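With this header gone there is no out-of-line armada_370_xp_timer_init() left for board code to call; initialization presumably moves to the DT clocksource probe path, registered per compatible string along these lines (a sketch under that assumption, with hypothetical init function names):

	CLOCKSOURCE_OF_DECLARE(armada_370, "marvell,armada-370-timer",
			       armada_370_timer_init);
	CLOCKSOURCE_OF_DECLARE(armada_xp, "marvell,armada-xp-timer",
			       armada_xp_timer_init);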
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h index d08abf9101d2..a3726275876d 100644 --- a/include/uapi/linux/input.h +++ b/include/uapi/linux/input.h | |||
@@ -152,6 +152,7 @@ struct input_keymap_entry { | |||
152 | #define EVIOCGEFFECTS _IOR('E', 0x84, int) /* Report number of effects playable at the same time */ | 152 | #define EVIOCGEFFECTS _IOR('E', 0x84, int) /* Report number of effects playable at the same time */ |
153 | 153 | ||
154 | #define EVIOCGRAB _IOW('E', 0x90, int) /* Grab/Release device */ | 154 | #define EVIOCGRAB _IOW('E', 0x90, int) /* Grab/Release device */ |
155 | #define EVIOCREVOKE _IOW('E', 0x91, int) /* Revoke device access */ | ||
155 | 156 | ||
156 | #define EVIOCSCLOCKID _IOW('E', 0xa0, int) /* Set clockid to be used for timestamps */ | 157 | #define EVIOCSCLOCKID _IOW('E', 0xa0, int) /* Set clockid to be used for timestamps */ |
157 | 158 | ||
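EVIOCREVOKE lets a privileged manager (e.g. a session or compositor process that hands evdev fds to clients) revoke a client's access without closing the fd: the descriptor stays open but further reads and ioctls fail. A hedged userspace sketch; treating the zero argument as required is an assumption based on how this ioctl is conventionally invoked:

	#include <linux/input.h>
	#include <sys/ioctl.h>

	/* Called on e.g. VT switch; the client keeps its fd but can no
	 * longer see input events through it. */
	static int revoke_evdev(int fd)
	{
		return ioctl(fd, EVIOCREVOKE, NULL);
	}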
diff --git a/init/Kconfig b/init/Kconfig index 18bd9e3d3274..3ecd8a1178f1 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -1602,7 +1602,7 @@ endchoice | |||
1602 | 1602 | ||
1603 | config SLUB_CPU_PARTIAL | 1603 | config SLUB_CPU_PARTIAL |
1604 | default y | 1604 | default y |
1605 | depends on SLUB | 1605 | depends on SLUB && SMP |
1606 | bool "SLUB per cpu partial cache" | 1606 | bool "SLUB per cpu partial cache" |
1607 | help | 1607 | help |
1608 | Per cpu partial caches accelerate object allocation and freeing | 1608 | Per cpu partial caches accelerate object allocation and freeing |
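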
diff --git a/kernel/fork.c b/kernel/fork.c index 81ccb4f010c2..086fe73ad6bd 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -519,7 +519,7 @@ static void mm_init_aio(struct mm_struct *mm) | |||
519 | { | 519 | { |
520 | #ifdef CONFIG_AIO | 520 | #ifdef CONFIG_AIO |
521 | spin_lock_init(&mm->ioctx_lock); | 521 | spin_lock_init(&mm->ioctx_lock); |
522 | INIT_HLIST_HEAD(&mm->ioctx_list); | 522 | mm->ioctx_table = NULL; |
523 | #endif | 523 | #endif |
524 | } | 524 | } |
525 | 525 | ||
diff --git a/mm/migrate.c b/mm/migrate.c index b7ded7eafe3a..9c8d5f59d30b 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -311,7 +311,7 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head, | |||
311 | * 2 for pages with a mapping | 311 | * 2 for pages with a mapping |
312 | * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. | 312 | * 3 for pages with a mapping and PagePrivate/PagePrivate2 set. |
313 | */ | 313 | */ |
314 | static int migrate_page_move_mapping(struct address_space *mapping, | 314 | int migrate_page_move_mapping(struct address_space *mapping, |
315 | struct page *newpage, struct page *page, | 315 | struct page *newpage, struct page *page, |
316 | struct buffer_head *head, enum migrate_mode mode) | 316 | struct buffer_head *head, enum migrate_mode mode) |
317 | { | 317 | { |
diff --git a/mm/page_io.c b/mm/page_io.c index ba05b64e5d8d..8c79a4764be0 100644 --- a/mm/page_io.c +++ b/mm/page_io.c | |||
@@ -266,7 +266,6 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc, | |||
266 | 266 | ||
267 | init_sync_kiocb(&kiocb, swap_file); | 267 | init_sync_kiocb(&kiocb, swap_file); |
268 | kiocb.ki_pos = page_file_offset(page); | 268 | kiocb.ki_pos = page_file_offset(page); |
269 | kiocb.ki_left = PAGE_SIZE; | ||
270 | kiocb.ki_nbytes = PAGE_SIZE; | 269 | kiocb.ki_nbytes = PAGE_SIZE; |
271 | 270 | ||
272 | set_page_writeback(page); | 271 | set_page_writeback(page); |
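ki_left tracked the bytes remaining across aio retries; with the retry machinery gone it always mirrored ki_nbytes, so the swap-out path now fills in the single surviving length field (net/socket.c at the end of this series gets the same treatment). The stack-kiocb pattern after this change, mirroring the context lines above:

	struct kiocb kiocb;

	init_sync_kiocb(&kiocb, swap_file);
	kiocb.ki_pos = page_file_offset(page);
	kiocb.ki_nbytes = PAGE_SIZE;	/* ki_left is gone */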
diff --git a/mm/slab_common.c b/mm/slab_common.c index 538bade6df7d..a3443278ce3a 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/tlbflush.h> | 19 | #include <asm/tlbflush.h> |
20 | #include <asm/page.h> | 20 | #include <asm/page.h> |
21 | #include <linux/memcontrol.h> | 21 | #include <linux/memcontrol.h> |
22 | #include <trace/events/kmem.h> | ||
22 | 23 | ||
23 | #include "slab.h" | 24 | #include "slab.h" |
24 | 25 | ||
@@ -373,7 +374,7 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags) | |||
373 | { | 374 | { |
374 | int index; | 375 | int index; |
375 | 376 | ||
376 | if (size > KMALLOC_MAX_SIZE) { | 377 | if (unlikely(size > KMALLOC_MAX_SIZE)) { |
377 | WARN_ON_ONCE(!(flags & __GFP_NOWARN)); | 378 | WARN_ON_ONCE(!(flags & __GFP_NOWARN)); |
378 | return NULL; | 379 | return NULL; |
379 | } | 380 | } |
@@ -495,6 +496,15 @@ void __init create_kmalloc_caches(unsigned long flags) | |||
495 | } | 496 | } |
496 | #endif /* !CONFIG_SLOB */ | 497 | #endif /* !CONFIG_SLOB */ |
497 | 498 | ||
499 | #ifdef CONFIG_TRACING | ||
500 | void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
501 | { | ||
502 | void *ret = kmalloc_order(size, flags, order); | ||
503 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); | ||
504 | return ret; | ||
505 | } | ||
506 | EXPORT_SYMBOL(kmalloc_order_trace); | ||
507 | #endif | ||
498 | 508 | ||
499 | #ifdef CONFIG_SLABINFO | 509 | #ifdef CONFIG_SLABINFO |
500 | 510 | ||
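kmalloc_order_trace() is now defined once here in common code; the duplicate in mm/slub.c is deleted further down. Illustrative call chain for a large constant-size allocation, assuming a 4K-page SLUB build with CONFIG_TRACING=y:

	/*
	 * kmalloc(16384, GFP_KERNEL)
	 *   -> kmalloc_large()            size > KMALLOC_MAX_CACHE_SIZE
	 *   -> kmalloc_order_trace(16384, GFP_KERNEL, 2)
	 *        kmalloc_order():  __get_free_pages(flags | __GFP_COMP, 2)
	 *        trace_kmalloc(_RET_IP_, ret, 16384, PAGE_SIZE << 2, flags)
	 */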
diff --git a/mm/slob.c b/mm/slob.c --- a/mm/slob.c +++ b/mm/slob.c | |||
@@ -462,11 +462,11 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller) | |||
462 | return ret; | 462 | return ret; |
463 | } | 463 | } |
464 | 464 | ||
465 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | 465 | void *__kmalloc(size_t size, gfp_t gfp) |
466 | { | 466 | { |
467 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | 467 | return __do_kmalloc_node(size, gfp, NUMA_NO_NODE, _RET_IP_); |
468 | } | 468 | } |
469 | EXPORT_SYMBOL(__kmalloc_node); | 469 | EXPORT_SYMBOL(__kmalloc); |
470 | 470 | ||
471 | #ifdef CONFIG_TRACING | 471 | #ifdef CONFIG_TRACING |
472 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) | 472 | void *__kmalloc_track_caller(size_t size, gfp_t gfp, unsigned long caller) |
@@ -534,7 +534,7 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags) | |||
534 | return 0; | 534 | return 0; |
535 | } | 535 | } |
536 | 536 | ||
537 | void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | 537 | void *slob_alloc_node(struct kmem_cache *c, gfp_t flags, int node) |
538 | { | 538 | { |
539 | void *b; | 539 | void *b; |
540 | 540 | ||
@@ -560,7 +560,27 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) | |||
560 | kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); | 560 | kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); |
561 | return b; | 561 | return b; |
562 | } | 562 | } |
563 | EXPORT_SYMBOL(slob_alloc_node); | ||
564 | |||
565 | void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags) | ||
566 | { | ||
567 | return slob_alloc_node(cachep, flags, NUMA_NO_NODE); | ||
568 | } | ||
569 | EXPORT_SYMBOL(kmem_cache_alloc); | ||
570 | |||
571 | #ifdef CONFIG_NUMA | ||
572 | void *__kmalloc_node(size_t size, gfp_t gfp, int node) | ||
573 | { | ||
574 | return __do_kmalloc_node(size, gfp, node, _RET_IP_); | ||
575 | } | ||
576 | EXPORT_SYMBOL(__kmalloc_node); | ||
577 | |||
578 | void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t gfp, int node) | ||
579 | { | ||
580 | return slob_alloc_node(cachep, gfp, node); | ||
581 | } | ||
563 | EXPORT_SYMBOL(kmem_cache_alloc_node); | 582 | EXPORT_SYMBOL(kmem_cache_alloc_node); |
583 | #endif | ||
564 | 584 | ||
565 | static void __kmem_cache_free(void *b, int size) | 585 | static void __kmem_cache_free(void *b, int size) |
566 | { | 586 | { |
diff --git a/mm/slub.c b/mm/slub.c --- a/mm/slub.c +++ b/mm/slub.c | |||
@@ -373,7 +373,8 @@ static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page | |||
373 | #endif | 373 | #endif |
374 | { | 374 | { |
375 | slab_lock(page); | 375 | slab_lock(page); |
376 | if (page->freelist == freelist_old && page->counters == counters_old) { | 376 | if (page->freelist == freelist_old && |
377 | page->counters == counters_old) { | ||
377 | page->freelist = freelist_new; | 378 | page->freelist = freelist_new; |
378 | page->counters = counters_new; | 379 | page->counters = counters_new; |
379 | slab_unlock(page); | 380 | slab_unlock(page); |
@@ -411,7 +412,8 @@ static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page, | |||
411 | 412 | ||
412 | local_irq_save(flags); | 413 | local_irq_save(flags); |
413 | slab_lock(page); | 414 | slab_lock(page); |
414 | if (page->freelist == freelist_old && page->counters == counters_old) { | 415 | if (page->freelist == freelist_old && |
416 | page->counters == counters_old) { | ||
415 | page->freelist = freelist_new; | 417 | page->freelist = freelist_new; |
416 | page->counters = counters_new; | 418 | page->counters = counters_new; |
417 | slab_unlock(page); | 419 | slab_unlock(page); |
@@ -553,8 +555,9 @@ static void print_tracking(struct kmem_cache *s, void *object) | |||
553 | 555 | ||
554 | static void print_page_info(struct page *page) | 556 | static void print_page_info(struct page *page) |
555 | { | 557 | { |
556 | printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", | 558 | printk(KERN_ERR |
557 | page, page->objects, page->inuse, page->freelist, page->flags); | 559 | "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n", |
560 | page, page->objects, page->inuse, page->freelist, page->flags); | ||
558 | 561 | ||
559 | } | 562 | } |
560 | 563 | ||
@@ -629,7 +632,8 @@ static void object_err(struct kmem_cache *s, struct page *page, | |||
629 | print_trailer(s, page, object); | 632 | print_trailer(s, page, object); |
630 | } | 633 | } |
631 | 634 | ||
632 | static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...) | 635 | static void slab_err(struct kmem_cache *s, struct page *page, |
636 | const char *fmt, ...) | ||
633 | { | 637 | { |
634 | va_list args; | 638 | va_list args; |
635 | char buf[100]; | 639 | char buf[100]; |
@@ -788,7 +792,8 @@ static int check_object(struct kmem_cache *s, struct page *page, | |||
788 | } else { | 792 | } else { |
789 | if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { | 793 | if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) { |
790 | check_bytes_and_report(s, page, p, "Alignment padding", | 794 | check_bytes_and_report(s, page, p, "Alignment padding", |
791 | endobject, POISON_INUSE, s->inuse - s->object_size); | 795 | endobject, POISON_INUSE, |
796 | s->inuse - s->object_size); | ||
792 | } | 797 | } |
793 | } | 798 | } |
794 | 799 | ||
@@ -873,7 +878,6 @@ static int on_freelist(struct kmem_cache *s, struct page *page, void *search) | |||
873 | object_err(s, page, object, | 878 | object_err(s, page, object, |
874 | "Freechain corrupt"); | 879 | "Freechain corrupt"); |
875 | set_freepointer(s, object, NULL); | 880 | set_freepointer(s, object, NULL); |
876 | break; | ||
877 | } else { | 881 | } else { |
878 | slab_err(s, page, "Freepointer corrupt"); | 882 | slab_err(s, page, "Freepointer corrupt"); |
879 | page->freelist = NULL; | 883 | page->freelist = NULL; |
@@ -918,7 +922,8 @@ static void trace(struct kmem_cache *s, struct page *page, void *object, | |||
918 | page->freelist); | 922 | page->freelist); |
919 | 923 | ||
920 | if (!alloc) | 924 | if (!alloc) |
921 | print_section("Object ", (void *)object, s->object_size); | 925 | print_section("Object ", (void *)object, |
926 | s->object_size); | ||
922 | 927 | ||
923 | dump_stack(); | 928 | dump_stack(); |
924 | } | 929 | } |
@@ -937,7 +942,8 @@ static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags) | |||
937 | return should_failslab(s->object_size, flags, s->flags); | 942 | return should_failslab(s->object_size, flags, s->flags); |
938 | } | 943 | } |
939 | 944 | ||
940 | static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags, void *object) | 945 | static inline void slab_post_alloc_hook(struct kmem_cache *s, |
946 | gfp_t flags, void *object) | ||
941 | { | 947 | { |
942 | flags &= gfp_allowed_mask; | 948 | flags &= gfp_allowed_mask; |
943 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); | 949 | kmemcheck_slab_alloc(s, flags, object, slab_ksize(s)); |
@@ -1039,7 +1045,8 @@ static void setup_object_debug(struct kmem_cache *s, struct page *page, | |||
1039 | init_tracking(s, object); | 1045 | init_tracking(s, object); |
1040 | } | 1046 | } |
1041 | 1047 | ||
1042 | static noinline int alloc_debug_processing(struct kmem_cache *s, struct page *page, | 1048 | static noinline int alloc_debug_processing(struct kmem_cache *s, |
1049 | struct page *page, | ||
1043 | void *object, unsigned long addr) | 1050 | void *object, unsigned long addr) |
1044 | { | 1051 | { |
1045 | if (!check_slab(s, page)) | 1052 | if (!check_slab(s, page)) |
@@ -1743,7 +1750,8 @@ static void init_kmem_cache_cpus(struct kmem_cache *s) | |||
1743 | /* | 1750 | /* |
1744 | * Remove the cpu slab | 1751 | * Remove the cpu slab |
1745 | */ | 1752 | */ |
1746 | static void deactivate_slab(struct kmem_cache *s, struct page *page, void *freelist) | 1753 | static void deactivate_slab(struct kmem_cache *s, struct page *page, |
1754 | void *freelist) | ||
1747 | { | 1755 | { |
1748 | enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; | 1756 | enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE }; |
1749 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); | 1757 | struct kmem_cache_node *n = get_node(s, page_to_nid(page)); |
@@ -1999,7 +2007,8 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain) | |||
1999 | page->pobjects = pobjects; | 2007 | page->pobjects = pobjects; |
2000 | page->next = oldpage; | 2008 | page->next = oldpage; |
2001 | 2009 | ||
2002 | } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) != oldpage); | 2010 | } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page) |
2011 | != oldpage); | ||
2003 | #endif | 2012 | #endif |
2004 | } | 2013 | } |
2005 | 2014 | ||
@@ -2169,8 +2178,8 @@ static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags) | |||
2169 | } | 2178 | } |
2170 | 2179 | ||
2171 | /* | 2180 | /* |
2172 | * Check the page->freelist of a page and either transfer the freelist to the per cpu freelist | 2181 | * Check the page->freelist of a page and either transfer the freelist to the |
2173 | * or deactivate the page. | 2182 | * per cpu freelist or deactivate the page. |
2174 | * | 2183 | * |
2175 | * The page is still frozen if the return value is not NULL. | 2184 | * The page is still frozen if the return value is not NULL. |
2176 | * | 2185 | * |
@@ -2314,7 +2323,8 @@ new_slab: | |||
2314 | goto load_freelist; | 2323 | goto load_freelist; |
2315 | 2324 | ||
2316 | /* Only entered in the debug case */ | 2325 | /* Only entered in the debug case */ |
2317 | if (kmem_cache_debug(s) && !alloc_debug_processing(s, page, freelist, addr)) | 2326 | if (kmem_cache_debug(s) && |
2327 | !alloc_debug_processing(s, page, freelist, addr)) | ||
2318 | goto new_slab; /* Slab failed checks. Next slab needed */ | 2328 | goto new_slab; /* Slab failed checks. Next slab needed */ |
2319 | 2329 | ||
2320 | deactivate_slab(s, page, get_freepointer(s, freelist)); | 2330 | deactivate_slab(s, page, get_freepointer(s, freelist)); |
@@ -2372,7 +2382,7 @@ redo: | |||
2372 | 2382 | ||
2373 | object = c->freelist; | 2383 | object = c->freelist; |
2374 | page = c->page; | 2384 | page = c->page; |
2375 | if (unlikely(!object || !page || !node_match(page, node))) | 2385 | if (unlikely(!object || !node_match(page, node))) |
2376 | object = __slab_alloc(s, gfpflags, node, addr, c); | 2386 | object = __slab_alloc(s, gfpflags, node, addr, c); |
2377 | 2387 | ||
2378 | else { | 2388 | else { |
@@ -2382,13 +2392,15 @@ redo: | |||
2382 | * The cmpxchg will only match if there was no additional | 2392 | * The cmpxchg will only match if there was no additional |
2383 | * operation and if we are on the right processor. | 2393 | * operation and if we are on the right processor. |
2384 | * | 2394 | * |
2385 | * The cmpxchg does the following atomically (without lock semantics!) | 2395 | * The cmpxchg does the following atomically (without lock |
2396 | * semantics!) | ||
2386 | * 1. Relocate first pointer to the current per cpu area. | 2397 | * 1. Relocate first pointer to the current per cpu area. |
2387 | * 2. Verify that tid and freelist have not been changed | 2398 | * 2. Verify that tid and freelist have not been changed |
2388 | * 3. If they were not changed replace tid and freelist | 2399 | * 3. If they were not changed replace tid and freelist |
2389 | * | 2400 | * |
2390 | * Since this is without lock semantics the protection is only against | 2401 | * Since this is without lock semantics the protection is only |
2391 | * code executing on this cpu *not* from access by other cpus. | 2402 | * against code executing on this cpu *not* from access by |
2403 | * other cpus. | ||
2392 | */ | 2404 | */ |
2393 | if (unlikely(!this_cpu_cmpxchg_double( | 2405 | if (unlikely(!this_cpu_cmpxchg_double( |
2394 | s->cpu_slab->freelist, s->cpu_slab->tid, | 2406 | s->cpu_slab->freelist, s->cpu_slab->tid, |
@@ -2420,7 +2432,8 @@ void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags) | |||
2420 | { | 2432 | { |
2421 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); | 2433 | void *ret = slab_alloc(s, gfpflags, _RET_IP_); |
2422 | 2434 | ||
2423 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, s->size, gfpflags); | 2435 | trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size, |
2436 | s->size, gfpflags); | ||
2424 | 2437 | ||
2425 | return ret; | 2438 | return ret; |
2426 | } | 2439 | } |
@@ -2434,14 +2447,6 @@ void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size) | |||
2434 | return ret; | 2447 | return ret; |
2435 | } | 2448 | } |
2436 | EXPORT_SYMBOL(kmem_cache_alloc_trace); | 2449 | EXPORT_SYMBOL(kmem_cache_alloc_trace); |
2437 | |||
2438 | void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order) | ||
2439 | { | ||
2440 | void *ret = kmalloc_order(size, flags, order); | ||
2441 | trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags); | ||
2442 | return ret; | ||
2443 | } | ||
2444 | EXPORT_SYMBOL(kmalloc_order_trace); | ||
2445 | #endif | 2450 | #endif |
2446 | 2451 | ||
2447 | #ifdef CONFIG_NUMA | 2452 | #ifdef CONFIG_NUMA |
@@ -2512,8 +2517,10 @@ static void __slab_free(struct kmem_cache *s, struct page *page, | |||
2512 | if (kmem_cache_has_cpu_partial(s) && !prior) | 2517 | if (kmem_cache_has_cpu_partial(s) && !prior) |
2513 | 2518 | ||
2514 | /* | 2519 | /* |
2515 | * Slab was on no list before and will be partially empty | 2520 | * Slab was on no list before and will be |
2516 | * We can defer the list move and instead freeze it. | 2521 | * partially empty |
2522 | * We can defer the list move and instead | ||
2523 | * freeze it. | ||
2517 | */ | 2524 | */ |
2518 | new.frozen = 1; | 2525 | new.frozen = 1; |
2519 | 2526 | ||
@@ -3071,8 +3078,8 @@ static int kmem_cache_open(struct kmem_cache *s, unsigned long flags) | |||
3071 | * A) The number of objects from per cpu partial slabs dumped to the | 3078 | * A) The number of objects from per cpu partial slabs dumped to the |
3072 | * per node list when we reach the limit. | 3079 | * per node list when we reach the limit. |
3073 | * B) The number of objects in cpu partial slabs to extract from the | 3080 | * B) The number of objects in cpu partial slabs to extract from the |
3074 | * per node list when we run out of per cpu objects. We only fetch 50% | 3081 | * per node list when we run out of per cpu objects. We only fetch |
3075 | * to keep some capacity around for frees. | 3082 | * 50% to keep some capacity around for frees. |
3076 | */ | 3083 | */ |
3077 | if (!kmem_cache_has_cpu_partial(s)) | 3084 | if (!kmem_cache_has_cpu_partial(s)) |
3078 | s->cpu_partial = 0; | 3085 | s->cpu_partial = 0; |
@@ -3099,8 +3106,8 @@ error: | |||
3099 | if (flags & SLAB_PANIC) | 3106 | if (flags & SLAB_PANIC) |
3100 | panic("Cannot create slab %s size=%lu realsize=%u " | 3107 | panic("Cannot create slab %s size=%lu realsize=%u " |
3101 | "order=%u offset=%u flags=%lx\n", | 3108 | "order=%u offset=%u flags=%lx\n", |
3102 | s->name, (unsigned long)s->size, s->size, oo_order(s->oo), | 3109 | s->name, (unsigned long)s->size, s->size, |
3103 | s->offset, flags); | 3110 | oo_order(s->oo), s->offset, flags); |
3104 | return -EINVAL; | 3111 | return -EINVAL; |
3105 | } | 3112 | } |
3106 | 3113 | ||
@@ -3316,42 +3323,6 @@ size_t ksize(const void *object) | |||
3316 | } | 3323 | } |
3317 | EXPORT_SYMBOL(ksize); | 3324 | EXPORT_SYMBOL(ksize); |
3318 | 3325 | ||
3319 | #ifdef CONFIG_SLUB_DEBUG | ||
3320 | bool verify_mem_not_deleted(const void *x) | ||
3321 | { | ||
3322 | struct page *page; | ||
3323 | void *object = (void *)x; | ||
3324 | unsigned long flags; | ||
3325 | bool rv; | ||
3326 | |||
3327 | if (unlikely(ZERO_OR_NULL_PTR(x))) | ||
3328 | return false; | ||
3329 | |||
3330 | local_irq_save(flags); | ||
3331 | |||
3332 | page = virt_to_head_page(x); | ||
3333 | if (unlikely(!PageSlab(page))) { | ||
3334 | /* maybe it was from stack? */ | ||
3335 | rv = true; | ||
3336 | goto out_unlock; | ||
3337 | } | ||
3338 | |||
3339 | slab_lock(page); | ||
3340 | if (on_freelist(page->slab_cache, page, object)) { | ||
3341 | object_err(page->slab_cache, page, object, "Object is on free-list"); | ||
3342 | rv = false; | ||
3343 | } else { | ||
3344 | rv = true; | ||
3345 | } | ||
3346 | slab_unlock(page); | ||
3347 | |||
3348 | out_unlock: | ||
3349 | local_irq_restore(flags); | ||
3350 | return rv; | ||
3351 | } | ||
3352 | EXPORT_SYMBOL(verify_mem_not_deleted); | ||
3353 | #endif | ||
3354 | |||
3355 | void kfree(const void *x) | 3326 | void kfree(const void *x) |
3356 | { | 3327 | { |
3357 | struct page *page; | 3328 | struct page *page; |
@@ -4162,15 +4133,17 @@ static int list_locations(struct kmem_cache *s, char *buf, | |||
4162 | !cpumask_empty(to_cpumask(l->cpus)) && | 4133 | !cpumask_empty(to_cpumask(l->cpus)) && |
4163 | len < PAGE_SIZE - 60) { | 4134 | len < PAGE_SIZE - 60) { |
4164 | len += sprintf(buf + len, " cpus="); | 4135 | len += sprintf(buf + len, " cpus="); |
4165 | len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50, | 4136 | len += cpulist_scnprintf(buf + len, |
4137 | PAGE_SIZE - len - 50, | ||
4166 | to_cpumask(l->cpus)); | 4138 | to_cpumask(l->cpus)); |
4167 | } | 4139 | } |
4168 | 4140 | ||
4169 | if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && | 4141 | if (nr_online_nodes > 1 && !nodes_empty(l->nodes) && |
4170 | len < PAGE_SIZE - 60) { | 4142 | len < PAGE_SIZE - 60) { |
4171 | len += sprintf(buf + len, " nodes="); | 4143 | len += sprintf(buf + len, " nodes="); |
4172 | len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50, | 4144 | len += nodelist_scnprintf(buf + len, |
4173 | l->nodes); | 4145 | PAGE_SIZE - len - 50, |
4146 | l->nodes); | ||
4174 | } | 4147 | } |
4175 | 4148 | ||
4176 | len += sprintf(buf + len, "\n"); | 4149 | len += sprintf(buf + len, "\n"); |
@@ -4268,18 +4241,17 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
4268 | int node; | 4241 | int node; |
4269 | int x; | 4242 | int x; |
4270 | unsigned long *nodes; | 4243 | unsigned long *nodes; |
4271 | unsigned long *per_cpu; | ||
4272 | 4244 | ||
4273 | nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); | 4245 | nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL); |
4274 | if (!nodes) | 4246 | if (!nodes) |
4275 | return -ENOMEM; | 4247 | return -ENOMEM; |
4276 | per_cpu = nodes + nr_node_ids; | ||
4277 | 4248 | ||
4278 | if (flags & SO_CPU) { | 4249 | if (flags & SO_CPU) { |
4279 | int cpu; | 4250 | int cpu; |
4280 | 4251 | ||
4281 | for_each_possible_cpu(cpu) { | 4252 | for_each_possible_cpu(cpu) { |
4282 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu); | 4253 | struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, |
4254 | cpu); | ||
4283 | int node; | 4255 | int node; |
4284 | struct page *page; | 4256 | struct page *page; |
4285 | 4257 | ||
@@ -4304,8 +4276,6 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
4304 | total += x; | 4276 | total += x; |
4305 | nodes[node] += x; | 4277 | nodes[node] += x; |
4306 | } | 4278 | } |
4307 | |||
4308 | per_cpu[node]++; | ||
4309 | } | 4279 | } |
4310 | } | 4280 | } |
4311 | 4281 | ||
@@ -4315,12 +4285,11 @@ static ssize_t show_slab_objects(struct kmem_cache *s, | |||
4315 | for_each_node_state(node, N_NORMAL_MEMORY) { | 4285 | for_each_node_state(node, N_NORMAL_MEMORY) { |
4316 | struct kmem_cache_node *n = get_node(s, node); | 4286 | struct kmem_cache_node *n = get_node(s, node); |
4317 | 4287 | ||
4318 | if (flags & SO_TOTAL) | 4288 | if (flags & SO_TOTAL) |
4319 | x = atomic_long_read(&n->total_objects); | 4289 | x = atomic_long_read(&n->total_objects); |
4320 | else if (flags & SO_OBJECTS) | 4290 | else if (flags & SO_OBJECTS) |
4321 | x = atomic_long_read(&n->total_objects) - | 4291 | x = atomic_long_read(&n->total_objects) - |
4322 | count_partial(n, count_free); | 4292 | count_partial(n, count_free); |
4323 | |||
4324 | else | 4293 | else |
4325 | x = atomic_long_read(&n->nr_slabs); | 4294 | x = atomic_long_read(&n->nr_slabs); |
4326 | total += x; | 4295 | total += x; |
@@ -5136,7 +5105,8 @@ static char *create_unique_id(struct kmem_cache *s) | |||
5136 | 5105 | ||
5137 | #ifdef CONFIG_MEMCG_KMEM | 5106 | #ifdef CONFIG_MEMCG_KMEM |
5138 | if (!is_root_cache(s)) | 5107 | if (!is_root_cache(s)) |
5139 | p += sprintf(p, "-%08d", memcg_cache_id(s->memcg_params->memcg)); | 5108 | p += sprintf(p, "-%08d", |
5109 | memcg_cache_id(s->memcg_params->memcg)); | ||
5140 | #endif | 5110 | #endif |
5141 | 5111 | ||
5142 | BUG_ON(p > name + ID_STR_LENGTH - 1); | 5112 | BUG_ON(p > name + ID_STR_LENGTH - 1); |
diff --git a/net/socket.c b/net/socket.c index 0ceaa5cb9ead..ebed4b68f768 100644 --- a/net/socket.c +++ b/net/socket.c | |||
@@ -854,11 +854,6 @@ int kernel_recvmsg(struct socket *sock, struct msghdr *msg, | |||
854 | } | 854 | } |
855 | EXPORT_SYMBOL(kernel_recvmsg); | 855 | EXPORT_SYMBOL(kernel_recvmsg); |
856 | 856 | ||
857 | static void sock_aio_dtor(struct kiocb *iocb) | ||
858 | { | ||
859 | kfree(iocb->private); | ||
860 | } | ||
861 | |||
862 | static ssize_t sock_sendpage(struct file *file, struct page *page, | 857 | static ssize_t sock_sendpage(struct file *file, struct page *page, |
863 | int offset, size_t size, loff_t *ppos, int more) | 858 | int offset, size_t size, loff_t *ppos, int more) |
864 | { | 859 | { |
@@ -889,12 +884,8 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos, | |||
889 | static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, | 884 | static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb, |
890 | struct sock_iocb *siocb) | 885 | struct sock_iocb *siocb) |
891 | { | 886 | { |
892 | if (!is_sync_kiocb(iocb)) { | 887 | if (!is_sync_kiocb(iocb)) |
893 | siocb = kmalloc(sizeof(*siocb), GFP_KERNEL); | 888 | BUG(); |
894 | if (!siocb) | ||
895 | return NULL; | ||
896 | iocb->ki_dtor = sock_aio_dtor; | ||
897 | } | ||
898 | 889 | ||
899 | siocb->kiocb = iocb; | 890 | siocb->kiocb = iocb; |
900 | iocb->private = siocb; | 891 | iocb->private = siocb; |
@@ -931,7 +922,7 @@ static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, | |||
931 | if (pos != 0) | 922 | if (pos != 0) |
932 | return -ESPIPE; | 923 | return -ESPIPE; |
933 | 924 | ||
934 | if (iocb->ki_left == 0) /* Match SYS5 behaviour */ | 925 | if (iocb->ki_nbytes == 0) /* Match SYS5 behaviour */ |
935 | return 0; | 926 | return 0; |
936 | 927 | ||
937 | 928 | ||
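alloc_sock_iocb() used to kmalloc() a sock_iocb, with sock_aio_dtor() as the destructor, for async callers; since every kiocb reaching the socket layer is now synchronous, the caller's stack copy is always used and an async kiocb is treated as a bug. The calling pattern this assumes, sketched from the surrounding sock_aio_read/sock_aio_write code:

	struct sock_iocb siocb, *x;

	/* iocb must satisfy is_sync_kiocb(); alloc_sock_iocb() BUG()s
	 * otherwise rather than falling back to kmalloc(). */
	x = alloc_sock_iocb(iocb, &siocb);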