40 files changed, 388 insertions, 214 deletions
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt index c907be41d60f..dc23e58ae264 100644 --- a/Documentation/powerpc/transactional_memory.txt +++ b/Documentation/powerpc/transactional_memory.txt | |||
@@ -147,6 +147,25 @@ Example signal handler: | |||
147 | fix_the_problem(ucp->dar); | 147 | fix_the_problem(ucp->dar); |
148 | } | 148 | } |
149 | 149 | ||
150 | When in an active transaction that takes a signal, we need to be careful with | ||
151 | the stack. It's possible that the stack has moved back up after the tbegin. | ||
152 | The obvious case here is when the tbegin is called inside a function that | ||
153 | returns before a tend. In this case, the stack is part of the checkpointed | ||
154 | transactional memory state. If we write over this non-transactionally or in | ||
155 | suspend, we are in trouble because if we get a tm abort, the program counter and | ||
156 | stack pointer will be back at the tbegin but our in-memory stack won't be valid | ||
157 | anymore. | ||
158 | |||
159 | To avoid this, when taking a signal in an active transaction, we need to use | ||
160 | the stack pointer from the checkpointed state, rather than the speculated | ||
161 | state. This ensures that the signal context (written while TM suspended) will be | ||
162 | written below the stack required for the rollback. The transaction is aborted | ||
163 | because of the treclaim, so any memory written between the tbegin and the | ||
164 | signal will be rolled back anyway. | ||
165 | |||
166 | For signals taken in non-TM or suspended mode, we use the | ||
167 | normal/non-checkpointed stack pointer. | ||
168 | |||
150 | 169 | ||
151 | Failure cause codes used by kernel | 170 | Failure cause codes used by kernel |
152 | ================================== | 171 | ================================== |
@@ -155,14 +174,18 @@ These are defined in <asm/reg.h>, and distinguish different reasons why the | |||
155 | kernel aborted a transaction: | 174 | kernel aborted a transaction: |
156 | 175 | ||
157 | TM_CAUSE_RESCHED Thread was rescheduled. | 176 | TM_CAUSE_RESCHED Thread was rescheduled. |
177 | TM_CAUSE_TLBI Software TLB invalidate. | ||
158 | TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. | 178 | TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap. |
159 | TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort | 179 | TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort |
160 | transactions for consistency will use this. | 180 | transactions for consistency will use this. |
161 | TM_CAUSE_SIGNAL Signal delivered. | 181 | TM_CAUSE_SIGNAL Signal delivered. |
162 | TM_CAUSE_MISC Currently unused. | 182 | TM_CAUSE_MISC Currently unused. |
183 | TM_CAUSE_ALIGNMENT Alignment fault. | ||
184 | TM_CAUSE_EMULATE Emulation that touched memory. | ||
163 | 185 | ||
164 | These can be checked by the user program's abort handler as TEXASR[0:7]. | 186 | These can be checked by the user program's abort handler as TEXASR[0:7]. If |
165 | 187 | bit 7 is set, it indicates that the error is considered persistent. For example, | |
188 | a TM_CAUSE_ALIGNMENT will be persistent while a TM_CAUSE_RESCHED will not. | ||
166 | 189 | ||
167 | GDB | 190 | GDB |
168 | === | 191 | === |
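
Returning to the failure cause codes above: a minimal user-space sketch of the TEXASR[0:7] check, assuming GCC's PowerPC HTM builtins (compiled with -mhtm) and the TM_CAUSE_* constants from the uapi <asm/tm.h> added by this patch; it is an illustration, not part of the patch itself:

#include <stdio.h>
#include <stdint.h>
#include <asm/tm.h>             /* TM_CAUSE_* and TM_CAUSE_PERSISTENT */

static void run_transaction(void)
{
        if (__builtin_tbegin(0)) {
                /* Transactional path: do the speculative work, then commit. */
                __builtin_tend(0);
                return;
        }

        /* Failure path: TEXASR bits 0:7 hold the kernel's cause code. */
        uint64_t texasr = __builtin_get_texasr();
        unsigned int cause = texasr >> 56;

        if (cause & TM_CAUSE_PERSISTENT)
                printf("persistent abort 0x%x: use a non-transactional fallback\n", cause);
        else
                printf("transient abort 0x%x: safe to retry\n", cause);
}

A TM_CAUSE_ALIGNMENT or TM_CAUSE_EMULATE abort reported by the kernel takes the persistent branch here, while TM_CAUSE_RESCHED does not.
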
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index cf4df8e2139a..0c7f2bfcf134 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -264,6 +264,7 @@ | |||
264 | #define H_GET_MPP 0x2D4 | 264 | #define H_GET_MPP 0x2D4 |
265 | #define H_HOME_NODE_ASSOCIATIVITY 0x2EC | 265 | #define H_HOME_NODE_ASSOCIATIVITY 0x2EC |
266 | #define H_BEST_ENERGY 0x2F4 | 266 | #define H_BEST_ENERGY 0x2F4 |
267 | #define H_XIRR_X 0x2FC | ||
267 | #define H_RANDOM 0x300 | 268 | #define H_RANDOM 0x300 |
268 | #define H_COP 0x304 | 269 | #define H_COP 0x304 |
269 | #define H_GET_MPP_X 0x314 | 270 | #define H_GET_MPP_X 0x314 |
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index cea8496091ff..2f1b6c5f8174 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -523,6 +523,17 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946) | |||
523 | #define PPC440EP_ERR42 | 523 | #define PPC440EP_ERR42 |
524 | #endif | 524 | #endif |
525 | 525 | ||
526 | /* The following stops all load and store data streams associated with stream | ||
527 | * ID (ie. streams created explicitly). The embedded and server mnemonics for | ||
528 | * dcbt are different so we use machine "power4" here explicitly. | ||
529 | */ | ||
530 | #define DCBT_STOP_ALL_STREAM_IDS(scratch) \ | ||
531 | .machine push ; \ | ||
532 | .machine "power4" ; \ | ||
533 | lis scratch,0x60000000@h; \ | ||
534 | dcbt r0,scratch,0b01010; \ | ||
535 | .machine pop | ||
536 | |||
526 | /* | 537 | /* |
527 | * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them | 538 | * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them |
528 | * keep the address intact to be compatible with code shared with | 539 | * keep the address intact to be compatible with code shared with |
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 594db6bc093c..14a658363698 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -409,21 +409,16 @@ static inline void prefetchw(const void *x) | |||
409 | #endif | 409 | #endif |
410 | 410 | ||
411 | #ifdef CONFIG_PPC64 | 411 | #ifdef CONFIG_PPC64 |
412 | static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | 412 | static inline unsigned long get_clean_sp(unsigned long sp, int is_32) |
413 | { | 413 | { |
414 | unsigned long sp; | ||
415 | |||
416 | if (is_32) | 414 | if (is_32) |
417 | sp = regs->gpr[1] & 0x0ffffffffUL; | 415 | return sp & 0x0ffffffffUL; |
418 | else | ||
419 | sp = regs->gpr[1]; | ||
420 | |||
421 | return sp; | 416 | return sp; |
422 | } | 417 | } |
423 | #else | 418 | #else |
424 | static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32) | 419 | static inline unsigned long get_clean_sp(unsigned long sp, int is_32) |
425 | { | 420 | { |
426 | return regs->gpr[1]; | 421 | return sp; |
427 | } | 422 | } |
428 | #endif | 423 | #endif |
429 | 424 | ||
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index a6136515c7f2..4a9e408644fe 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -111,17 +111,6 @@ | |||
111 | #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) | 111 | #define MSR_TM_TRANSACTIONAL(x) (((x) & MSR_TS_MASK) == MSR_TS_T) |
112 | #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) | 112 | #define MSR_TM_SUSPENDED(x) (((x) & MSR_TS_MASK) == MSR_TS_S) |
113 | 113 | ||
114 | /* Reason codes describing kernel causes for transaction aborts. By | ||
115 | convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if | ||
116 | the failure is persistent. | ||
117 | */ | ||
118 | #define TM_CAUSE_RESCHED 0xfe | ||
119 | #define TM_CAUSE_TLBI 0xfc | ||
120 | #define TM_CAUSE_FAC_UNAV 0xfa | ||
121 | #define TM_CAUSE_SYSCALL 0xf9 /* Persistent */ | ||
122 | #define TM_CAUSE_MISC 0xf6 | ||
123 | #define TM_CAUSE_SIGNAL 0xf4 | ||
124 | |||
125 | #if defined(CONFIG_PPC_BOOK3S_64) | 114 | #if defined(CONFIG_PPC_BOOK3S_64) |
126 | #define MSR_64BIT MSR_SF | 115 | #define MSR_64BIT MSR_SF |
127 | 116 | ||
diff --git a/arch/powerpc/include/asm/signal.h b/arch/powerpc/include/asm/signal.h index fbe66c463891..9322c28aebd2 100644 --- a/arch/powerpc/include/asm/signal.h +++ b/arch/powerpc/include/asm/signal.h | |||
@@ -3,5 +3,8 @@ | |||
3 | 3 | ||
4 | #define __ARCH_HAS_SA_RESTORER | 4 | #define __ARCH_HAS_SA_RESTORER |
5 | #include <uapi/asm/signal.h> | 5 | #include <uapi/asm/signal.h> |
6 | #include <uapi/asm/ptrace.h> | ||
7 | |||
8 | extern unsigned long get_tm_stackpointer(struct pt_regs *regs); | ||
6 | 9 | ||
7 | #endif /* _ASM_POWERPC_SIGNAL_H */ | 10 | #endif /* _ASM_POWERPC_SIGNAL_H */ |
diff --git a/arch/powerpc/include/asm/tm.h b/arch/powerpc/include/asm/tm.h index 4b4449abf3f8..9dfbc34bdbf5 100644 --- a/arch/powerpc/include/asm/tm.h +++ b/arch/powerpc/include/asm/tm.h | |||
@@ -5,6 +5,8 @@ | |||
5 | * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. | 5 | * Copyright 2012 Matt Evans & Michael Neuling, IBM Corporation. |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <uapi/asm/tm.h> | ||
9 | |||
8 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | 10 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
9 | extern void do_load_up_transact_fpu(struct thread_struct *thread); | 11 | extern void do_load_up_transact_fpu(struct thread_struct *thread); |
10 | extern void do_load_up_transact_altivec(struct thread_struct *thread); | 12 | extern void do_load_up_transact_altivec(struct thread_struct *thread); |
diff --git a/arch/powerpc/include/uapi/asm/Kbuild b/arch/powerpc/include/uapi/asm/Kbuild index f7bca6370745..5182c8622b54 100644 --- a/arch/powerpc/include/uapi/asm/Kbuild +++ b/arch/powerpc/include/uapi/asm/Kbuild | |||
@@ -40,6 +40,7 @@ header-y += statfs.h | |||
40 | header-y += swab.h | 40 | header-y += swab.h |
41 | header-y += termbits.h | 41 | header-y += termbits.h |
42 | header-y += termios.h | 42 | header-y += termios.h |
43 | header-y += tm.h | ||
43 | header-y += types.h | 44 | header-y += types.h |
44 | header-y += ucontext.h | 45 | header-y += ucontext.h |
45 | header-y += unistd.h | 46 | header-y += unistd.h |
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h new file mode 100644 index 000000000000..85059a00f560 --- /dev/null +++ b/arch/powerpc/include/uapi/asm/tm.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef _ASM_POWERPC_TM_H | ||
2 | #define _ASM_POWERPC_TM_H | ||
3 | |||
4 | /* Reason codes describing kernel causes for transaction aborts. By | ||
5 | * convention, bit0 is copied to TEXASR[56] (IBM bit 7) which is set if | ||
6 | * the failure is persistent. PAPR saves 0xff-0xe0 for the hypervisor. | ||
7 | */ | ||
8 | #define TM_CAUSE_PERSISTENT 0x01 | ||
9 | #define TM_CAUSE_RESCHED 0xde | ||
10 | #define TM_CAUSE_TLBI 0xdc | ||
11 | #define TM_CAUSE_FAC_UNAV 0xda | ||
12 | #define TM_CAUSE_SYSCALL 0xd8 /* future use */ | ||
13 | #define TM_CAUSE_MISC 0xd6 /* future use */ | ||
14 | #define TM_CAUSE_SIGNAL 0xd4 | ||
15 | #define TM_CAUSE_ALIGNMENT 0xd2 | ||
16 | #define TM_CAUSE_EMULATE 0xd0 | ||
17 | |||
18 | #endif | ||
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c60bbec25c1f..1f0937d7d4b5 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -453,7 +453,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
453 | .icache_bsize = 128, | 453 | .icache_bsize = 128, |
454 | .dcache_bsize = 128, | 454 | .dcache_bsize = 128, |
455 | .oprofile_type = PPC_OPROFILE_POWER4, | 455 | .oprofile_type = PPC_OPROFILE_POWER4, |
456 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 456 | .oprofile_cpu_type = 0, |
457 | .cpu_setup = __setup_cpu_power8, | 457 | .cpu_setup = __setup_cpu_power8, |
458 | .cpu_restore = __restore_cpu_power8, | 458 | .cpu_restore = __restore_cpu_power8, |
459 | .platform = "power8", | 459 | .platform = "power8", |
@@ -482,7 +482,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
482 | .cpu_name = "POWER7+ (raw)", | 482 | .cpu_name = "POWER7+ (raw)", |
483 | .cpu_features = CPU_FTRS_POWER7, | 483 | .cpu_features = CPU_FTRS_POWER7, |
484 | .cpu_user_features = COMMON_USER_POWER7, | 484 | .cpu_user_features = COMMON_USER_POWER7, |
485 | .cpu_user_features = COMMON_USER2_POWER7, | 485 | .cpu_user_features2 = COMMON_USER2_POWER7, |
486 | .mmu_features = MMU_FTRS_POWER7, | 486 | .mmu_features = MMU_FTRS_POWER7, |
487 | .icache_bsize = 128, | 487 | .icache_bsize = 128, |
488 | .dcache_bsize = 128, | 488 | .dcache_bsize = 128, |
@@ -506,7 +506,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
506 | .dcache_bsize = 128, | 506 | .dcache_bsize = 128, |
507 | .num_pmcs = 6, | 507 | .num_pmcs = 6, |
508 | .pmc_type = PPC_PMC_IBM, | 508 | .pmc_type = PPC_PMC_IBM, |
509 | .oprofile_cpu_type = "ppc64/power8", | 509 | .oprofile_cpu_type = 0, |
510 | .oprofile_type = PPC_OPROFILE_POWER4, | 510 | .oprofile_type = PPC_OPROFILE_POWER4, |
511 | .cpu_setup = __setup_cpu_power8, | 511 | .cpu_setup = __setup_cpu_power8, |
512 | .cpu_restore = __restore_cpu_power8, | 512 | .cpu_restore = __restore_cpu_power8, |
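
The POWER7+ (raw) fix above hinges on a C detail worth spelling out: when a designated initializer repeats a field, the last value silently wins (GCC only warns with -Woverride-init), so the old entry ended up with .cpu_user_features set to COMMON_USER2_POWER7 and .cpu_user_features2 left at zero. A minimal sketch with a hypothetical struct, not taken from the kernel:

struct demo {
        int a;
        int b;
};

/* The same member is initialized twice: this compiles, and the last
 * initializer wins, so d.a == 2 and d.b == 0 (never mentioned). */
static struct demo d = {
        .a = 1,
        .a = 2,
};
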
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index d22e73e4618b..22b45a4955cd 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -849,7 +849,7 @@ resume_kernel: | |||
849 | /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ | 849 | /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ |
850 | CURRENT_THREAD_INFO(r9, r1) | 850 | CURRENT_THREAD_INFO(r9, r1) |
851 | lwz r8,TI_FLAGS(r9) | 851 | lwz r8,TI_FLAGS(r9) |
852 | andis. r8,r8,_TIF_EMULATE_STACK_STORE@h | 852 | andis. r0,r8,_TIF_EMULATE_STACK_STORE@h |
853 | beq+ 1f | 853 | beq+ 1f |
854 | 854 | ||
855 | addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ | 855 | addi r8,r1,INT_FRAME_SIZE /* Get the kprobed function entry */ |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 0e9095e47b5b..246b11c4fe7e 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -501,6 +501,13 @@ BEGIN_FTR_SECTION | |||
501 | ldarx r6,0,r1 | 501 | ldarx r6,0,r1 |
502 | END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) | 502 | END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) |
503 | 503 | ||
504 | #ifdef CONFIG_PPC_BOOK3S | ||
505 | /* Cancel all explicit user streams as they will have no use after context | ||
506 | * switch and will stop the HW from creating streams itself | ||
507 | */ | ||
508 | DCBT_STOP_ALL_STREAM_IDS(r6) | ||
509 | #endif | ||
510 | |||
504 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | 511 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ |
505 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 512 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
506 | 513 | ||
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index e9acf50dd5b2..7f2273cc3c7d 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -657,15 +657,6 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
657 | * ranges. However, some machines (thanks Apple !) tend to split their | 657 | * ranges. However, some machines (thanks Apple !) tend to split their |
658 | * space into lots of small contiguous ranges. So we have to coalesce. | 658 | * space into lots of small contiguous ranges. So we have to coalesce. |
659 | * | 659 | * |
660 | * - We can only cope with all memory ranges having the same offset | ||
661 | * between CPU addresses and PCI addresses. Unfortunately, some bridges | ||
662 | * are setup for a large 1:1 mapping along with a small "window" which | ||
663 | * maps PCI address 0 to some arbitrary high address of the CPU space in | ||
664 | * order to give access to the ISA memory hole. | ||
665 | * The way out of here that I've chosen for now is to always set the | ||
666 | * offset based on the first resource found, then override it if we | ||
667 | * have a different offset and the previous was set by an ISA hole. | ||
668 | * | ||
669 | * - Some busses have IO space not starting at 0, which causes trouble with | 660 | * - Some busses have IO space not starting at 0, which causes trouble with |
670 | * the way we do our IO resource renumbering. The code somewhat deals with | 661 | * the way we do our IO resource renumbering. The code somewhat deals with |
671 | * it for 64 bits but I would expect problems on 32 bits. | 662 | * it for 64 bits but I would expect problems on 32 bits. |
@@ -680,10 +671,9 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
680 | int rlen; | 671 | int rlen; |
681 | int pna = of_n_addr_cells(dev); | 672 | int pna = of_n_addr_cells(dev); |
682 | int np = pna + 5; | 673 | int np = pna + 5; |
683 | int memno = 0, isa_hole = -1; | 674 | int memno = 0; |
684 | u32 pci_space; | 675 | u32 pci_space; |
685 | unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; | 676 | unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size; |
686 | unsigned long long isa_mb = 0; | ||
687 | struct resource *res; | 677 | struct resource *res; |
688 | 678 | ||
689 | printk(KERN_INFO "PCI host bridge %s %s ranges:\n", | 679 | printk(KERN_INFO "PCI host bridge %s %s ranges:\n", |
@@ -777,8 +767,6 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose, | |||
777 | } | 767 | } |
778 | /* Handles ISA memory hole space here */ | 768 | /* Handles ISA memory hole space here */ |
779 | if (pci_addr == 0) { | 769 | if (pci_addr == 0) { |
780 | isa_mb = cpu_addr; | ||
781 | isa_hole = memno; | ||
782 | if (primary || isa_mem_base == 0) | 770 | if (primary || isa_mem_base == 0) |
783 | isa_mem_base = cpu_addr; | 771 | isa_mem_base = cpu_addr; |
784 | hose->isa_mem_phys = cpu_addr; | 772 | hose->isa_mem_phys = cpu_addr; |
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c index 577a8aa69c6e..457e97aa2945 100644 --- a/arch/powerpc/kernel/signal.c +++ b/arch/powerpc/kernel/signal.c | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/uaccess.h> | 18 | #include <asm/uaccess.h> |
19 | #include <asm/unistd.h> | 19 | #include <asm/unistd.h> |
20 | #include <asm/debug.h> | 20 | #include <asm/debug.h> |
21 | #include <asm/tm.h> | ||
21 | 22 | ||
22 | #include "signal.h" | 23 | #include "signal.h" |
23 | 24 | ||
@@ -30,13 +31,13 @@ int show_unhandled_signals = 1; | |||
30 | /* | 31 | /* |
31 | * Allocate space for the signal frame | 32 | * Allocate space for the signal frame |
32 | */ | 33 | */ |
33 | void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 34 | void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, |
34 | size_t frame_size, int is_32) | 35 | size_t frame_size, int is_32) |
35 | { | 36 | { |
36 | unsigned long oldsp, newsp; | 37 | unsigned long oldsp, newsp; |
37 | 38 | ||
38 | /* Default to using normal stack */ | 39 | /* Default to using normal stack */ |
39 | oldsp = get_clean_sp(regs, is_32); | 40 | oldsp = get_clean_sp(sp, is_32); |
40 | 41 | ||
41 | /* Check for alt stack */ | 42 | /* Check for alt stack */ |
42 | if ((ka->sa.sa_flags & SA_ONSTACK) && | 43 | if ((ka->sa.sa_flags & SA_ONSTACK) && |
@@ -175,3 +176,38 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) | |||
175 | 176 | ||
176 | user_enter(); | 177 | user_enter(); |
177 | } | 178 | } |
179 | |||
180 | unsigned long get_tm_stackpointer(struct pt_regs *regs) | ||
181 | { | ||
182 | /* When in an active transaction that takes a signal, we need to be | ||
183 | * careful with the stack. It's possible that the stack has moved back | ||
184 | * up after the tbegin. The obvious case here is when the tbegin is | ||
185 | * called inside a function that returns before a tend. In this case, | ||
186 | * the stack is part of the checkpointed transactional memory state. | ||
187 | * If we write over this non-transactionally or in suspend, we are in | ||
188 | * trouble because if we get a tm abort, the program counter and stack | ||
189 | * pointer will be back at the tbegin but our in-memory stack won't be | ||
190 | * valid anymore. | ||
191 | * | ||
192 | * To avoid this, when taking a signal in an active transaction, we | ||
193 | * need to use the stack pointer from the checkpointed state, rather | ||
194 | * than the speculated state. This ensures that the signal context | ||
195 | * (written while TM suspended) will be written below the stack required for | ||
196 | * the rollback. The transaction is aborted because of the treclaim, | ||
197 | * so any memory written between the tbegin and the signal will be | ||
198 | * rolled back anyway. | ||
199 | * | ||
200 | * For signals taken in non-TM or suspended mode, we use the | ||
201 | * normal/non-checkpointed stack pointer. | ||
202 | */ | ||
203 | |||
204 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
205 | if (MSR_TM_ACTIVE(regs->msr)) { | ||
206 | tm_enable(); | ||
207 | tm_reclaim(¤t->thread, regs->msr, TM_CAUSE_SIGNAL); | ||
208 | if (MSR_TM_TRANSACTIONAL(regs->msr)) | ||
209 | return current->thread.ckpt_regs.gpr[1]; | ||
210 | } | ||
211 | #endif | ||
212 | return regs->gpr[1]; | ||
213 | } | ||
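
A hedged user-space sketch of the scenario get_tm_stackpointer() guards against, assuming GCC's PowerPC HTM builtins (-mhtm); it illustrates the comment above and is not part of the patch:

#include <signal.h>

static void __attribute__((noinline)) touch(volatile int *p)
{
        *p = 42;                        /* plain store, done before tbegin */
}

/* tbegin happens in a callee that returns, still transactional, before tend. */
static int __attribute__((noinline)) start_tx(void)
{
        volatile int saved;

        touch(&saved);                  /* forces a real frame and fills the slot */
        if (__builtin_tbegin(0))        /* checkpoints the registers, r1 included */
                return 1;               /* back to the caller, transaction active */

        /* Abort path: we resume here with the checkpointed r1.  'saved' was
         * written before tbegin so it is not rolled back, but it survives only
         * if the signal frame was placed below the checkpointed stack pointer
         * rather than the speculated one, which has moved back up into the
         * caller's frame by the time a signal can arrive. */
        return saved == 42 ? 0 : -1;
}

static volatile sig_atomic_t poke;      /* set by some signal handler */

static void work(void)
{
        if (start_tx() > 0) {
                /* Transactional window: a signal delivered here is exactly the
                 * case handled by using the checkpointed r1 above. */
                while (!poke)
                        ;
                __builtin_tend(0);
        }
}
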
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h index ec84c901ceab..c69b9aeb9f23 100644 --- a/arch/powerpc/kernel/signal.h +++ b/arch/powerpc/kernel/signal.h | |||
@@ -12,7 +12,7 @@ | |||
12 | 12 | ||
13 | extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); | 13 | extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); |
14 | 14 | ||
15 | extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, | 15 | extern void __user * get_sigframe(struct k_sigaction *ka, unsigned long sp, |
16 | size_t frame_size, int is_32); | 16 | size_t frame_size, int is_32); |
17 | 17 | ||
18 | extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, | 18 | extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, |
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 95068bf569ad..201385c3a1ae 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -503,12 +503,6 @@ static int save_tm_user_regs(struct pt_regs *regs, | |||
503 | { | 503 | { |
504 | unsigned long msr = regs->msr; | 504 | unsigned long msr = regs->msr; |
505 | 505 | ||
506 | /* tm_reclaim rolls back all reg states, updating thread.ckpt_regs, | ||
507 | * thread.transact_fpr[], thread.transact_vr[], etc. | ||
508 | */ | ||
509 | tm_enable(); | ||
510 | tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); | ||
511 | |||
512 | /* Make sure floating point registers are stored in regs */ | 506 | /* Make sure floating point registers are stored in regs */ |
513 | flush_fp_to_thread(current); | 507 | flush_fp_to_thread(current); |
514 | 508 | ||
@@ -965,7 +959,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka, | |||
965 | 959 | ||
966 | /* Set up Signal Frame */ | 960 | /* Set up Signal Frame */ |
967 | /* Put a Real Time Context onto stack */ | 961 | /* Put a Real Time Context onto stack */ |
968 | rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1); | 962 | rt_sf = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*rt_sf), 1); |
969 | addr = rt_sf; | 963 | addr = rt_sf; |
970 | if (unlikely(rt_sf == NULL)) | 964 | if (unlikely(rt_sf == NULL)) |
971 | goto badframe; | 965 | goto badframe; |
@@ -1403,7 +1397,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka, | |||
1403 | unsigned long tramp; | 1397 | unsigned long tramp; |
1404 | 1398 | ||
1405 | /* Set up Signal Frame */ | 1399 | /* Set up Signal Frame */ |
1406 | frame = get_sigframe(ka, regs, sizeof(*frame), 1); | 1400 | frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 1); |
1407 | if (unlikely(frame == NULL)) | 1401 | if (unlikely(frame == NULL)) |
1408 | goto badframe; | 1402 | goto badframe; |
1409 | sc = (struct sigcontext __user *) &frame->sctx; | 1403 | sc = (struct sigcontext __user *) &frame->sctx; |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index c1794286098c..345947367ec0 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -154,11 +154,12 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
154 | * As above, but Transactional Memory is in use, so deliver sigcontexts | 154 | * As above, but Transactional Memory is in use, so deliver sigcontexts |
155 | * containing checkpointed and transactional register states. | 155 | * containing checkpointed and transactional register states. |
156 | * | 156 | * |
157 | * To do this, we treclaim to gather both sets of registers and set up the | 157 | * To do this, we treclaim (done before entering here) to gather both sets of |
158 | * 'normal' sigcontext registers with rolled-back register values such that a | 158 | * registers and set up the 'normal' sigcontext registers with rolled-back |
159 | * simple signal handler sees a correct checkpointed register state. | 159 | * register values such that a simple signal handler sees a correct |
160 | * If interested, a TM-aware sighandler can examine the transactional registers | 160 | * checkpointed register state. If interested, a TM-aware sighandler can |
161 | * in the 2nd sigcontext to determine the real origin of the signal. | 161 | * examine the transactional registers in the 2nd sigcontext to determine the |
162 | * real origin of the signal. | ||
162 | */ | 163 | */ |
163 | static long setup_tm_sigcontexts(struct sigcontext __user *sc, | 164 | static long setup_tm_sigcontexts(struct sigcontext __user *sc, |
164 | struct sigcontext __user *tm_sc, | 165 | struct sigcontext __user *tm_sc, |
@@ -184,16 +185,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc, | |||
184 | 185 | ||
185 | BUG_ON(!MSR_TM_ACTIVE(regs->msr)); | 186 | BUG_ON(!MSR_TM_ACTIVE(regs->msr)); |
186 | 187 | ||
187 | /* tm_reclaim rolls back all reg states, saving checkpointed (older) | ||
188 | * GPRs to thread.ckpt_regs and (if used) FPRs to (newer) | ||
189 | * thread.transact_fp and/or VRs to (newer) thread.transact_vr. | ||
190 | * THEN we save out FP/VRs, if necessary, to the checkpointed (older) | ||
191 | * thread.fr[]/vr[]s. The transactional (newer) GPRs are on the | ||
192 | * stack, in *regs. | ||
193 | */ | ||
194 | tm_enable(); | ||
195 | tm_reclaim(¤t->thread, msr, TM_CAUSE_SIGNAL); | ||
196 | |||
197 | flush_fp_to_thread(current); | 188 | flush_fp_to_thread(current); |
198 | 189 | ||
199 | #ifdef CONFIG_ALTIVEC | 190 | #ifdef CONFIG_ALTIVEC |
@@ -711,7 +702,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
711 | unsigned long newsp = 0; | 702 | unsigned long newsp = 0; |
712 | long err = 0; | 703 | long err = 0; |
713 | 704 | ||
714 | frame = get_sigframe(ka, regs, sizeof(*frame), 0); | 705 | frame = get_sigframe(ka, get_tm_stackpointer(regs), sizeof(*frame), 0); |
715 | if (unlikely(frame == NULL)) | 706 | if (unlikely(frame == NULL)) |
716 | goto badframe; | 707 | goto badframe; |
717 | 708 | ||
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a7a648f6b750..f18c79c324ef 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #ifdef CONFIG_PPC64 | 53 | #ifdef CONFIG_PPC64 |
54 | #include <asm/firmware.h> | 54 | #include <asm/firmware.h> |
55 | #include <asm/processor.h> | 55 | #include <asm/processor.h> |
56 | #include <asm/tm.h> | ||
56 | #endif | 57 | #endif |
57 | #include <asm/kexec.h> | 58 | #include <asm/kexec.h> |
58 | #include <asm/ppc-opcode.h> | 59 | #include <asm/ppc-opcode.h> |
@@ -932,6 +933,28 @@ static int emulate_isel(struct pt_regs *regs, u32 instword) | |||
932 | return 0; | 933 | return 0; |
933 | } | 934 | } |
934 | 935 | ||
936 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | ||
937 | static inline bool tm_abort_check(struct pt_regs *regs, int cause) | ||
938 | { | ||
939 | /* If we're emulating a load/store in an active transaction, we cannot | ||
940 | * emulate it as the kernel operates in transaction suspended context. | ||
941 | * We need to abort the transaction. This creates a persistent TM | ||
942 | * abort so tell the user what caused it with a new code. | ||
943 | */ | ||
944 | if (MSR_TM_TRANSACTIONAL(regs->msr)) { | ||
945 | tm_enable(); | ||
946 | tm_abort(cause); | ||
947 | return true; | ||
948 | } | ||
949 | return false; | ||
950 | } | ||
951 | #else | ||
952 | static inline bool tm_abort_check(struct pt_regs *regs, int reason) | ||
953 | { | ||
954 | return false; | ||
955 | } | ||
956 | #endif | ||
957 | |||
935 | static int emulate_instruction(struct pt_regs *regs) | 958 | static int emulate_instruction(struct pt_regs *regs) |
936 | { | 959 | { |
937 | u32 instword; | 960 | u32 instword; |
@@ -971,6 +994,9 @@ static int emulate_instruction(struct pt_regs *regs) | |||
971 | 994 | ||
972 | /* Emulate load/store string insn. */ | 995 | /* Emulate load/store string insn. */ |
973 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { | 996 | if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) { |
997 | if (tm_abort_check(regs, | ||
998 | TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT)) | ||
999 | return -EINVAL; | ||
974 | PPC_WARN_EMULATED(string, regs); | 1000 | PPC_WARN_EMULATED(string, regs); |
975 | return emulate_string_inst(regs, instword); | 1001 | return emulate_string_inst(regs, instword); |
976 | } | 1002 | } |
@@ -1148,6 +1174,9 @@ void alignment_exception(struct pt_regs *regs) | |||
1148 | if (!arch_irq_disabled_regs(regs)) | 1174 | if (!arch_irq_disabled_regs(regs)) |
1149 | local_irq_enable(); | 1175 | local_irq_enable(); |
1150 | 1176 | ||
1177 | if (tm_abort_check(regs, TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT)) | ||
1178 | goto bail; | ||
1179 | |||
1151 | /* we don't implement logging of alignment exceptions */ | 1180 | /* we don't implement logging of alignment exceptions */ |
1152 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) | 1181 | if (!(current->thread.align_ctl & PR_UNALIGN_SIGBUS)) |
1153 | fixed = fix_alignment(regs); | 1182 | fixed = fix_alignment(regs); |
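
For reference, the cause byte a user abort handler sees in TEXASR[0:7] for these two new paths, composed from the uapi constants added earlier in this series (the arithmetic is illustration only):

        TM_CAUSE_ALIGNMENT | TM_CAUSE_PERSISTENT        /* 0xd2 | 0x01 == 0xd3 */
        TM_CAUSE_EMULATE   | TM_CAUSE_PERSISTENT        /* 0xd0 | 0x01 == 0xd1 */

Both carry the persistent bit, so re-executing the transaction unchanged will abort again; the handler should take a non-transactional fallback instead.
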
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c index 9de24f8e03c7..550f5928b394 100644 --- a/arch/powerpc/kvm/book3s_hv.c +++ b/arch/powerpc/kvm/book3s_hv.c | |||
@@ -562,6 +562,8 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) | |||
562 | case H_CPPR: | 562 | case H_CPPR: |
563 | case H_EOI: | 563 | case H_EOI: |
564 | case H_IPI: | 564 | case H_IPI: |
565 | case H_IPOLL: | ||
566 | case H_XIRR_X: | ||
565 | if (kvmppc_xics_enabled(vcpu)) { | 567 | if (kvmppc_xics_enabled(vcpu)) { |
566 | ret = kvmppc_xics_hcall(vcpu, req); | 568 | ret = kvmppc_xics_hcall(vcpu, req); |
567 | break; | 569 | break; |
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c index b24309c6c2d5..da0e0bc268bd 100644 --- a/arch/powerpc/kvm/book3s_pr_papr.c +++ b/arch/powerpc/kvm/book3s_pr_papr.c | |||
@@ -257,6 +257,8 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) | |||
257 | case H_CPPR: | 257 | case H_CPPR: |
258 | case H_EOI: | 258 | case H_EOI: |
259 | case H_IPI: | 259 | case H_IPI: |
260 | case H_IPOLL: | ||
261 | case H_XIRR_X: | ||
260 | if (kvmppc_xics_enabled(vcpu)) | 262 | if (kvmppc_xics_enabled(vcpu)) |
261 | return kvmppc_h_pr_xics_hcall(vcpu, cmd); | 263 | return kvmppc_h_pr_xics_hcall(vcpu, cmd); |
262 | break; | 264 | break; |
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c index f7a103756618..94c1dd46b83d 100644 --- a/arch/powerpc/kvm/book3s_xics.c +++ b/arch/powerpc/kvm/book3s_xics.c | |||
@@ -650,6 +650,23 @@ static noinline int kvmppc_h_ipi(struct kvm_vcpu *vcpu, unsigned long server, | |||
650 | return H_SUCCESS; | 650 | return H_SUCCESS; |
651 | } | 651 | } |
652 | 652 | ||
653 | static int kvmppc_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server) | ||
654 | { | ||
655 | union kvmppc_icp_state state; | ||
656 | struct kvmppc_icp *icp; | ||
657 | |||
658 | icp = vcpu->arch.icp; | ||
659 | if (icp->server_num != server) { | ||
660 | icp = kvmppc_xics_find_server(vcpu->kvm, server); | ||
661 | if (!icp) | ||
662 | return H_PARAMETER; | ||
663 | } | ||
664 | state = ACCESS_ONCE(icp->state); | ||
665 | kvmppc_set_gpr(vcpu, 4, ((u32)state.cppr << 24) | state.xisr); | ||
666 | kvmppc_set_gpr(vcpu, 5, state.mfrr); | ||
667 | return H_SUCCESS; | ||
668 | } | ||
669 | |||
653 | static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) | 670 | static noinline void kvmppc_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr) |
654 | { | 671 | { |
655 | union kvmppc_icp_state old_state, new_state; | 672 | union kvmppc_icp_state old_state, new_state; |
@@ -787,6 +804,18 @@ int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 req) | |||
787 | if (!xics || !vcpu->arch.icp) | 804 | if (!xics || !vcpu->arch.icp) |
788 | return H_HARDWARE; | 805 | return H_HARDWARE; |
789 | 806 | ||
807 | /* These requests don't have real-mode implementations at present */ | ||
808 | switch (req) { | ||
809 | case H_XIRR_X: | ||
810 | res = kvmppc_h_xirr(vcpu); | ||
811 | kvmppc_set_gpr(vcpu, 4, res); | ||
812 | kvmppc_set_gpr(vcpu, 5, get_tb()); | ||
813 | return rc; | ||
814 | case H_IPOLL: | ||
815 | rc = kvmppc_h_ipoll(vcpu, kvmppc_get_gpr(vcpu, 4)); | ||
816 | return rc; | ||
817 | } | ||
818 | |||
790 | /* Check for real mode returning too hard */ | 819 | /* Check for real mode returning too hard */ |
791 | if (xics->real_mode) | 820 | if (xics->real_mode) |
792 | return kvmppc_xics_rm_complete(vcpu, req); | 821 | return kvmppc_xics_rm_complete(vcpu, req); |
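
A hedged sketch of how a pseries guest could issue the two hcalls that now reach kvmppc_xics_hcall(), using the generic plpar_hcall() wrapper as an assumed interface; the register mapping mirrors the kvmppc_set_gpr(vcpu, 4/5, ...) writes above and is not part of this patch:

#include <linux/printk.h>
#include <asm/hvcall.h>

static void xics_hcall_example(unsigned long server)
{
        unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

        /* H_XIRR_X: accept an interrupt like H_XIRR, plus a timebase value
         * (the code above returns get_tb() at the time of the call). */
        if (plpar_hcall(H_XIRR_X, retbuf) == H_SUCCESS)
                pr_debug("xirr %#lx, tb %#lx\n", retbuf[0], retbuf[1]);

        /* H_IPOLL: peek at the pending interrupt state of 'server'
         * without actually accepting the interrupt. */
        if (plpar_hcall(H_IPOLL, retbuf, server) == H_SUCCESS)
                pr_debug("cppr||xisr %#lx, pending mfrr %#lx\n",
                         retbuf[0], retbuf[1]);
}
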
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S index 0ef75bf0695c..395c594722a2 100644 --- a/arch/powerpc/lib/copypage_power7.S +++ b/arch/powerpc/lib/copypage_power7.S | |||
@@ -28,13 +28,14 @@ _GLOBAL(copypage_power7) | |||
28 | * aligned we don't need to clear the bottom 7 bits of either | 28 | * aligned we don't need to clear the bottom 7 bits of either |
29 | * address. | 29 | * address. |
30 | */ | 30 | */ |
31 | ori r9,r3,1 /* stream=1 */ | 31 | ori r9,r3,1 /* stream=1 => to */ |
32 | 32 | ||
33 | #ifdef CONFIG_PPC_64K_PAGES | 33 | #ifdef CONFIG_PPC_64K_PAGES |
34 | lis r7,0x0E01 /* depth=7, units=512 */ | 34 | lis r7,0x0E01 /* depth=7 |
35 | * units/cachelines=512 */ | ||
35 | #else | 36 | #else |
36 | lis r7,0x0E00 /* depth=7 */ | 37 | lis r7,0x0E00 /* depth=7 */ |
37 | ori r7,r7,0x1000 /* units=32 */ | 38 | ori r7,r7,0x1000 /* units/cachelines=32 */ |
38 | #endif | 39 | #endif |
39 | ori r10,r7,1 /* stream=1 */ | 40 | ori r10,r7,1 /* stream=1 */ |
40 | 41 | ||
@@ -43,12 +44,14 @@ _GLOBAL(copypage_power7) | |||
43 | 44 | ||
44 | .machine push | 45 | .machine push |
45 | .machine "power4" | 46 | .machine "power4" |
46 | dcbt r0,r4,0b01000 | 47 | /* setup read stream 0 */ |
47 | dcbt r0,r7,0b01010 | 48 | dcbt r0,r4,0b01000 /* addr from */ |
48 | dcbtst r0,r9,0b01000 | 49 | dcbt r0,r7,0b01010 /* length and depth from */ |
49 | dcbtst r0,r10,0b01010 | 50 | /* setup write stream 1 */ |
51 | dcbtst r0,r9,0b01000 /* addr to */ | ||
52 | dcbtst r0,r10,0b01010 /* length and depth to */ | ||
50 | eieio | 53 | eieio |
51 | dcbt r0,r8,0b01010 /* GO */ | 54 | dcbt r0,r8,0b01010 /* all streams GO */ |
52 | .machine pop | 55 | .machine pop |
53 | 56 | ||
54 | #ifdef CONFIG_ALTIVEC | 57 | #ifdef CONFIG_ALTIVEC |
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S index 0d24ff15f5f6..d1f11795a7ad 100644 --- a/arch/powerpc/lib/copyuser_power7.S +++ b/arch/powerpc/lib/copyuser_power7.S | |||
@@ -318,12 +318,14 @@ err1; stb r0,0(r3) | |||
318 | 318 | ||
319 | .machine push | 319 | .machine push |
320 | .machine "power4" | 320 | .machine "power4" |
321 | dcbt r0,r6,0b01000 | 321 | /* setup read stream 0 */ |
322 | dcbt r0,r7,0b01010 | 322 | dcbt r0,r6,0b01000 /* addr from */ |
323 | dcbtst r0,r9,0b01000 | 323 | dcbt r0,r7,0b01010 /* length and depth from */ |
324 | dcbtst r0,r10,0b01010 | 324 | /* setup write stream 1 */ |
325 | dcbtst r0,r9,0b01000 /* addr to */ | ||
326 | dcbtst r0,r10,0b01010 /* length and depth to */ | ||
325 | eieio | 327 | eieio |
326 | dcbt r0,r8,0b01010 /* GO */ | 328 | dcbt r0,r8,0b01010 /* all streams GO */ |
327 | .machine pop | 329 | .machine pop |
328 | 330 | ||
329 | beq cr1,.Lunwind_stack_nonvmx_copy | 331 | beq cr1,.Lunwind_stack_nonvmx_copy |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 6a2aead5b0e5..4c122c3f1623 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -336,11 +336,18 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, | |||
336 | 336 | ||
337 | hpte_v = hptep->v; | 337 | hpte_v = hptep->v; |
338 | actual_psize = hpte_actual_psize(hptep, psize); | 338 | actual_psize = hpte_actual_psize(hptep, psize); |
339 | /* | ||
340 | * We need to invalidate the TLB always because hpte_remove doesn't do | ||
341 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less | ||
342 | * random entry from it. When we do that we don't invalidate the TLB | ||
343 | * (hpte_remove) because we assume the old translation is still | ||
344 | * technically "valid". | ||
345 | */ | ||
339 | if (actual_psize < 0) { | 346 | if (actual_psize < 0) { |
340 | native_unlock_hpte(hptep); | 347 | actual_psize = psize; |
341 | return -1; | 348 | ret = -1; |
349 | goto err_out; | ||
342 | } | 350 | } |
343 | /* Even if we miss, we need to invalidate the TLB */ | ||
344 | if (!HPTE_V_COMPARE(hpte_v, want_v)) { | 351 | if (!HPTE_V_COMPARE(hpte_v, want_v)) { |
345 | DBG_LOW(" -> miss\n"); | 352 | DBG_LOW(" -> miss\n"); |
346 | ret = -1; | 353 | ret = -1; |
@@ -350,6 +357,7 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, | |||
350 | hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | | 357 | hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | |
351 | (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); | 358 | (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C)); |
352 | } | 359 | } |
360 | err_out: | ||
353 | native_unlock_hpte(hptep); | 361 | native_unlock_hpte(hptep); |
354 | 362 | ||
355 | /* Ensure it is out of the tlb too. */ | 363 | /* Ensure it is out of the tlb too. */ |
@@ -409,7 +417,7 @@ static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, | |||
409 | hptep = htab_address + slot; | 417 | hptep = htab_address + slot; |
410 | actual_psize = hpte_actual_psize(hptep, psize); | 418 | actual_psize = hpte_actual_psize(hptep, psize); |
411 | if (actual_psize < 0) | 419 | if (actual_psize < 0) |
412 | return; | 420 | actual_psize = psize; |
413 | 421 | ||
414 | /* Update the HPTE */ | 422 | /* Update the HPTE */ |
415 | hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | | 423 | hptep->r = (hptep->r & ~(HPTE_R_PP | HPTE_R_N)) | |
@@ -437,21 +445,27 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn, | |||
437 | hpte_v = hptep->v; | 445 | hpte_v = hptep->v; |
438 | 446 | ||
439 | actual_psize = hpte_actual_psize(hptep, psize); | 447 | actual_psize = hpte_actual_psize(hptep, psize); |
448 | /* | ||
449 | * We need to invalidate the TLB always because hpte_remove doesn't do | ||
450 | * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less | ||
451 | * random entry from it. When we do that we don't invalidate the TLB | ||
452 | * (hpte_remove) because we assume the old translation is still | ||
453 | * technically "valid". | ||
454 | */ | ||
440 | if (actual_psize < 0) { | 455 | if (actual_psize < 0) { |
456 | actual_psize = psize; | ||
441 | native_unlock_hpte(hptep); | 457 | native_unlock_hpte(hptep); |
442 | local_irq_restore(flags); | 458 | goto err_out; |
443 | return; | ||
444 | } | 459 | } |
445 | /* Even if we miss, we need to invalidate the TLB */ | ||
446 | if (!HPTE_V_COMPARE(hpte_v, want_v)) | 460 | if (!HPTE_V_COMPARE(hpte_v, want_v)) |
447 | native_unlock_hpte(hptep); | 461 | native_unlock_hpte(hptep); |
448 | else | 462 | else |
449 | /* Invalidate the hpte. NOTE: this also unlocks it */ | 463 | /* Invalidate the hpte. NOTE: this also unlocks it */ |
450 | hptep->v = 0; | 464 | hptep->v = 0; |
451 | 465 | ||
466 | err_out: | ||
452 | /* Invalidate the TLB */ | 467 | /* Invalidate the TLB */ |
453 | tlbie(vpn, psize, actual_psize, ssize, local); | 468 | tlbie(vpn, psize, actual_psize, ssize, local); |
454 | |||
455 | local_irq_restore(flags); | 469 | local_irq_restore(flags); |
456 | } | 470 | } |
457 | 471 | ||
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 426180b84978..845c867444e6 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c | |||
@@ -110,7 +110,7 @@ static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {} | |||
110 | 110 | ||
111 | static bool regs_use_siar(struct pt_regs *regs) | 111 | static bool regs_use_siar(struct pt_regs *regs) |
112 | { | 112 | { |
113 | return !!(regs->result & 1); | 113 | return !!regs->result; |
114 | } | 114 | } |
115 | 115 | ||
116 | /* | 116 | /* |
@@ -136,22 +136,30 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs) | |||
136 | * If we're not doing instruction sampling, give them the SDAR | 136 | * If we're not doing instruction sampling, give them the SDAR |
137 | * (sampled data address). If we are doing instruction sampling, then | 137 | * (sampled data address). If we are doing instruction sampling, then |
138 | * only give them the SDAR if it corresponds to the instruction | 138 | * only give them the SDAR if it corresponds to the instruction |
139 | * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or | 139 | * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC, the |
140 | * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA. | 140 | * [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA, or the SDAR_VALID bit in SIER. |
141 | */ | 141 | */ |
142 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) | 142 | static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) |
143 | { | 143 | { |
144 | unsigned long mmcra = regs->dsisr; | 144 | unsigned long mmcra = regs->dsisr; |
145 | unsigned long sdsync; | 145 | bool sdar_valid; |
146 | 146 | ||
147 | if (ppmu->flags & PPMU_SIAR_VALID) | 147 | if (ppmu->flags & PPMU_HAS_SIER) |
148 | sdsync = POWER7P_MMCRA_SDAR_VALID; | 148 | sdar_valid = regs->dar & SIER_SDAR_VALID; |
149 | else if (ppmu->flags & PPMU_ALT_SIPR) | 149 | else { |
150 | sdsync = POWER6_MMCRA_SDSYNC; | 150 | unsigned long sdsync; |
151 | else | 151 | |
152 | sdsync = MMCRA_SDSYNC; | 152 | if (ppmu->flags & PPMU_SIAR_VALID) |
153 | sdsync = POWER7P_MMCRA_SDAR_VALID; | ||
154 | else if (ppmu->flags & PPMU_ALT_SIPR) | ||
155 | sdsync = POWER6_MMCRA_SDSYNC; | ||
156 | else | ||
157 | sdsync = MMCRA_SDSYNC; | ||
158 | |||
159 | sdar_valid = mmcra & sdsync; | ||
160 | } | ||
153 | 161 | ||
154 | if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync)) | 162 | if (!(mmcra & MMCRA_SAMPLE_ENABLE) || sdar_valid) |
155 | *addrp = mfspr(SPRN_SDAR); | 163 | *addrp = mfspr(SPRN_SDAR); |
156 | } | 164 | } |
157 | 165 | ||
@@ -181,11 +189,6 @@ static bool regs_sipr(struct pt_regs *regs) | |||
181 | return !!(regs->dsisr & sipr); | 189 | return !!(regs->dsisr & sipr); |
182 | } | 190 | } |
183 | 191 | ||
184 | static bool regs_no_sipr(struct pt_regs *regs) | ||
185 | { | ||
186 | return !!(regs->result & 2); | ||
187 | } | ||
188 | |||
189 | static inline u32 perf_flags_from_msr(struct pt_regs *regs) | 192 | static inline u32 perf_flags_from_msr(struct pt_regs *regs) |
190 | { | 193 | { |
191 | if (regs->msr & MSR_PR) | 194 | if (regs->msr & MSR_PR) |
@@ -208,7 +211,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs) | |||
208 | * SIAR which should give slightly more reliable | 211 | * SIAR which should give slightly more reliable |
209 | * results | 212 | * results |
210 | */ | 213 | */ |
211 | if (regs_no_sipr(regs)) { | 214 | if (ppmu->flags & PPMU_NO_SIPR) { |
212 | unsigned long siar = mfspr(SPRN_SIAR); | 215 | unsigned long siar = mfspr(SPRN_SIAR); |
213 | if (siar >= PAGE_OFFSET) | 216 | if (siar >= PAGE_OFFSET) |
214 | return PERF_RECORD_MISC_KERNEL; | 217 | return PERF_RECORD_MISC_KERNEL; |
@@ -239,22 +242,9 @@ static inline void perf_read_regs(struct pt_regs *regs) | |||
239 | int use_siar; | 242 | int use_siar; |
240 | 243 | ||
241 | regs->dsisr = mmcra; | 244 | regs->dsisr = mmcra; |
242 | regs->result = 0; | ||
243 | |||
244 | if (ppmu->flags & PPMU_NO_SIPR) | ||
245 | regs->result |= 2; | ||
246 | |||
247 | /* | ||
248 | * On power8 if we're in random sampling mode, the SIER is updated. | ||
249 | * If we're in continuous sampling mode, we don't have SIPR. | ||
250 | */ | ||
251 | if (ppmu->flags & PPMU_HAS_SIER) { | ||
252 | if (marked) | ||
253 | regs->dar = mfspr(SPRN_SIER); | ||
254 | else | ||
255 | regs->result |= 2; | ||
256 | } | ||
257 | 245 | ||
246 | if (ppmu->flags & PPMU_HAS_SIER) | ||
247 | regs->dar = mfspr(SPRN_SIER); | ||
258 | 248 | ||
259 | /* | 249 | /* |
260 | * If this isn't a PMU exception (eg a software event) the SIAR is | 250 | * If this isn't a PMU exception (eg a software event) the SIAR is |
@@ -279,12 +269,12 @@ static inline void perf_read_regs(struct pt_regs *regs) | |||
279 | use_siar = 1; | 269 | use_siar = 1; |
280 | else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) | 270 | else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING)) |
281 | use_siar = 0; | 271 | use_siar = 0; |
282 | else if (!regs_no_sipr(regs) && regs_sipr(regs)) | 272 | else if (!(ppmu->flags & PPMU_NO_SIPR) && regs_sipr(regs)) |
283 | use_siar = 0; | 273 | use_siar = 0; |
284 | else | 274 | else |
285 | use_siar = 1; | 275 | use_siar = 1; |
286 | 276 | ||
287 | regs->result |= use_siar; | 277 | regs->result = use_siar; |
288 | } | 278 | } |
289 | 279 | ||
290 | /* | 280 | /* |
@@ -308,8 +298,13 @@ static inline int siar_valid(struct pt_regs *regs) | |||
308 | unsigned long mmcra = regs->dsisr; | 298 | unsigned long mmcra = regs->dsisr; |
309 | int marked = mmcra & MMCRA_SAMPLE_ENABLE; | 299 | int marked = mmcra & MMCRA_SAMPLE_ENABLE; |
310 | 300 | ||
311 | if ((ppmu->flags & PPMU_SIAR_VALID) && marked) | 301 | if (marked) { |
312 | return mmcra & POWER7P_MMCRA_SIAR_VALID; | 302 | if (ppmu->flags & PPMU_HAS_SIER) |
303 | return regs->dar & SIER_SIAR_VALID; | ||
304 | |||
305 | if (ppmu->flags & PPMU_SIAR_VALID) | ||
306 | return mmcra & POWER7P_MMCRA_SIAR_VALID; | ||
307 | } | ||
313 | 308 | ||
314 | return 1; | 309 | return 1; |
315 | } | 310 | } |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 023b288f895b..4459eff7a75a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -19,6 +19,8 @@ config PPC_PSERIES | |||
19 | select ZLIB_DEFLATE | 19 | select ZLIB_DEFLATE |
20 | select PPC_DOORBELL | 20 | select PPC_DOORBELL |
21 | select HAVE_CONTEXT_TRACKING | 21 | select HAVE_CONTEXT_TRACKING |
22 | select HOTPLUG if SMP | ||
23 | select HOTPLUG_CPU if SMP | ||
22 | default y | 24 | default y |
23 | 25 | ||
24 | config PPC_SPLPAR | 26 | config PPC_SPLPAR |
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 0a13ecb270c7..3cc2f9159ab1 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -54,7 +54,7 @@ static DEFINE_RAW_SPINLOCK(mpic_lock); | |||
54 | 54 | ||
55 | #ifdef CONFIG_PPC32 /* XXX for now */ | 55 | #ifdef CONFIG_PPC32 /* XXX for now */ |
56 | #ifdef CONFIG_IRQ_ALL_CPUS | 56 | #ifdef CONFIG_IRQ_ALL_CPUS |
57 | #define distribute_irqs (!(mpic->flags & MPIC_SINGLE_DEST_CPU)) | 57 | #define distribute_irqs (1) |
58 | #else | 58 | #else |
59 | #define distribute_irqs (0) | 59 | #define distribute_irqs (0) |
60 | #endif | 60 | #endif |
@@ -1703,7 +1703,7 @@ void mpic_setup_this_cpu(void) | |||
1703 | * it differently, then we should make sure we also change the default | 1703 | * it differently, then we should make sure we also change the default |
1704 | * values of irq_desc[].affinity in irq.c. | 1704 | * values of irq_desc[].affinity in irq.c. |
1705 | */ | 1705 | */ |
1706 | if (distribute_irqs) { | 1706 | if (distribute_irqs && !(mpic->flags & MPIC_SINGLE_DEST_CPU)) { |
1707 | for (i = 0; i < mpic->num_sources ; i++) | 1707 | for (i = 0; i < mpic->num_sources ; i++) |
1708 | mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), | 1708 | mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), |
1709 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); | 1709 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)) | msk); |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index b08ca7a9f76b..3f3f0416fbdd 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c | |||
@@ -2227,6 +2227,27 @@ static void srpt_close_ch(struct srpt_rdma_ch *ch) | |||
2227 | } | 2227 | } |
2228 | 2228 | ||
2229 | /** | 2229 | /** |
2230 | * srpt_shutdown_session() - Whether or not a session may be shut down. | ||
2231 | */ | ||
2232 | static int srpt_shutdown_session(struct se_session *se_sess) | ||
2233 | { | ||
2234 | struct srpt_rdma_ch *ch = se_sess->fabric_sess_ptr; | ||
2235 | unsigned long flags; | ||
2236 | |||
2237 | spin_lock_irqsave(&ch->spinlock, flags); | ||
2238 | if (ch->in_shutdown) { | ||
2239 | spin_unlock_irqrestore(&ch->spinlock, flags); | ||
2240 | return true; | ||
2241 | } | ||
2242 | |||
2243 | ch->in_shutdown = true; | ||
2244 | target_sess_cmd_list_set_waiting(se_sess); | ||
2245 | spin_unlock_irqrestore(&ch->spinlock, flags); | ||
2246 | |||
2247 | return true; | ||
2248 | } | ||
2249 | |||
2250 | /** | ||
2230 | * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. | 2251 | * srpt_drain_channel() - Drain a channel by resetting the IB queue pair. |
2231 | * @cm_id: Pointer to the CM ID of the channel to be drained. | 2252 | * @cm_id: Pointer to the CM ID of the channel to be drained. |
2232 | * | 2253 | * |
@@ -2264,6 +2285,9 @@ static void srpt_drain_channel(struct ib_cm_id *cm_id) | |||
2264 | spin_unlock_irq(&sdev->spinlock); | 2285 | spin_unlock_irq(&sdev->spinlock); |
2265 | 2286 | ||
2266 | if (do_reset) { | 2287 | if (do_reset) { |
2288 | if (ch->sess) | ||
2289 | srpt_shutdown_session(ch->sess); | ||
2290 | |||
2267 | ret = srpt_ch_qp_err(ch); | 2291 | ret = srpt_ch_qp_err(ch); |
2268 | if (ret < 0) | 2292 | if (ret < 0) |
2269 | printk(KERN_ERR "Setting queue pair in error state" | 2293 | printk(KERN_ERR "Setting queue pair in error state" |
@@ -2328,7 +2352,7 @@ static void srpt_release_channel_work(struct work_struct *w) | |||
2328 | se_sess = ch->sess; | 2352 | se_sess = ch->sess; |
2329 | BUG_ON(!se_sess); | 2353 | BUG_ON(!se_sess); |
2330 | 2354 | ||
2331 | target_wait_for_sess_cmds(se_sess, 0); | 2355 | target_wait_for_sess_cmds(se_sess); |
2332 | 2356 | ||
2333 | transport_deregister_session_configfs(se_sess); | 2357 | transport_deregister_session_configfs(se_sess); |
2334 | transport_deregister_session(se_sess); | 2358 | transport_deregister_session(se_sess); |
@@ -3467,14 +3491,6 @@ static void srpt_release_cmd(struct se_cmd *se_cmd) | |||
3467 | } | 3491 | } |
3468 | 3492 | ||
3469 | /** | 3493 | /** |
3470 | * srpt_shutdown_session() - Whether or not a session may be shut down. | ||
3471 | */ | ||
3472 | static int srpt_shutdown_session(struct se_session *se_sess) | ||
3473 | { | ||
3474 | return true; | ||
3475 | } | ||
3476 | |||
3477 | /** | ||
3478 | * srpt_close_session() - Forcibly close a session. | 3494 | * srpt_close_session() - Forcibly close a session. |
3479 | * | 3495 | * |
3480 | * Callback function invoked by the TCM core to clean up sessions associated | 3496 | * Callback function invoked by the TCM core to clean up sessions associated |
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h index 4caf55cda7b1..3dae156905de 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.h +++ b/drivers/infiniband/ulp/srpt/ib_srpt.h | |||
@@ -325,6 +325,7 @@ struct srpt_rdma_ch { | |||
325 | u8 sess_name[36]; | 325 | u8 sess_name[36]; |
326 | struct work_struct release_work; | 326 | struct work_struct release_work; |
327 | struct completion *release_done; | 327 | struct completion *release_done; |
328 | bool in_shutdown; | ||
328 | }; | 329 | }; |
329 | 330 | ||
330 | /** | 331 | /** |
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index d182c96e17ea..7a3870f385f6 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c | |||
@@ -1370,7 +1370,7 @@ static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess) | |||
1370 | dump_stack(); | 1370 | dump_stack(); |
1371 | return; | 1371 | return; |
1372 | } | 1372 | } |
1373 | target_wait_for_sess_cmds(se_sess, 0); | 1373 | target_wait_for_sess_cmds(se_sess); |
1374 | 1374 | ||
1375 | transport_deregister_session_configfs(sess->se_sess); | 1375 | transport_deregister_session_configfs(sess->se_sess); |
1376 | transport_deregister_session(sess->se_sess); | 1376 | transport_deregister_session(sess->se_sess); |
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 262ef1f23b38..d7705e5824fb 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c | |||
@@ -651,7 +651,7 @@ static int iscsit_add_reject( | |||
651 | cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); | 651 | cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); |
652 | if (!cmd->buf_ptr) { | 652 | if (!cmd->buf_ptr) { |
653 | pr_err("Unable to allocate memory for cmd->buf_ptr\n"); | 653 | pr_err("Unable to allocate memory for cmd->buf_ptr\n"); |
654 | iscsit_release_cmd(cmd); | 654 | iscsit_free_cmd(cmd, false); |
655 | return -1; | 655 | return -1; |
656 | } | 656 | } |
657 | 657 | ||
@@ -697,7 +697,7 @@ int iscsit_add_reject_from_cmd( | |||
697 | cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); | 697 | cmd->buf_ptr = kmemdup(buf, ISCSI_HDR_LEN, GFP_KERNEL); |
698 | if (!cmd->buf_ptr) { | 698 | if (!cmd->buf_ptr) { |
699 | pr_err("Unable to allocate memory for cmd->buf_ptr\n"); | 699 | pr_err("Unable to allocate memory for cmd->buf_ptr\n"); |
700 | iscsit_release_cmd(cmd); | 700 | iscsit_free_cmd(cmd, false); |
701 | return -1; | 701 | return -1; |
702 | } | 702 | } |
703 | 703 | ||
@@ -1743,7 +1743,7 @@ int iscsit_handle_nop_out(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
1743 | return 0; | 1743 | return 0; |
1744 | out: | 1744 | out: |
1745 | if (cmd) | 1745 | if (cmd) |
1746 | iscsit_release_cmd(cmd); | 1746 | iscsit_free_cmd(cmd, false); |
1747 | ping_out: | 1747 | ping_out: |
1748 | kfree(ping_data); | 1748 | kfree(ping_data); |
1749 | return ret; | 1749 | return ret; |
@@ -2251,7 +2251,7 @@ iscsit_handle_logout_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, | |||
2251 | if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { | 2251 | if (conn->conn_state != TARG_CONN_STATE_LOGGED_IN) { |
2252 | pr_err("Received logout request on connection that" | 2252 | pr_err("Received logout request on connection that" |
2253 | " is not in logged in state, ignoring request.\n"); | 2253 | " is not in logged in state, ignoring request.\n"); |
2254 | iscsit_release_cmd(cmd); | 2254 | iscsit_free_cmd(cmd, false); |
2255 | return 0; | 2255 | return 0; |
2256 | } | 2256 | } |
2257 | 2257 | ||
@@ -3665,7 +3665,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state | |||
3665 | list_del(&cmd->i_conn_node); | 3665 | list_del(&cmd->i_conn_node); |
3666 | spin_unlock_bh(&conn->cmd_lock); | 3666 | spin_unlock_bh(&conn->cmd_lock); |
3667 | 3667 | ||
3668 | iscsit_free_cmd(cmd); | 3668 | iscsit_free_cmd(cmd, false); |
3669 | break; | 3669 | break; |
3670 | case ISTATE_SEND_NOPIN_WANT_RESPONSE: | 3670 | case ISTATE_SEND_NOPIN_WANT_RESPONSE: |
3671 | iscsit_mod_nopin_response_timer(conn); | 3671 | iscsit_mod_nopin_response_timer(conn); |
@@ -4122,7 +4122,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) | |||
4122 | 4122 | ||
4123 | iscsit_increment_maxcmdsn(cmd, sess); | 4123 | iscsit_increment_maxcmdsn(cmd, sess); |
4124 | 4124 | ||
4125 | iscsit_free_cmd(cmd); | 4125 | iscsit_free_cmd(cmd, true); |
4126 | 4126 | ||
4127 | spin_lock_bh(&conn->cmd_lock); | 4127 | spin_lock_bh(&conn->cmd_lock); |
4128 | } | 4128 | } |
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index ba6091bf93fc..45a5afd5ea13 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c | |||
@@ -143,7 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) | |||
143 | list_del(&cmd->i_conn_node); | 143 | list_del(&cmd->i_conn_node); |
144 | cmd->conn = NULL; | 144 | cmd->conn = NULL; |
145 | spin_unlock(&cr->conn_recovery_cmd_lock); | 145 | spin_unlock(&cr->conn_recovery_cmd_lock); |
146 | iscsit_free_cmd(cmd); | 146 | iscsit_free_cmd(cmd, true); |
147 | spin_lock(&cr->conn_recovery_cmd_lock); | 147 | spin_lock(&cr->conn_recovery_cmd_lock); |
148 | } | 148 | } |
149 | spin_unlock(&cr->conn_recovery_cmd_lock); | 149 | spin_unlock(&cr->conn_recovery_cmd_lock); |
@@ -165,7 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) | |||
165 | list_del(&cmd->i_conn_node); | 165 | list_del(&cmd->i_conn_node); |
166 | cmd->conn = NULL; | 166 | cmd->conn = NULL; |
167 | spin_unlock(&cr->conn_recovery_cmd_lock); | 167 | spin_unlock(&cr->conn_recovery_cmd_lock); |
168 | iscsit_free_cmd(cmd); | 168 | iscsit_free_cmd(cmd, true); |
169 | spin_lock(&cr->conn_recovery_cmd_lock); | 169 | spin_lock(&cr->conn_recovery_cmd_lock); |
170 | } | 170 | } |
171 | spin_unlock(&cr->conn_recovery_cmd_lock); | 171 | spin_unlock(&cr->conn_recovery_cmd_lock); |
@@ -248,7 +248,7 @@ void iscsit_discard_cr_cmds_by_expstatsn( | |||
248 | iscsit_remove_cmd_from_connection_recovery(cmd, sess); | 248 | iscsit_remove_cmd_from_connection_recovery(cmd, sess); |
249 | 249 | ||
250 | spin_unlock(&cr->conn_recovery_cmd_lock); | 250 | spin_unlock(&cr->conn_recovery_cmd_lock); |
251 | iscsit_free_cmd(cmd); | 251 | iscsit_free_cmd(cmd, true); |
252 | spin_lock(&cr->conn_recovery_cmd_lock); | 252 | spin_lock(&cr->conn_recovery_cmd_lock); |
253 | } | 253 | } |
254 | spin_unlock(&cr->conn_recovery_cmd_lock); | 254 | spin_unlock(&cr->conn_recovery_cmd_lock); |
@@ -302,7 +302,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) | |||
302 | list_del(&cmd->i_conn_node); | 302 | list_del(&cmd->i_conn_node); |
303 | 303 | ||
304 | spin_unlock_bh(&conn->cmd_lock); | 304 | spin_unlock_bh(&conn->cmd_lock); |
305 | iscsit_free_cmd(cmd); | 305 | iscsit_free_cmd(cmd, true); |
306 | spin_lock_bh(&conn->cmd_lock); | 306 | spin_lock_bh(&conn->cmd_lock); |
307 | } | 307 | } |
308 | spin_unlock_bh(&conn->cmd_lock); | 308 | spin_unlock_bh(&conn->cmd_lock); |
@@ -355,7 +355,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
355 | 355 | ||
356 | list_del(&cmd->i_conn_node); | 356 | list_del(&cmd->i_conn_node); |
357 | spin_unlock_bh(&conn->cmd_lock); | 357 | spin_unlock_bh(&conn->cmd_lock); |
358 | iscsit_free_cmd(cmd); | 358 | iscsit_free_cmd(cmd, true); |
359 | spin_lock_bh(&conn->cmd_lock); | 359 | spin_lock_bh(&conn->cmd_lock); |
360 | continue; | 360 | continue; |
361 | } | 361 | } |
@@ -375,7 +375,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) | |||
375 | iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { | 375 | iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { |
376 | list_del(&cmd->i_conn_node); | 376 | list_del(&cmd->i_conn_node); |
377 | spin_unlock_bh(&conn->cmd_lock); | 377 | spin_unlock_bh(&conn->cmd_lock); |
378 | iscsit_free_cmd(cmd); | 378 | iscsit_free_cmd(cmd, true); |
379 | spin_lock_bh(&conn->cmd_lock); | 379 | spin_lock_bh(&conn->cmd_lock); |
380 | continue; | 380 | continue; |
381 | } | 381 | } |
diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index c2185fc31136..e38222191a33 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c | |||
@@ -758,9 +758,9 @@ static int iscsi_add_notunderstood_response( | |||
758 | } | 758 | } |
759 | INIT_LIST_HEAD(&extra_response->er_list); | 759 | INIT_LIST_HEAD(&extra_response->er_list); |
760 | 760 | ||
761 | strncpy(extra_response->key, key, strlen(key) + 1); | 761 | strlcpy(extra_response->key, key, sizeof(extra_response->key)); |
762 | strncpy(extra_response->value, NOTUNDERSTOOD, | 762 | strlcpy(extra_response->value, NOTUNDERSTOOD, |
763 | strlen(NOTUNDERSTOOD) + 1); | 763 | sizeof(extra_response->value)); |
764 | 764 | ||
765 | list_add_tail(&extra_response->er_list, | 765 | list_add_tail(&extra_response->er_list, |
766 | &param_list->extra_response_list); | 766 | &param_list->extra_response_list); |
@@ -1629,8 +1629,6 @@ int iscsi_decode_text_input( | |||
1629 | 1629 | ||
1630 | if (phase & PHASE_SECURITY) { | 1630 | if (phase & PHASE_SECURITY) { |
1631 | if (iscsi_check_for_auth_key(key) > 0) { | 1631 | if (iscsi_check_for_auth_key(key) > 0) { |
1632 | char *tmpptr = key + strlen(key); | ||
1633 | *tmpptr = '='; | ||
1634 | kfree(tmpbuf); | 1632 | kfree(tmpbuf); |
1635 | return 1; | 1633 | return 1; |
1636 | } | 1634 | } |
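
Why the strlcpy() conversion above matters: the old strncpy() calls bounded the copy by strlen(source) + 1, which says nothing about the size of the fixed destination buffers, and strncpy() does not NUL-terminate when it truncates. strlcpy() is bounded by the destination size and always terminates, so together with the key[KEY_MAXLEN] change in iscsi_target_parameters.h below an oversized key from the initiator is truncated instead of overrunning extra_response->key. A minimal user-space sketch of the difference, using a local my_strlcpy() stand-in because plain glibc does not ship strlcpy():

#include <stdio.h>
#include <string.h>

/* Local stand-in with strlcpy() semantics (the kernel and the BSDs provide
 * this; plain glibc does not): copy at most size - 1 bytes, always
 * NUL-terminate, return the length of the source. */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;
}

int main(void)
{
        char key[8];
        const char *oversized = "MuchTooLongKeyName";

        /* Old pattern: strncpy(key, oversized, strlen(oversized) + 1)
         * bounds the copy by the *source*, overrunning key[]. */

        /* New pattern: bound by the destination, always terminated. */
        if (my_strlcpy(key, oversized, sizeof(key)) >= sizeof(key))
                printf("value was truncated\n");
        printf("stored: \"%s\"\n", key);        /* prints "MuchToo" */
        return 0;
}
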
diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index 915b06798505..a47046a752aa 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h | |||
@@ -1,8 +1,10 @@ | |||
1 | #ifndef ISCSI_PARAMETERS_H | 1 | #ifndef ISCSI_PARAMETERS_H |
2 | #define ISCSI_PARAMETERS_H | 2 | #define ISCSI_PARAMETERS_H |
3 | 3 | ||
4 | #include <scsi/iscsi_proto.h> | ||
5 | |||
4 | struct iscsi_extra_response { | 6 | struct iscsi_extra_response { |
5 | char key[64]; | 7 | char key[KEY_MAXLEN]; |
6 | char value[32]; | 8 | char value[32]; |
7 | struct list_head er_list; | 9 | struct list_head er_list; |
8 | } ____cacheline_aligned; | 10 | } ____cacheline_aligned; |
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 2cc6c9a3ffb8..08a3bacef0c5 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c | |||
@@ -676,40 +676,56 @@ void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *conn) | |||
676 | 676 | ||
677 | void iscsit_release_cmd(struct iscsi_cmd *cmd) | 677 | void iscsit_release_cmd(struct iscsi_cmd *cmd) |
678 | { | 678 | { |
679 | struct iscsi_conn *conn = cmd->conn; | ||
680 | |||
681 | iscsit_free_r2ts_from_list(cmd); | ||
682 | iscsit_free_all_datain_reqs(cmd); | ||
683 | |||
684 | kfree(cmd->buf_ptr); | 679 | kfree(cmd->buf_ptr); |
685 | kfree(cmd->pdu_list); | 680 | kfree(cmd->pdu_list); |
686 | kfree(cmd->seq_list); | 681 | kfree(cmd->seq_list); |
687 | kfree(cmd->tmr_req); | 682 | kfree(cmd->tmr_req); |
688 | kfree(cmd->iov_data); | 683 | kfree(cmd->iov_data); |
689 | 684 | ||
690 | if (conn) { | 685 | kmem_cache_free(lio_cmd_cache, cmd); |
686 | } | ||
687 | |||
688 | static void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, | ||
689 | bool check_queues) | ||
690 | { | ||
691 | struct iscsi_conn *conn = cmd->conn; | ||
692 | |||
693 | if (scsi_cmd) { | ||
694 | if (cmd->data_direction == DMA_TO_DEVICE) { | ||
695 | iscsit_stop_dataout_timer(cmd); | ||
696 | iscsit_free_r2ts_from_list(cmd); | ||
697 | } | ||
698 | if (cmd->data_direction == DMA_FROM_DEVICE) | ||
699 | iscsit_free_all_datain_reqs(cmd); | ||
700 | } | ||
701 | |||
702 | if (conn && check_queues) { | ||
691 | iscsit_remove_cmd_from_immediate_queue(cmd, conn); | 703 | iscsit_remove_cmd_from_immediate_queue(cmd, conn); |
692 | iscsit_remove_cmd_from_response_queue(cmd, conn); | 704 | iscsit_remove_cmd_from_response_queue(cmd, conn); |
693 | } | 705 | } |
694 | |||
695 | kmem_cache_free(lio_cmd_cache, cmd); | ||
696 | } | 706 | } |
697 | 707 | ||
698 | void iscsit_free_cmd(struct iscsi_cmd *cmd) | 708 | void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) |
699 | { | 709 | { |
710 | struct se_cmd *se_cmd = NULL; | ||
711 | int rc; | ||
700 | /* | 712 | /* |
701 | * Determine if a struct se_cmd is associated with | 713 | * Determine if a struct se_cmd is associated with |
702 | * this struct iscsi_cmd. | 714 | * this struct iscsi_cmd. |
703 | */ | 715 | */ |
704 | switch (cmd->iscsi_opcode) { | 716 | switch (cmd->iscsi_opcode) { |
705 | case ISCSI_OP_SCSI_CMD: | 717 | case ISCSI_OP_SCSI_CMD: |
706 | if (cmd->data_direction == DMA_TO_DEVICE) | 718 | se_cmd = &cmd->se_cmd; |
707 | iscsit_stop_dataout_timer(cmd); | 719 | __iscsit_free_cmd(cmd, true, shutdown); |
708 | /* | 720 | /* |
709 | * Fallthrough | 721 | * Fallthrough |
710 | */ | 722 | */ |
711 | case ISCSI_OP_SCSI_TMFUNC: | 723 | case ISCSI_OP_SCSI_TMFUNC: |
712 | transport_generic_free_cmd(&cmd->se_cmd, 1); | 724 | rc = transport_generic_free_cmd(&cmd->se_cmd, 1); |
725 | if (!rc && shutdown && se_cmd && se_cmd->se_sess) { | ||
726 | __iscsit_free_cmd(cmd, true, shutdown); | ||
727 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | ||
728 | } | ||
713 | break; | 729 | break; |
714 | case ISCSI_OP_REJECT: | 730 | case ISCSI_OP_REJECT: |
715 | /* | 731 | /* |
@@ -718,11 +734,19 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd) | |||
718 | * associated cmd->se_cmd needs to be released. | 734 | * associated cmd->se_cmd needs to be released. |
719 | */ | 735 | */ |
720 | if (cmd->se_cmd.se_tfo != NULL) { | 736 | if (cmd->se_cmd.se_tfo != NULL) { |
721 | transport_generic_free_cmd(&cmd->se_cmd, 1); | 737 | se_cmd = &cmd->se_cmd; |
738 | __iscsit_free_cmd(cmd, true, shutdown); | ||
739 | |||
740 | rc = transport_generic_free_cmd(&cmd->se_cmd, 1); | ||
741 | if (!rc && shutdown && se_cmd->se_sess) { | ||
742 | __iscsit_free_cmd(cmd, true, shutdown); | ||
743 | target_put_sess_cmd(se_cmd->se_sess, se_cmd); | ||
744 | } | ||
722 | break; | 745 | break; |
723 | } | 746 | } |
724 | /* Fall-through */ | 747 | /* Fall-through */ |
725 | default: | 748 | default: |
749 | __iscsit_free_cmd(cmd, false, shutdown); | ||
726 | cmd->release_cmd(cmd); | 750 | cmd->release_cmd(cmd); |
727 | break; | 751 | break; |
728 | } | 752 | } |
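
The shape of the iscsi_target_util.c refactor above: iscsit_release_cmd() now only frees the command's allocations, the new __iscsit_free_cmd() helper stops the Data-Out timer, releases R2T/DataIN state and optionally pulls the command off the connection's immediate and response queues, and iscsit_free_cmd() gains a shutdown flag (false on the normal paths in iscsi_target.c, true on the connection-teardown and recovery paths). The flag pairs with the new transport_generic_free_cmd() return value: when that call did not drop the final reference (returns 0) and the caller is tearing the connection down, an explicit target_put_sess_cmd() drops the reference that the normal response path will never get to drop. A deliberately simplified sketch of that conditional-extra-put pattern, with hypothetical names and a bare counter standing in for the se_cmd kref:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model: a plain counter stands in for se_cmd->cmd_kref. */
struct fake_cmd {
        int refs;
};

static void put_ref(struct fake_cmd *c, const char *who)
{
        if (--c->refs == 0)
                printf("%s released the command\n", who);
}

/* Plays the role of transport_generic_free_cmd(): non-zero only when this
 * call itself released the command. */
static int generic_free(struct fake_cmd *c)
{
        int released = (c->refs == 1);

        put_ref(c, "generic_free");
        return released;
}

/* Plays the role of iscsit_free_cmd(): during shutdown, drop the reference
 * the normal response path would otherwise have dropped. */
static void free_cmd(struct fake_cmd *c, bool shutdown)
{
        int rc = generic_free(c);

        if (!rc && shutdown)
                put_ref(c, "shutdown path");
}

int main(void)
{
        struct fake_cmd completed = { .refs = 1 };   /* response already sent */
        struct fake_cmd in_flight = { .refs = 2 };   /* still owned elsewhere */

        free_cmd(&completed, false);
        free_cmd(&in_flight, true);
        return 0;
}

In the real code the counting lives in target_put_sess_cmd() and the kref machinery; the sketch only shows why the extra put must be conditional on both the return code and the shutdown flag.
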
diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 4f8e01a47081..a4422659d049 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h | |||
@@ -29,7 +29,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co | |||
29 | extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); | 29 | extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); |
30 | extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); | 30 | extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); |
31 | extern void iscsit_release_cmd(struct iscsi_cmd *); | 31 | extern void iscsit_release_cmd(struct iscsi_cmd *); |
32 | extern void iscsit_free_cmd(struct iscsi_cmd *); | 32 | extern void iscsit_free_cmd(struct iscsi_cmd *, bool); |
33 | extern int iscsit_check_session_usage_count(struct iscsi_session *); | 33 | extern int iscsit_check_session_usage_count(struct iscsi_session *); |
34 | extern void iscsit_dec_session_usage_count(struct iscsi_session *); | 34 | extern void iscsit_dec_session_usage_count(struct iscsi_session *); |
35 | extern void iscsit_inc_session_usage_count(struct iscsi_session *); | 35 | extern void iscsit_inc_session_usage_count(struct iscsi_session *); |
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index 1b1d544e927a..b11890d85120 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c | |||
@@ -153,6 +153,7 @@ static int fd_configure_device(struct se_device *dev) | |||
153 | struct request_queue *q = bdev_get_queue(inode->i_bdev); | 153 | struct request_queue *q = bdev_get_queue(inode->i_bdev); |
154 | unsigned long long dev_size; | 154 | unsigned long long dev_size; |
155 | 155 | ||
156 | fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev); | ||
156 | /* | 157 | /* |
157 | * Determine the number of bytes from i_size_read() minus | 158 | * Determine the number of bytes from i_size_read() minus |
158 | * one (1) logical sector from underlying struct block_device | 159 | * one (1) logical sector from underlying struct block_device |
@@ -199,6 +200,7 @@ static int fd_configure_device(struct se_device *dev) | |||
199 | goto fail; | 200 | goto fail; |
200 | } | 201 | } |
201 | 202 | ||
203 | fd_dev->fd_block_size = FD_BLOCKSIZE; | ||
202 | /* | 204 | /* |
203 | * Limit UNMAP emulation to 8k Number of LBAs (NoLB) | 205 | * Limit UNMAP emulation to 8k Number of LBAs (NoLB) |
204 | */ | 206 | */ |
@@ -217,9 +219,7 @@ static int fd_configure_device(struct se_device *dev) | |||
217 | dev->dev_attrib.max_write_same_len = 0x1000; | 219 | dev->dev_attrib.max_write_same_len = 0x1000; |
218 | } | 220 | } |
219 | 221 | ||
220 | fd_dev->fd_block_size = dev->dev_attrib.hw_block_size; | 222 | dev->dev_attrib.hw_block_size = fd_dev->fd_block_size; |
221 | |||
222 | dev->dev_attrib.hw_block_size = FD_BLOCKSIZE; | ||
223 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; | 223 | dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS; |
224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; | 224 | dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH; |
225 | 225 | ||
@@ -694,11 +694,12 @@ static sector_t fd_get_blocks(struct se_device *dev) | |||
694 | * to handle underlying block_device resize operations. | 694 | * to handle underlying block_device resize operations. |
695 | */ | 695 | */ |
696 | if (S_ISBLK(i->i_mode)) | 696 | if (S_ISBLK(i->i_mode)) |
697 | dev_size = (i_size_read(i) - fd_dev->fd_block_size); | 697 | dev_size = i_size_read(i); |
698 | else | 698 | else |
699 | dev_size = fd_dev->fd_dev_size; | 699 | dev_size = fd_dev->fd_dev_size; |
700 | 700 | ||
701 | return div_u64(dev_size, dev->dev_attrib.block_size); | 701 | return div_u64(dev_size - dev->dev_attrib.block_size, |
702 | dev->dev_attrib.block_size); | ||
702 | } | 703 | } |
703 | 704 | ||
704 | static struct sbc_ops fd_sbc_ops = { | 705 | static struct sbc_ops fd_sbc_ops = { |
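
Two things change in target_core_file.c above: fd_dev->fd_block_size is now captured from the backing store up front (bdev_logical_block_size() for a block device, FD_BLOCKSIZE for a regular file) and hw_block_size is derived from it rather than the reverse, and fd_get_blocks() subtracts one logical block inside the division for both the file-backed and the block-device-backed cases, so the value handed back to the READ CAPACITY emulation is the last addressable LBA rather than the raw block count. A small sketch of the arithmetic, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the arithmetic in the new fd_get_blocks(): one logical block is
 * subtracted before the division so the result is the last addressable LBA,
 * not the total block count. (Hypothetical helper; the driver uses div_u64()
 * on i_size_read()/fd_dev_size and dev_attrib.block_size.) */
static uint64_t fd_last_lba(uint64_t dev_size, uint32_t block_size)
{
        return (dev_size - block_size) / block_size;
}

int main(void)
{
        /* A 1 GiB backing store with 512-byte blocks holds 2097152 blocks,
         * so the highest LBA an initiator may address is 2097151. */
        printf("%llu\n", (unsigned long long)fd_last_lba(1ULL << 30, 512));
        return 0;
}
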
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 4a793362309d..21e315874a54 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c | |||
@@ -65,7 +65,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd); | |||
65 | static void transport_handle_queue_full(struct se_cmd *cmd, | 65 | static void transport_handle_queue_full(struct se_cmd *cmd, |
66 | struct se_device *dev); | 66 | struct se_device *dev); |
67 | static int transport_generic_get_mem(struct se_cmd *cmd); | 67 | static int transport_generic_get_mem(struct se_cmd *cmd); |
68 | static void transport_put_cmd(struct se_cmd *cmd); | 68 | static int transport_put_cmd(struct se_cmd *cmd); |
69 | static void target_complete_ok_work(struct work_struct *work); | 69 | static void target_complete_ok_work(struct work_struct *work); |
70 | 70 | ||
71 | int init_se_kmem_caches(void) | 71 | int init_se_kmem_caches(void) |
@@ -221,6 +221,7 @@ struct se_session *transport_init_session(void) | |||
221 | INIT_LIST_HEAD(&se_sess->sess_list); | 221 | INIT_LIST_HEAD(&se_sess->sess_list); |
222 | INIT_LIST_HEAD(&se_sess->sess_acl_list); | 222 | INIT_LIST_HEAD(&se_sess->sess_acl_list); |
223 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); | 223 | INIT_LIST_HEAD(&se_sess->sess_cmd_list); |
224 | INIT_LIST_HEAD(&se_sess->sess_wait_list); | ||
224 | spin_lock_init(&se_sess->sess_cmd_lock); | 225 | spin_lock_init(&se_sess->sess_cmd_lock); |
225 | kref_init(&se_sess->sess_kref); | 226 | kref_init(&se_sess->sess_kref); |
226 | 227 | ||
@@ -1943,7 +1944,7 @@ static inline void transport_free_pages(struct se_cmd *cmd) | |||
1943 | * This routine unconditionally frees a command, and reference counting | 1944 | * This routine unconditionally frees a command, and reference counting |
1944 | * or list removal must be done in the caller. | 1945 | * or list removal must be done in the caller. |
1945 | */ | 1946 | */ |
1946 | static void transport_release_cmd(struct se_cmd *cmd) | 1947 | static int transport_release_cmd(struct se_cmd *cmd) |
1947 | { | 1948 | { |
1948 | BUG_ON(!cmd->se_tfo); | 1949 | BUG_ON(!cmd->se_tfo); |
1949 | 1950 | ||
@@ -1955,11 +1956,11 @@ static void transport_release_cmd(struct se_cmd *cmd) | |||
1955 | * If this cmd has been setup with target_get_sess_cmd(), drop | 1956 | * If this cmd has been setup with target_get_sess_cmd(), drop |
1956 | * the kref and call ->release_cmd() in kref callback. | 1957 | * the kref and call ->release_cmd() in kref callback. |
1957 | */ | 1958 | */ |
1958 | if (cmd->check_release != 0) { | 1959 | if (cmd->check_release != 0) |
1959 | target_put_sess_cmd(cmd->se_sess, cmd); | 1960 | return target_put_sess_cmd(cmd->se_sess, cmd); |
1960 | return; | 1961 | |
1961 | } | ||
1962 | cmd->se_tfo->release_cmd(cmd); | 1962 | cmd->se_tfo->release_cmd(cmd); |
1963 | return 1; | ||
1963 | } | 1964 | } |
1964 | 1965 | ||
1965 | /** | 1966 | /** |
@@ -1968,7 +1969,7 @@ static void transport_release_cmd(struct se_cmd *cmd) | |||
1968 | * | 1969 | * |
1969 | * This routine releases our reference to the command and frees it if possible. | 1970 | * This routine releases our reference to the command and frees it if possible. |
1970 | */ | 1971 | */ |
1971 | static void transport_put_cmd(struct se_cmd *cmd) | 1972 | static int transport_put_cmd(struct se_cmd *cmd) |
1972 | { | 1973 | { |
1973 | unsigned long flags; | 1974 | unsigned long flags; |
1974 | 1975 | ||
@@ -1976,7 +1977,7 @@ static void transport_put_cmd(struct se_cmd *cmd) | |||
1976 | if (atomic_read(&cmd->t_fe_count) && | 1977 | if (atomic_read(&cmd->t_fe_count) && |
1977 | !atomic_dec_and_test(&cmd->t_fe_count)) { | 1978 | !atomic_dec_and_test(&cmd->t_fe_count)) { |
1978 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 1979 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1979 | return; | 1980 | return 0; |
1980 | } | 1981 | } |
1981 | 1982 | ||
1982 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { | 1983 | if (cmd->transport_state & CMD_T_DEV_ACTIVE) { |
@@ -1986,8 +1987,7 @@ static void transport_put_cmd(struct se_cmd *cmd) | |||
1986 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); | 1987 | spin_unlock_irqrestore(&cmd->t_state_lock, flags); |
1987 | 1988 | ||
1988 | transport_free_pages(cmd); | 1989 | transport_free_pages(cmd); |
1989 | transport_release_cmd(cmd); | 1990 | return transport_release_cmd(cmd); |
1990 | return; | ||
1991 | } | 1991 | } |
1992 | 1992 | ||
1993 | void *transport_kmap_data_sg(struct se_cmd *cmd) | 1993 | void *transport_kmap_data_sg(struct se_cmd *cmd) |
@@ -2152,13 +2152,15 @@ static void transport_write_pending_qf(struct se_cmd *cmd) | |||
2152 | } | 2152 | } |
2153 | } | 2153 | } |
2154 | 2154 | ||
2155 | void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | 2155 | int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) |
2156 | { | 2156 | { |
2157 | int ret = 0; | ||
2158 | |||
2157 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { | 2159 | if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { |
2158 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) | 2160 | if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) |
2159 | transport_wait_for_tasks(cmd); | 2161 | transport_wait_for_tasks(cmd); |
2160 | 2162 | ||
2161 | transport_release_cmd(cmd); | 2163 | ret = transport_release_cmd(cmd); |
2162 | } else { | 2164 | } else { |
2163 | if (wait_for_tasks) | 2165 | if (wait_for_tasks) |
2164 | transport_wait_for_tasks(cmd); | 2166 | transport_wait_for_tasks(cmd); |
@@ -2166,8 +2168,9 @@ void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) | |||
2166 | if (cmd->se_lun) | 2168 | if (cmd->se_lun) |
2167 | transport_lun_remove_cmd(cmd); | 2169 | transport_lun_remove_cmd(cmd); |
2168 | 2170 | ||
2169 | transport_put_cmd(cmd); | 2171 | ret = transport_put_cmd(cmd); |
2170 | } | 2172 | } |
2173 | return ret; | ||
2171 | } | 2174 | } |
2172 | EXPORT_SYMBOL(transport_generic_free_cmd); | 2175 | EXPORT_SYMBOL(transport_generic_free_cmd); |
2173 | 2176 | ||
@@ -2250,11 +2253,14 @@ void target_sess_cmd_list_set_waiting(struct se_session *se_sess) | |||
2250 | unsigned long flags; | 2253 | unsigned long flags; |
2251 | 2254 | ||
2252 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | 2255 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); |
2253 | 2256 | if (se_sess->sess_tearing_down) { | |
2254 | WARN_ON(se_sess->sess_tearing_down); | 2257 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
2258 | return; | ||
2259 | } | ||
2255 | se_sess->sess_tearing_down = 1; | 2260 | se_sess->sess_tearing_down = 1; |
2261 | list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list); | ||
2256 | 2262 | ||
2257 | list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) | 2263 | list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) |
2258 | se_cmd->cmd_wait_set = 1; | 2264 | se_cmd->cmd_wait_set = 1; |
2259 | 2265 | ||
2260 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | 2266 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); |
@@ -2263,44 +2269,32 @@ EXPORT_SYMBOL(target_sess_cmd_list_set_waiting); | |||
2263 | 2269 | ||
2264 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors | 2270 | /* target_wait_for_sess_cmds - Wait for outstanding descriptors |
2265 | * @se_sess: session to wait for active I/O | 2271 | * @se_sess: session to wait for active I/O |
2266 | * @wait_for_tasks: Make extra transport_wait_for_tasks call | ||
2267 | */ | 2272 | */ |
2268 | void target_wait_for_sess_cmds( | 2273 | void target_wait_for_sess_cmds(struct se_session *se_sess) |
2269 | struct se_session *se_sess, | ||
2270 | int wait_for_tasks) | ||
2271 | { | 2274 | { |
2272 | struct se_cmd *se_cmd, *tmp_cmd; | 2275 | struct se_cmd *se_cmd, *tmp_cmd; |
2273 | bool rc = false; | 2276 | unsigned long flags; |
2274 | 2277 | ||
2275 | list_for_each_entry_safe(se_cmd, tmp_cmd, | 2278 | list_for_each_entry_safe(se_cmd, tmp_cmd, |
2276 | &se_sess->sess_cmd_list, se_cmd_list) { | 2279 | &se_sess->sess_wait_list, se_cmd_list) { |
2277 | list_del(&se_cmd->se_cmd_list); | 2280 | list_del(&se_cmd->se_cmd_list); |
2278 | 2281 | ||
2279 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" | 2282 | pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:" |
2280 | " %d\n", se_cmd, se_cmd->t_state, | 2283 | " %d\n", se_cmd, se_cmd->t_state, |
2281 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2284 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2282 | 2285 | ||
2283 | if (wait_for_tasks) { | 2286 | wait_for_completion(&se_cmd->cmd_wait_comp); |
2284 | pr_debug("Calling transport_wait_for_tasks se_cmd: %p t_state: %d," | 2287 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" |
2285 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | 2288 | " fabric state: %d\n", se_cmd, se_cmd->t_state, |
2286 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | 2289 | se_cmd->se_tfo->get_cmd_state(se_cmd)); |
2287 | |||
2288 | rc = transport_wait_for_tasks(se_cmd); | ||
2289 | |||
2290 | pr_debug("After transport_wait_for_tasks se_cmd: %p t_state: %d," | ||
2291 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | ||
2292 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | ||
2293 | } | ||
2294 | |||
2295 | if (!rc) { | ||
2296 | wait_for_completion(&se_cmd->cmd_wait_comp); | ||
2297 | pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d" | ||
2298 | " fabric state: %d\n", se_cmd, se_cmd->t_state, | ||
2299 | se_cmd->se_tfo->get_cmd_state(se_cmd)); | ||
2300 | } | ||
2301 | 2290 | ||
2302 | se_cmd->se_tfo->release_cmd(se_cmd); | 2291 | se_cmd->se_tfo->release_cmd(se_cmd); |
2303 | } | 2292 | } |
2293 | |||
2294 | spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); | ||
2295 | WARN_ON(!list_empty(&se_sess->sess_cmd_list)); | ||
2296 | spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); | ||
2297 | |||
2304 | } | 2298 | } |
2305 | EXPORT_SYMBOL(target_wait_for_sess_cmds); | 2299 | EXPORT_SYMBOL(target_wait_for_sess_cmds); |
2306 | 2300 | ||
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index e773dfa5f98f..4ea4f985f394 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h | |||
@@ -543,6 +543,7 @@ struct se_session { | |||
543 | struct list_head sess_list; | 543 | struct list_head sess_list; |
544 | struct list_head sess_acl_list; | 544 | struct list_head sess_acl_list; |
545 | struct list_head sess_cmd_list; | 545 | struct list_head sess_cmd_list; |
546 | struct list_head sess_wait_list; | ||
546 | spinlock_t sess_cmd_lock; | 547 | spinlock_t sess_cmd_lock; |
547 | struct kref sess_kref; | 548 | struct kref sess_kref; |
548 | }; | 549 | }; |
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h index ba3471b73c07..1dcce9cc99b9 100644 --- a/include/target/target_core_fabric.h +++ b/include/target/target_core_fabric.h | |||
@@ -114,7 +114,7 @@ sense_reason_t transport_generic_new_cmd(struct se_cmd *); | |||
114 | 114 | ||
115 | void target_execute_cmd(struct se_cmd *cmd); | 115 | void target_execute_cmd(struct se_cmd *cmd); |
116 | 116 | ||
117 | void transport_generic_free_cmd(struct se_cmd *, int); | 117 | int transport_generic_free_cmd(struct se_cmd *, int); |
118 | 118 | ||
119 | bool transport_wait_for_tasks(struct se_cmd *); | 119 | bool transport_wait_for_tasks(struct se_cmd *); |
120 | int transport_check_aborted_status(struct se_cmd *, int); | 120 | int transport_check_aborted_status(struct se_cmd *, int); |
@@ -123,7 +123,7 @@ int transport_send_check_condition_and_sense(struct se_cmd *, | |||
123 | int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); | 123 | int target_get_sess_cmd(struct se_session *, struct se_cmd *, bool); |
124 | int target_put_sess_cmd(struct se_session *, struct se_cmd *); | 124 | int target_put_sess_cmd(struct se_session *, struct se_cmd *); |
125 | void target_sess_cmd_list_set_waiting(struct se_session *); | 125 | void target_sess_cmd_list_set_waiting(struct se_session *); |
126 | void target_wait_for_sess_cmds(struct se_session *, int); | 126 | void target_wait_for_sess_cmds(struct se_session *); |
127 | 127 | ||
128 | int core_alua_check_nonop_delay(struct se_cmd *); | 128 | int core_alua_check_nonop_delay(struct se_cmd *); |
129 | 129 | ||