Diffstat (limited to 'include')
-rw-r--r--  include/asm-s390/airq.h             19
-rw-r--r--  include/asm-s390/cio.h               4
-rw-r--r--  include/asm-s390/dasd.h              2
-rw-r--r--  include/asm-s390/ipl.h               8
-rw-r--r--  include/asm-s390/mmu_context.h      27
-rw-r--r--  include/asm-s390/pgtable.h          46
-rw-r--r--  include/asm-s390/processor.h         4
-rw-r--r--  include/asm-s390/ptrace.h            8
-rw-r--r--  include/asm-s390/qdio.h              2
-rw-r--r--  include/asm-s390/rwsem.h             4
-rw-r--r--  include/asm-s390/sclp.h             20
-rw-r--r--  include/asm-s390/smp.h               5
-rw-r--r--  include/asm-s390/spinlock.h         32
-rw-r--r--  include/asm-s390/spinlock_types.h    1
-rw-r--r--  include/asm-s390/tlbflush.h         32
-rw-r--r--  include/asm-s390/zcrypt.h            2
16 files changed, 131 insertions(+), 85 deletions(-)
diff --git a/include/asm-s390/airq.h b/include/asm-s390/airq.h
new file mode 100644
index 000000000000..41d028cb52a4
--- /dev/null
+++ b/include/asm-s390/airq.h
@@ -0,0 +1,19 @@
+/*
+ * include/asm-s390/airq.h
+ *
+ * Copyright IBM Corp. 2002,2007
+ * Author(s): Ingo Adlung <adlung@de.ibm.com>
+ *	      Cornelia Huck <cornelia.huck@de.ibm.com>
+ *	      Arnd Bergmann <arndb@de.ibm.com>
+ *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
+ */
+
+#ifndef _ASM_S390_AIRQ_H
+#define _ASM_S390_AIRQ_H
+
+typedef void (*adapter_int_handler_t)(void *, void *);
+
+void *s390_register_adapter_interrupt(adapter_int_handler_t, void *);
+void s390_unregister_adapter_interrupt(void *);
+
+#endif /* _ASM_S390_AIRQ_H */
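
The new header exports just the handler type and the two registration calls above. Below is a minimal, hypothetical sketch of how a driver might use them; the device structure, handler body, and the assumption that failures come back as ERR_PTR values are illustrative and not taken from this patch.

/* Hypothetical driver-side use of the adapter interrupt interface. */
#include <linux/err.h>
#include <asm/airq.h>

struct my_adapter;		/* hypothetical device state */

static void *my_indicator;

static void my_adapter_irq(void *ind, void *drv_data)
{
	/* drv_data is the second argument passed at registration time. */
	struct my_adapter *adapter = drv_data;

	(void) adapter;		/* process the adapter interruption here */
}

static int my_adapter_setup(struct my_adapter *adapter)
{
	my_indicator = s390_register_adapter_interrupt(my_adapter_irq, adapter);
	if (IS_ERR(my_indicator))	/* assumption: errors encoded as ERR_PTR */
		return PTR_ERR(my_indicator);
	return 0;
}

static void my_adapter_teardown(void)
{
	s390_unregister_adapter_interrupt(my_indicator);
}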
diff --git a/include/asm-s390/cio.h b/include/asm-s390/cio.h
index 2f08c16e44ad..123b557c3ff4 100644
--- a/include/asm-s390/cio.h
+++ b/include/asm-s390/cio.h
@@ -24,8 +24,8 @@
  * @fmt: format
  * @pfch: prefetch
  * @isic: initial-status interruption control
- * @alcc: adress-limit checking control
- * @ssi: supress-suspended interruption
+ * @alcc: address-limit checking control
+ * @ssi: suppress-suspended interruption
  * @zcc: zero condition code
  * @ectl: extended control
  * @pno: path not operational
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index 604f68fa6f56..3f002e13d024 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -105,7 +105,7 @@ typedef struct dasd_information_t {
 } dasd_information_t;
 
 /*
- * Read Subsystem Data - Perfomance Statistics
+ * Read Subsystem Data - Performance Statistics
  */
 typedef struct dasd_rssd_perf_stats_t {
 	unsigned char invalid:1;
diff --git a/include/asm-s390/ipl.h b/include/asm-s390/ipl.h
index 2c40fd3a137f..c1b2e50392bb 100644
--- a/include/asm-s390/ipl.h
+++ b/include/asm-s390/ipl.h
@@ -83,6 +83,8 @@ extern u32 dump_prefix_page;
 extern unsigned int zfcpdump_prefix_array[];
 
 extern void do_reipl(void);
+extern void do_halt(void);
+extern void do_poff(void);
 extern void ipl_save_parameters(void);
 
 enum {
@@ -118,7 +120,7 @@ struct ipl_info
 };
 
 extern struct ipl_info ipl_info;
-extern void setup_ipl_info(void);
+extern void setup_ipl(void);
 
 /*
  * DIAG 308 support
@@ -141,6 +143,10 @@ enum diag308_opt {
 	DIAG308_IPL_OPT_DUMP	= 0x20,
 };
 
+enum diag308_flags {
+	DIAG308_FLAGS_LP_VALID	= 0x80,
+};
+
 enum diag308_rc {
 	DIAG308_RC_OK		= 1,
 };
diff --git a/include/asm-s390/mmu_context.h b/include/asm-s390/mmu_context.h
index 05b842126b99..a77d4ba3c8eb 100644
--- a/include/asm-s390/mmu_context.h
+++ b/include/asm-s390/mmu_context.h
@@ -12,10 +12,15 @@
 #include <asm/pgalloc.h>
 #include <asm-generic/mm_hooks.h>
 
-/*
- * get a new mmu context.. S390 don't know about contexts.
- */
-#define init_new_context(tsk,mm)        0
+static inline int init_new_context(struct task_struct *tsk,
+				   struct mm_struct *mm)
+{
+	mm->context = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
+#ifdef CONFIG_64BIT
+	mm->context |= _ASCE_TYPE_REGION3;
+#endif
+	return 0;
+}
 
 #define destroy_context(mm)             do { } while (0)
 
@@ -27,19 +32,11 @@
 
 static inline void update_mm(struct mm_struct *mm, struct task_struct *tsk)
 {
-	pgd_t *pgd = mm->pgd;
-	unsigned long asce_bits;
-
-	/* Calculate asce bits from the first pgd table entry. */
-	asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
-#ifdef CONFIG_64BIT
-	asce_bits |= _ASCE_TYPE_REGION3;
-#endif
-	S390_lowcore.user_asce = asce_bits | __pa(pgd);
+	S390_lowcore.user_asce = mm->context | __pa(mm->pgd);
 	if (switch_amode) {
 		/* Load primary space page table origin. */
-		pgd_t *shadow_pgd = get_shadow_table(pgd) ? : pgd;
-		S390_lowcore.user_exec_asce = asce_bits | __pa(shadow_pgd);
+		pgd_t *shadow_pgd = get_shadow_table(mm->pgd) ? : mm->pgd;
+		S390_lowcore.user_exec_asce = mm->context | __pa(shadow_pgd);
 		asm volatile(LCTL_OPCODE" 1,1,%0\n"
 			     : : "m" (S390_lowcore.user_exec_asce) );
 	} else
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 1f530f8a6280..79b9eab1a0c7 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -104,41 +104,27 @@ extern char empty_zero_page[PAGE_SIZE];
 
 #ifndef __ASSEMBLY__
 /*
- * Just any arbitrary offset to the start of the vmalloc VM area: the
- * current 8MB value just means that there will be a 8MB "hole" after the
- * physical memory until the kernel virtual memory starts. That means that
- * any out-of-bounds memory accesses will hopefully be caught.
- * The vmalloc() routines leaves a hole of 4kB between each vmalloced
- * area for the same reason. ;)
- * vmalloc area starts at 4GB to prevent syscall table entry exchanging
- * from modules.
- */
-extern unsigned long vmalloc_end;
-
-#ifdef CONFIG_64BIT
-#define VMALLOC_ADDR	(max(0x100000000UL, (unsigned long) high_memory))
-#else
-#define VMALLOC_ADDR	((unsigned long) high_memory)
-#endif
-#define VMALLOC_OFFSET	(8*1024*1024)
-#define VMALLOC_START	((VMALLOC_ADDR + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))
-#define VMALLOC_END	vmalloc_end
-
-/*
- * We need some free virtual space to be able to do vmalloc.
- * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
- * area. On a machine with 2GB memory we make sure that we
- * have at least 128MB free space for vmalloc. On a machine
- * with 4TB we make sure we have at least 128GB.
+ * The vmalloc area will always be on the topmost area of the kernel
+ * mapping. We reserve 96MB (31bit) / 1GB (64bit) for vmalloc,
+ * which should be enough for any sane case.
+ * By putting vmalloc at the top, we maximise the gap between physical
+ * memory and vmalloc to catch misplaced memory accesses. As a side
+ * effect, this also makes sure that 64 bit module code cannot be used
+ * as system call address.
  */
 #ifndef __s390x__
-#define VMALLOC_MIN_SIZE	0x8000000UL
-#define VMALLOC_END_INIT	0x80000000UL
+#define VMALLOC_START	0x78000000UL
+#define VMALLOC_END	0x7e000000UL
+#define VMEM_MAP_MAX	0x80000000UL
 #else /* __s390x__ */
-#define VMALLOC_MIN_SIZE	0x2000000000UL
-#define VMALLOC_END_INIT	0x40000000000UL
+#define VMALLOC_START	0x3e000000000UL
+#define VMALLOC_END	0x3e040000000UL
+#define VMEM_MAP_MAX	0x40000000000UL
 #endif /* __s390x__ */
 
+#define VMEM_MAP	((struct page *) VMALLOC_END)
+#define VMEM_MAP_SIZE	((VMALLOC_START / PAGE_SIZE) * sizeof(struct page))
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  * | PFRA | | OS |
diff --git a/include/asm-s390/processor.h b/include/asm-s390/processor.h
index 21d40a19355e..c86b982aef5a 100644
--- a/include/asm-s390/processor.h
+++ b/include/asm-s390/processor.h
@@ -59,9 +59,6 @@ extern void s390_adjust_jiffies(void);
 extern void print_cpu_info(struct cpuinfo_S390 *);
 extern int get_cpu_capability(unsigned int *);
 
-/* Lazy FPU handling on uni-processor */
-extern struct task_struct *last_task_used_math;
-
 /*
  * User space process size: 2GB for 31 bit, 4TB for 64 bit.
  */
@@ -95,7 +92,6 @@ struct thread_struct {
 	unsigned long ksp;              /* kernel stack pointer            */
 	mm_segment_t mm_segment;
 	unsigned long prot_addr;        /* address of protection-excep.    */
-	unsigned int error_code;        /* error-code of last prog-excep.  */
 	unsigned int trap_no;
 	per_struct per_info;
 	/* Used to give failing instruction back to user for ieee exceptions */
diff --git a/include/asm-s390/ptrace.h b/include/asm-s390/ptrace.h
index 332ee73688fc..61f6952f2e35 100644
--- a/include/asm-s390/ptrace.h
+++ b/include/asm-s390/ptrace.h
@@ -465,6 +465,14 @@ struct user_regs_struct
 #ifdef __KERNEL__
 #define __ARCH_SYS_PTRACE	1
 
+/*
+ * These are defined as per linux/ptrace.h, which see.
+ */
+#define arch_has_single_step()	(1)
+struct task_struct;
+extern void user_enable_single_step(struct task_struct *);
+extern void user_disable_single_step(struct task_struct *);
+
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
 #define regs_return_value(regs)((regs)->gprs[2])
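
The declarations added here match the generic single-step hooks described in linux/ptrace.h. As a rough, hypothetical illustration of how architecture-independent code can drive them (simplified, not the actual kernel/ptrace.c logic):

/* Simplified, hypothetical caller of the single-step hooks. */
#include <linux/ptrace.h>
#include <linux/sched.h>

static void my_set_single_step(struct task_struct *child, int enable)
{
	if (!arch_has_single_step())
		return;		/* always true on s390 per the define above */
	if (enable)
		user_enable_single_step(child);	/* arm hardware single-step (PER on s390) */
	else
		user_disable_single_step(child);
}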
diff --git a/include/asm-s390/qdio.h b/include/asm-s390/qdio.h
index 74db1dc10a7d..4b8ff55f680e 100644
--- a/include/asm-s390/qdio.h
+++ b/include/asm-s390/qdio.h
@@ -184,7 +184,7 @@ struct qdr {
 #endif /* QDIO_32_BIT */
 	unsigned long qiba;             /* queue-information-block address */
 	unsigned int res8;              /* reserved */
-	unsigned int qkey    :  4;      /* queue-informatio-block key */
+	unsigned int qkey    :  4;      /* queue-information-block key */
 	unsigned int res9    : 28;      /* reserved */
 /*	union _qd {*/ /* why this? */
 	struct qdesfmt0 qdf0[126];
diff --git a/include/asm-s390/rwsem.h b/include/asm-s390/rwsem.h
index 90f4eccaa290..9d2a17971805 100644
--- a/include/asm-s390/rwsem.h
+++ b/include/asm-s390/rwsem.h
@@ -91,8 +91,8 @@ struct rw_semaphore {
 #endif
 
 #define __RWSEM_INITIALIZER(name) \
-{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) \
-  __RWSEM_DEP_MAP_INIT(name) }
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait.lock), \
+   LIST_HEAD_INIT((name).wait_list) __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
 	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
diff --git a/include/asm-s390/sclp.h b/include/asm-s390/sclp.h
index cb9faf1ea5cf..b5f2843013a3 100644
--- a/include/asm-s390/sclp.h
+++ b/include/asm-s390/sclp.h
@@ -27,7 +27,25 @@ struct sclp_ipl_info {
 	char loadparm[LOADPARM_LEN];
 };
 
-void sclp_readinfo_early(void);
+struct sclp_cpu_entry {
+	u8 address;
+	u8 reserved0[13];
+	u8 type;
+	u8 reserved1;
+} __attribute__((packed));
+
+struct sclp_cpu_info {
+	unsigned int configured;
+	unsigned int standby;
+	unsigned int combined;
+	int has_cpu_type;
+	struct sclp_cpu_entry cpu[255];
+};
+
+int sclp_get_cpu_info(struct sclp_cpu_info *info);
+int sclp_cpu_configure(u8 cpu);
+int sclp_cpu_deconfigure(u8 cpu);
+void sclp_read_info_early(void);
 void sclp_facilities_detect(void);
 unsigned long long sclp_memory_detect(void);
 int sclp_sdias_blk_count(void);
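
sclp_get_cpu_info() fills in the sclp_cpu_info structure declared above. A short, hypothetical consumer that walks the returned entries could look like the sketch below; the function name and printed text are illustrative only, and a non-zero return value is assumed to mean failure.

/* Hypothetical consumer of the SCLP CPU information interface. */
#include <linux/kernel.h>
#include <asm/sclp.h>

static void my_print_cpu_info(void)
{
	static struct sclp_cpu_info info;	/* ~4KB, so keep it off the stack */
	int i;

	if (sclp_get_cpu_info(&info))
		return;

	printk(KERN_INFO "cpus: %u configured, %u standby\n",
	       info.configured, info.standby);
	for (i = 0; i < info.combined; i++)
		printk(KERN_INFO "cpu %d: address %u, type %u\n",
		       i, info.cpu[i].address, info.cpu[i].type);
}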
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 07708c07701e..c7b74326a527 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -35,8 +35,6 @@ extern void machine_restart_smp(char *);
 extern void machine_halt_smp(void);
 extern void machine_power_off_smp(void);
 
-extern void smp_setup_cpu_possible_map(void);
-
 #define NO_PROC_ID		0xFF		/* No processor magic marker */
 
 /*
@@ -92,6 +90,8 @@ extern void __cpu_die (unsigned int cpu);
 extern void cpu_die (void) __attribute__ ((noreturn));
 extern int __cpu_up (unsigned int cpu);
 
+extern int smp_call_function_mask(cpumask_t mask, void (*func)(void *),
+				  void *info, int wait);
 #endif
 
 #ifndef CONFIG_SMP
@@ -103,7 +103,6 @@ static inline void smp_send_stop(void)
 
 #define hard_smp_processor_id()		0
 #define smp_cpu_not_running(cpu)	1
-#define smp_setup_cpu_possible_map()	do { } while (0)
 #endif
 
 extern union save_area *zfcpdump_save_areas[NR_CPUS + 1];
diff --git a/include/asm-s390/spinlock.h b/include/asm-s390/spinlock.h
index 3fd43826fd0b..df84ae96915f 100644
--- a/include/asm-s390/spinlock.h
+++ b/include/asm-s390/spinlock.h
@@ -53,44 +53,48 @@ _raw_compare_and_swap(volatile unsigned int *lock,
  */
 
 #define __raw_spin_is_locked(x) ((x)->owner_cpu != 0)
-#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
 #define __raw_spin_unlock_wait(lock) \
 	do { while (__raw_spin_is_locked(lock)) \
 		 _raw_spin_relax(lock); } while (0)
 
-extern void _raw_spin_lock_wait(raw_spinlock_t *, unsigned int pc);
-extern int _raw_spin_trylock_retry(raw_spinlock_t *, unsigned int pc);
+extern void _raw_spin_lock_wait(raw_spinlock_t *);
+extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags);
+extern int _raw_spin_trylock_retry(raw_spinlock_t *);
 extern void _raw_spin_relax(raw_spinlock_t *lock);
 
 static inline void __raw_spin_lock(raw_spinlock_t *lp)
 {
-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0)) {
-		lp->owner_pc = pc;
+	if (likely(old == 0))
 		return;
-	}
-	_raw_spin_lock_wait(lp, pc);
+	_raw_spin_lock_wait(lp);
+}
+
+static inline void __raw_spin_lock_flags(raw_spinlock_t *lp,
+					 unsigned long flags)
+{
+	int old;
+
+	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
+	if (likely(old == 0))
+		return;
+	_raw_spin_lock_wait_flags(lp, flags);
 }
 
 static inline int __raw_spin_trylock(raw_spinlock_t *lp)
 {
-	unsigned long pc = 1 | (unsigned long) __builtin_return_address(0);
 	int old;
 
 	old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id());
-	if (likely(old == 0)) {
-		lp->owner_pc = pc;
+	if (likely(old == 0))
 		return 1;
-	}
-	return _raw_spin_trylock_retry(lp, pc);
+	return _raw_spin_trylock_retry(lp);
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lp)
 {
-	lp->owner_pc = 0;
 	_raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0);
 }
 
diff --git a/include/asm-s390/spinlock_types.h b/include/asm-s390/spinlock_types.h
index b7ac13f7aa37..654abc40de04 100644
--- a/include/asm-s390/spinlock_types.h
+++ b/include/asm-s390/spinlock_types.h
@@ -7,7 +7,6 @@
 
 typedef struct {
 	volatile unsigned int owner_cpu;
-	volatile unsigned int owner_pc;
 } __attribute__ ((aligned (4))) raw_spinlock_t;
 
 #define __RAW_SPIN_LOCK_UNLOCKED	{ 0 }
diff --git a/include/asm-s390/tlbflush.h b/include/asm-s390/tlbflush.h
index a69bd2490d52..70fa5ae58180 100644
--- a/include/asm-s390/tlbflush.h
+++ b/include/asm-s390/tlbflush.h
@@ -42,11 +42,11 @@ static inline void __tlb_flush_global(void)
 /*
  * Flush all tlb entries of a page table on all cpus.
  */
-static inline void __tlb_flush_idte(pgd_t *pgd)
+static inline void __tlb_flush_idte(unsigned long asce)
 {
 	asm volatile(
 		"	.insn	rrf,0xb98e0000,0,%0,%1,0"
-		: : "a" (2048), "a" (__pa(pgd) & PAGE_MASK) : "cc" );
+		: : "a" (2048), "a" (asce) : "cc" );
 }
 
 static inline void __tlb_flush_mm(struct mm_struct * mm)
@@ -61,11 +61,11 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
 	 * only ran on the local cpu.
 	 */
 	if (MACHINE_HAS_IDTE) {
-		pgd_t *shadow_pgd = get_shadow_table(mm->pgd);
+		pgd_t *shadow = get_shadow_table(mm->pgd);
 
-		if (shadow_pgd)
-			__tlb_flush_idte(shadow_pgd);
-		__tlb_flush_idte(mm->pgd);
+		if (shadow)
+			__tlb_flush_idte((unsigned long) shadow | mm->context);
+		__tlb_flush_idte((unsigned long) mm->pgd | mm->context);
 		return;
 	}
 	preempt_disable();
@@ -106,9 +106,23 @@ static inline void __tlb_flush_mm_cond(struct mm_struct * mm)
  */
 #define flush_tlb()				do { } while (0)
 #define flush_tlb_all()				do { } while (0)
-#define flush_tlb_mm(mm)			__tlb_flush_mm_cond(mm)
 #define flush_tlb_page(vma, addr)		do { } while (0)
-#define flush_tlb_range(vma, start, end)	__tlb_flush_mm_cond(mm)
-#define flush_tlb_kernel_range(start, end)	__tlb_flush_mm(&init_mm)
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+	__tlb_flush_mm_cond(mm);
+}
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+				   unsigned long start, unsigned long end)
+{
+	__tlb_flush_mm_cond(vma->vm_mm);
+}
+
+static inline void flush_tlb_kernel_range(unsigned long start,
+					  unsigned long end)
+{
+	__tlb_flush_mm(&init_mm);
+}
 
 #endif /* _S390_TLBFLUSH_H */
diff --git a/include/asm-s390/zcrypt.h b/include/asm-s390/zcrypt.h
index a5dada617751..f228f1b86877 100644
--- a/include/asm-s390/zcrypt.h
+++ b/include/asm-s390/zcrypt.h
@@ -117,7 +117,7 @@ struct CPRBX {
 	unsigned char	padx004[16 - sizeof (char *)];
 	unsigned char *	req_extb;	/* request extension block 'addr'*/
 	unsigned char	padx005[16 - sizeof (char *)];
-	unsigned char *	rpl_extb;	/* reply extension block 'addres'*/
+	unsigned char *	rpl_extb;	/* reply extension block 'address'*/
 	unsigned short	ccp_rtcode;	/* server return code		*/
 	unsigned short	ccp_rscode;	/* server reason code		*/
 	unsigned int	mac_data_len;	/* Mac Data Length		*/