author    David S. Miller <davem@davemloft.net>   2015-03-03 21:16:48 -0500
committer David S. Miller <davem@davemloft.net>   2015-03-03 21:16:48 -0500
commit    71a83a6db6138b9d41d8a0b6b91cb59f6dc4742c (patch)
tree      f74b6e4e48257ec6ce40b95645ecb8533b9cc1f8 /arch/s390
parent    b97526f3ff95f92b107f0fb52cbb8627e395429b (diff)
parent    a6c5170d1edea97c538c81e377e56c7b5c5b7e63 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	drivers/net/ethernet/rocker/rocker.c

The rocker commit was two overlapping changes, one to rename the ->vport
member to ->pport, and another making the bitmask expression use '1ULL'
instead of plain '1'.

Signed-off-by: David S. Miller <davem@davemloft.net>
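The '1ULL' detail matters because the shifted value builds a 64-bit port
bitmask: a plain int literal shifted by 32 or more bits is undefined and
truncates the mask on targets where int is 32 bits wide. A minimal sketch of
the difference (illustrative only; port_bit() and the pport value are
hypothetical, not code from rocker.c):

	#include <stdint.h>
	#include <stdio.h>

	/* '1ULL' promotes the shift to 64 bits, so any pport up to 63 stays in range. */
	static uint64_t port_bit(unsigned int pport)
	{
		return 1ULL << pport;
	}

	int main(void)
	{
		/* With plain '1 << 40' this would be undefined behaviour. */
		printf("mask for pport 40: %#llx\n",
		       (unsigned long long)port_bit(40));
		return 0;
	}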
Diffstat (limited to 'arch/s390')
-rw-r--r--  arch/s390/hypfs/inode.c                   53
-rw-r--r--  arch/s390/include/asm/pci_io.h             1
-rw-r--r--  arch/s390/include/asm/pgtable.h            2
-rw-r--r--  arch/s390/include/asm/topology.h          24
-rw-r--r--  arch/s390/kernel/cache.c                  25
-rw-r--r--  arch/s390/kernel/early.c                  12
-rw-r--r--  arch/s390/kernel/setup.c                   1
-rw-r--r--  arch/s390/kernel/smp.c                    54
-rw-r--r--  arch/s390/kernel/topology.c              134
-rw-r--r--  arch/s390/kernel/vdso64/clock_gettime.S    6
-rw-r--r--  arch/s390/mm/mmap.c                        5
-rw-r--r--  arch/s390/pci/pci.c                       34

12 files changed, 174 insertions, 177 deletions
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 4c8008dd938e..99824ff8dd35 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -74,7 +74,7 @@ static void hypfs_remove(struct dentry *dentry)
 	parent = dentry->d_parent;
 	mutex_lock(&parent->d_inode->i_mutex);
 	if (hypfs_positive(dentry)) {
-		if (S_ISDIR(dentry->d_inode->i_mode))
+		if (d_is_dir(dentry))
 			simple_rmdir(parent->d_inode, dentry);
 		else
 			simple_unlink(parent->d_inode, dentry);
@@ -144,36 +144,32 @@ static int hypfs_open(struct inode *inode, struct file *filp)
 	return nonseekable_open(inode, filp);
 }
 
-static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov,
-			      unsigned long nr_segs, loff_t offset)
+static ssize_t hypfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
 {
-	char *data;
-	ssize_t ret;
-	struct file *filp = iocb->ki_filp;
-	/* XXX: temporary */
-	char __user *buf = iov[0].iov_base;
-	size_t count = iov[0].iov_len;
-
-	if (nr_segs != 1)
-		return -EINVAL;
-
-	data = filp->private_data;
-	ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data));
-	if (ret <= 0)
-		return ret;
+	struct file *file = iocb->ki_filp;
+	char *data = file->private_data;
+	size_t available = strlen(data);
+	loff_t pos = iocb->ki_pos;
+	size_t count;
 
-	iocb->ki_pos += ret;
-	file_accessed(filp);
-
-	return ret;
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= available || !iov_iter_count(to))
+		return 0;
+	count = copy_to_iter(data + pos, available - pos, to);
+	if (!count)
+		return -EFAULT;
+	iocb->ki_pos = pos + count;
+	file_accessed(file);
+	return count;
 }
-static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t offset)
+
+static ssize_t hypfs_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	int rc;
 	struct super_block *sb = file_inode(iocb->ki_filp)->i_sb;
 	struct hypfs_sb_info *fs_info = sb->s_fs_info;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = iov_iter_count(from);
 
 	/*
 	 * Currently we only allow one update per second for two reasons:
@@ -202,6 +198,7 @@ static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov,
 	}
 	hypfs_update_update(sb);
 	rc = count;
+	iov_iter_advance(from, count);
 out:
 	mutex_unlock(&fs_info->lock);
 	return rc;
@@ -440,10 +437,10 @@ struct dentry *hypfs_create_str(struct dentry *dir,
 static const struct file_operations hypfs_file_ops = {
 	.open		= hypfs_open,
 	.release	= hypfs_release,
-	.read		= do_sync_read,
-	.write		= do_sync_write,
-	.aio_read	= hypfs_aio_read,
-	.aio_write	= hypfs_aio_write,
+	.read		= new_sync_read,
+	.write		= new_sync_write,
+	.read_iter	= hypfs_read_iter,
+	.write_iter	= hypfs_write_iter,
 	.llseek		= no_llseek,
 };
 
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index f664e96f48c7..1a9a98de5bde 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -16,6 +16,7 @@
 struct zpci_iomap_entry {
 	u32	fh;
 	u8	bar;
+	u16	count;
 };
 
 extern struct zpci_iomap_entry *zpci_iomap_start;
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fbb5ee3ae57c..e08ec38f8c6e 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -91,7 +91,9 @@ extern unsigned long zero_page_mask;
  */
 #define PTRS_PER_PTE	256
 #ifndef CONFIG_64BIT
+#define __PAGETABLE_PUD_FOLDED
 #define PTRS_PER_PMD	1
+#define __PAGETABLE_PMD_FOLDED
 #define PTRS_PER_PUD	1
 #else /* CONFIG_64BIT */
 #define PTRS_PER_PMD	2048
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h
index c4fbb9527c5c..b1453a2ae1ca 100644
--- a/arch/s390/include/asm/topology.h
+++ b/arch/s390/include/asm/topology.h
@@ -18,15 +18,15 @@ struct cpu_topology_s390 {
 	cpumask_t book_mask;
 };
 
-extern struct cpu_topology_s390 cpu_topology[NR_CPUS];
+DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
-#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id)
-#define topology_thread_id(cpu) (cpu_topology[cpu].thread_id)
-#define topology_thread_cpumask(cpu) (&cpu_topology[cpu].thread_mask)
-#define topology_core_id(cpu) (cpu_topology[cpu].core_id)
-#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_mask)
-#define topology_book_id(cpu) (cpu_topology[cpu].book_id)
-#define topology_book_cpumask(cpu) (&cpu_topology[cpu].book_mask)
+#define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
+#define topology_thread_id(cpu) (per_cpu(cpu_topology, cpu).thread_id)
+#define topology_thread_cpumask(cpu) (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_core_id(cpu) (per_cpu(cpu_topology, cpu).core_id)
+#define topology_core_cpumask(cpu) (&per_cpu(cpu_topology, cpu).core_mask)
+#define topology_book_id(cpu) (per_cpu(cpu_topology, cpu).book_id)
+#define topology_book_cpumask(cpu) (&per_cpu(cpu_topology, cpu).book_mask)
 
 #define mc_capable() 1
 
@@ -51,14 +51,6 @@ static inline void topology_expect_change(void) { }
 #define POLARIZATION_VM		(2)
 #define POLARIZATION_VH		(3)
 
-#ifdef CONFIG_SCHED_BOOK
-void s390_init_cpu_topology(void);
-#else
-static inline void s390_init_cpu_topology(void)
-{
-};
-#endif
-
 #include <asm-generic/topology.h>
 
 #endif /* _ASM_S390_TOPOLOGY_H */
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index 632fa06ea162..0969d113b3d6 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -91,12 +91,9 @@ static inline enum cache_type get_cache_type(struct cache_info *ci, int level)
 {
 	if (level >= CACHE_MAX_LEVEL)
 		return CACHE_TYPE_NOCACHE;
-
 	ci += level;
-
 	if (ci->scope != CACHE_SCOPE_SHARED && ci->scope != CACHE_SCOPE_PRIVATE)
 		return CACHE_TYPE_NOCACHE;
-
 	return cache_type_map[ci->type];
 }
 
@@ -111,23 +108,19 @@ static inline unsigned long ecag(int ai, int li, int ti)
 }
 
 static void ci_leaf_init(struct cacheinfo *this_leaf, int private,
-			 enum cache_type type, unsigned int level)
+			 enum cache_type type, unsigned int level, int cpu)
 {
 	int ti, num_sets;
-	int cpu = smp_processor_id();
 
 	if (type == CACHE_TYPE_INST)
 		ti = CACHE_TI_INSTRUCTION;
 	else
 		ti = CACHE_TI_UNIFIED;
-
 	this_leaf->level = level + 1;
 	this_leaf->type = type;
 	this_leaf->coherency_line_size = ecag(EXTRACT_LINE_SIZE, level, ti);
-	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY,
-						level, ti);
+	this_leaf->ways_of_associativity = ecag(EXTRACT_ASSOCIATIVITY, level, ti);
 	this_leaf->size = ecag(EXTRACT_SIZE, level, ti);
-
 	num_sets = this_leaf->size / this_leaf->coherency_line_size;
 	num_sets /= this_leaf->ways_of_associativity;
 	this_leaf->number_of_sets = num_sets;
@@ -145,7 +138,6 @@ int init_cache_level(unsigned int cpu)
 
 	if (!this_cpu_ci)
 		return -EINVAL;
-
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	do {
 		ctype = get_cache_type(&ct.ci[0], level);
@@ -154,34 +146,31 @@ int init_cache_level(unsigned int cpu)
 		/* Separate instruction and data caches */
 		leaves += (ctype == CACHE_TYPE_SEPARATE) ? 2 : 1;
 	} while (++level < CACHE_MAX_LEVEL);
-
 	this_cpu_ci->num_levels = level;
 	this_cpu_ci->num_leaves = leaves;
-
 	return 0;
 }
 
 int populate_cache_leaves(unsigned int cpu)
 {
+	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 	unsigned int level, idx, pvt;
 	union cache_topology ct;
 	enum cache_type ctype;
-	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
-	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
 
 	ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
 	for (idx = 0, level = 0; level < this_cpu_ci->num_levels &&
 	     idx < this_cpu_ci->num_leaves; idx++, level++) {
 		if (!this_leaf)
 			return -EINVAL;
-
 		pvt = (ct.ci[level].scope == CACHE_SCOPE_PRIVATE) ? 1 : 0;
 		ctype = get_cache_type(&ct.ci[0], level);
 		if (ctype == CACHE_TYPE_SEPARATE) {
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level);
-			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_DATA, level, cpu);
+			ci_leaf_init(this_leaf++, pvt, CACHE_TYPE_INST, level, cpu);
 		} else {
-			ci_leaf_init(this_leaf++, pvt, ctype, level);
+			ci_leaf_init(this_leaf++, pvt, ctype, level, cpu);
 		}
 	}
 	return 0;
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index 70a329450901..4427ab7ac23a 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -393,17 +393,19 @@ static __init void detect_machine_facilities(void)
 		S390_lowcore.machine_flags |= MACHINE_FLAG_TLB_LC;
 	if (test_facility(129))
 		S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
-	if (test_facility(128))
-		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 #endif
 }
 
-static int __init nocad_setup(char *str)
+static int __init cad_setup(char *str)
 {
-	S390_lowcore.machine_flags &= ~MACHINE_FLAG_CAD;
+	int val;
+
+	get_option(&str, &val);
+	if (val && test_facility(128))
+		S390_lowcore.machine_flags |= MACHINE_FLAG_CAD;
 	return 0;
 }
-early_param("nocad", nocad_setup);
+early_param("cad", cad_setup);
 
 static int __init cad_init(void)
 {
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index bfac77ada4f2..a5ea8bc17cb3 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -909,7 +909,6 @@ void __init setup_arch(char **cmdline_p)
 	setup_lowcore();
 	smp_fill_possible_mask();
 	cpu_init();
-	s390_init_cpu_topology();
 
 	/*
 	 * Setup capabilities (ELF_HWCAP & ELF_PLATFORM).
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index a668993ff577..db8f1115a3bf 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -59,14 +59,13 @@ enum {
 	CPU_STATE_CONFIGURED,
 };
 
+static DEFINE_PER_CPU(struct cpu *, cpu_device);
+
 struct pcpu {
-	struct cpu *cpu;
 	struct _lowcore *lowcore;	/* lowcore page(s) for the cpu */
-	unsigned long async_stack;	/* async stack for the cpu */
-	unsigned long panic_stack;	/* panic stack for the cpu */
 	unsigned long ec_mask;		/* bit mask for ec_xxx functions */
-	int state;			/* physical cpu state */
-	int polarization;		/* physical polarization */
+	signed char state;		/* physical cpu state */
+	signed char polarization;	/* physical polarization */
 	u16 address;			/* physical cpu address */
 };
 
@@ -173,25 +172,30 @@ static void pcpu_ec_call(struct pcpu *pcpu, int ec_bit)
 	pcpu_sigp_retry(pcpu, order, 0);
 }
 
+#define ASYNC_FRAME_OFFSET (ASYNC_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+#define PANIC_FRAME_OFFSET (PAGE_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE)
+
 static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 {
+	unsigned long async_stack, panic_stack;
 	struct _lowcore *lc;
 
 	if (pcpu != &pcpu_devices[0]) {
 		pcpu->lowcore = (struct _lowcore *)
 			__get_free_pages(GFP_KERNEL | GFP_DMA, LC_ORDER);
-		pcpu->async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
-		pcpu->panic_stack = __get_free_page(GFP_KERNEL);
-		if (!pcpu->lowcore || !pcpu->panic_stack || !pcpu->async_stack)
+		async_stack = __get_free_pages(GFP_KERNEL, ASYNC_ORDER);
+		panic_stack = __get_free_page(GFP_KERNEL);
+		if (!pcpu->lowcore || !panic_stack || !async_stack)
 			goto out;
+	} else {
+		async_stack = pcpu->lowcore->async_stack - ASYNC_FRAME_OFFSET;
+		panic_stack = pcpu->lowcore->panic_stack - PANIC_FRAME_OFFSET;
 	}
 	lc = pcpu->lowcore;
 	memcpy(lc, &S390_lowcore, 512);
 	memset((char *) lc + 512, 0, sizeof(*lc) - 512);
-	lc->async_stack = pcpu->async_stack + ASYNC_SIZE
-		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
-	lc->panic_stack = pcpu->panic_stack + PAGE_SIZE
-		- STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
+	lc->async_stack = async_stack + ASYNC_FRAME_OFFSET;
+	lc->panic_stack = panic_stack + PANIC_FRAME_OFFSET;
 	lc->cpu_nr = cpu;
 	lc->spinlock_lockval = arch_spin_lockval(cpu);
 #ifndef CONFIG_64BIT
@@ -212,8 +216,8 @@ static int pcpu_alloc_lowcore(struct pcpu *pcpu, int cpu)
 	return 0;
 out:
 	if (pcpu != &pcpu_devices[0]) {
-		free_page(pcpu->panic_stack);
-		free_pages(pcpu->async_stack, ASYNC_ORDER);
+		free_page(panic_stack);
+		free_pages(async_stack, ASYNC_ORDER);
 		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 	}
 	return -ENOMEM;
@@ -235,11 +239,11 @@ static void pcpu_free_lowcore(struct pcpu *pcpu)
 #else
 	vdso_free_per_cpu(pcpu->lowcore);
 #endif
-	if (pcpu != &pcpu_devices[0]) {
-		free_page(pcpu->panic_stack);
-		free_pages(pcpu->async_stack, ASYNC_ORDER);
-		free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
-	}
+	if (pcpu == &pcpu_devices[0])
+		return;
+	free_page(pcpu->lowcore->panic_stack-PANIC_FRAME_OFFSET);
+	free_pages(pcpu->lowcore->async_stack-ASYNC_FRAME_OFFSET, ASYNC_ORDER);
+	free_pages((unsigned long) pcpu->lowcore, LC_ORDER);
 }
 
 #endif /* CONFIG_HOTPLUG_CPU */
@@ -366,7 +370,8 @@ void smp_call_online_cpu(void (*func)(void *), void *data)
 void smp_call_ipl_cpu(void (*func)(void *), void *data)
 {
 	pcpu_delegate(&pcpu_devices[0], func, data,
-		      pcpu_devices->panic_stack + PAGE_SIZE);
+		      pcpu_devices->lowcore->panic_stack -
+		      PANIC_FRAME_OFFSET + PAGE_SIZE);
 }
 
 int smp_find_processor_id(u16 address)
@@ -935,10 +940,6 @@ void __init smp_prepare_boot_cpu(void)
 	pcpu->state = CPU_STATE_CONFIGURED;
 	pcpu->address = stap();
 	pcpu->lowcore = (struct _lowcore *)(unsigned long) store_prefix();
-	pcpu->async_stack = S390_lowcore.async_stack - ASYNC_SIZE
-		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	pcpu->panic_stack = S390_lowcore.panic_stack - PAGE_SIZE
-		+ STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 	S390_lowcore.percpu_offset = __per_cpu_offset[0];
 	smp_cpu_set_polarization(0, POLARIZATION_UNKNOWN);
 	set_cpu_present(0, true);
@@ -1078,8 +1079,7 @@ static int smp_cpu_notify(struct notifier_block *self, unsigned long action,
 			  void *hcpu)
 {
 	unsigned int cpu = (unsigned int)(long)hcpu;
-	struct cpu *c = pcpu_devices[cpu].cpu;
-	struct device *s = &c->dev;
+	struct device *s = &per_cpu(cpu_device, cpu)->dev;
 	int err = 0;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -1102,7 +1102,7 @@ static int smp_add_present_cpu(int cpu)
 	c = kzalloc(sizeof(*c), GFP_KERNEL);
 	if (!c)
 		return -ENOMEM;
-	pcpu_devices[cpu].cpu = c;
+	per_cpu(cpu_device, cpu) = c;
 	s = &c->dev;
 	c->hotpluggable = 1;
 	rc = register_cpu(c, cpu);
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index 24ee33f1af24..14da43b801d9 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -7,14 +7,14 @@
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/workqueue.h>
-#include <linux/bootmem.h>
 #include <linux/cpuset.h>
 #include <linux/device.h>
 #include <linux/export.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/init.h>
 #include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
 #include <linux/mm.h>
@@ -42,8 +42,8 @@ static DEFINE_SPINLOCK(topology_lock);
 static struct mask_info socket_info;
 static struct mask_info book_info;
 
-struct cpu_topology_s390 cpu_topology[NR_CPUS];
-EXPORT_SYMBOL_GPL(cpu_topology);
+DEFINE_PER_CPU(struct cpu_topology_s390, cpu_topology);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_topology);
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -90,15 +90,15 @@ static struct mask_info *add_cpus_to_mask(struct topology_core *tl_core,
 		if (lcpu < 0)
 			continue;
 		for (i = 0; i <= smp_cpu_mtid; i++) {
-			cpu_topology[lcpu + i].book_id = book->id;
-			cpu_topology[lcpu + i].core_id = rcore;
-			cpu_topology[lcpu + i].thread_id = lcpu + i;
+			per_cpu(cpu_topology, lcpu + i).book_id = book->id;
+			per_cpu(cpu_topology, lcpu + i).core_id = rcore;
+			per_cpu(cpu_topology, lcpu + i).thread_id = lcpu + i;
 			cpumask_set_cpu(lcpu + i, &book->mask);
 			cpumask_set_cpu(lcpu + i, &socket->mask);
 			if (one_socket_per_cpu)
-				cpu_topology[lcpu + i].socket_id = rcore;
+				per_cpu(cpu_topology, lcpu + i).socket_id = rcore;
 			else
-				cpu_topology[lcpu + i].socket_id = socket->id;
+				per_cpu(cpu_topology, lcpu + i).socket_id = socket->id;
 			smp_cpu_set_polarization(lcpu + i, tl_core->pp);
 		}
 		if (one_socket_per_cpu)
@@ -249,14 +249,14 @@ static void update_cpu_masks(void)
 
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
-		cpu_topology[cpu].thread_mask = cpu_thread_map(cpu);
-		cpu_topology[cpu].core_mask = cpu_group_map(&socket_info, cpu);
-		cpu_topology[cpu].book_mask = cpu_group_map(&book_info, cpu);
+		per_cpu(cpu_topology, cpu).thread_mask = cpu_thread_map(cpu);
+		per_cpu(cpu_topology, cpu).core_mask = cpu_group_map(&socket_info, cpu);
+		per_cpu(cpu_topology, cpu).book_mask = cpu_group_map(&book_info, cpu);
 		if (!MACHINE_HAS_TOPOLOGY) {
-			cpu_topology[cpu].thread_id = cpu;
-			cpu_topology[cpu].core_id = cpu;
-			cpu_topology[cpu].socket_id = cpu;
-			cpu_topology[cpu].book_id = cpu;
+			per_cpu(cpu_topology, cpu).thread_id = cpu;
+			per_cpu(cpu_topology, cpu).core_id = cpu;
+			per_cpu(cpu_topology, cpu).socket_id = cpu;
+			per_cpu(cpu_topology, cpu).book_id = cpu;
 		}
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
@@ -334,50 +334,6 @@ void topology_expect_change(void)
 	set_topology_timer();
 }
 
-static int __init early_parse_topology(char *p)
-{
-	if (strncmp(p, "off", 3))
-		return 0;
-	topology_enabled = 0;
-	return 0;
-}
-early_param("topology", early_parse_topology);
-
-static void __init alloc_masks(struct sysinfo_15_1_x *info,
-			       struct mask_info *mask, int offset)
-{
-	int i, nr_masks;
-
-	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
-	for (i = 0; i < info->mnest - offset; i++)
-		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
-	nr_masks = max(nr_masks, 1);
-	for (i = 0; i < nr_masks; i++) {
-		mask->next = alloc_bootmem_align(
-			roundup_pow_of_two(sizeof(struct mask_info)),
-			roundup_pow_of_two(sizeof(struct mask_info)));
-		mask = mask->next;
-	}
-}
-
-void __init s390_init_cpu_topology(void)
-{
-	struct sysinfo_15_1_x *info;
-	int i;
-
-	if (!MACHINE_HAS_TOPOLOGY)
-		return;
-	tl_info = alloc_bootmem_pages(PAGE_SIZE);
-	info = tl_info;
-	store_topology(info);
-	pr_info("The CPU configuration topology of the machine is:");
-	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		printk(KERN_CONT " %d", info->mag[i]);
-	printk(KERN_CONT " / %d\n", info->mnest);
-	alloc_masks(info, &socket_info, 1);
-	alloc_masks(info, &book_info, 2);
-}
-
 static int cpu_management;
 
 static ssize_t dispatching_show(struct device *dev,
@@ -467,20 +423,29 @@ int topology_cpu_init(struct cpu *cpu)
 
 const struct cpumask *cpu_thread_mask(int cpu)
 {
-	return &cpu_topology[cpu].thread_mask;
+	return &per_cpu(cpu_topology, cpu).thread_mask;
 }
 
 
 const struct cpumask *cpu_coregroup_mask(int cpu)
 {
-	return &cpu_topology[cpu].core_mask;
+	return &per_cpu(cpu_topology, cpu).core_mask;
 }
 
 static const struct cpumask *cpu_book_mask(int cpu)
 {
-	return &cpu_topology[cpu].book_mask;
+	return &per_cpu(cpu_topology, cpu).book_mask;
 }
 
+static int __init early_parse_topology(char *p)
+{
+	if (strncmp(p, "off", 3))
+		return 0;
+	topology_enabled = 0;
+	return 0;
+}
+early_param("topology", early_parse_topology);
+
 static struct sched_domain_topology_level s390_topology[] = {
 	{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
 	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@@ -489,6 +454,42 @@ static struct sched_domain_topology_level s390_topology[] = {
 	{ NULL, },
 };
 
+static void __init alloc_masks(struct sysinfo_15_1_x *info,
+			       struct mask_info *mask, int offset)
+{
+	int i, nr_masks;
+
+	nr_masks = info->mag[TOPOLOGY_NR_MAG - offset];
+	for (i = 0; i < info->mnest - offset; i++)
+		nr_masks *= info->mag[TOPOLOGY_NR_MAG - offset - 1 - i];
+	nr_masks = max(nr_masks, 1);
+	for (i = 0; i < nr_masks; i++) {
+		mask->next = kzalloc(sizeof(*mask->next), GFP_KERNEL);
+		mask = mask->next;
+	}
+}
+
+static int __init s390_topology_init(void)
+{
+	struct sysinfo_15_1_x *info;
+	int i;
+
+	if (!MACHINE_HAS_TOPOLOGY)
+		return 0;
+	tl_info = (struct sysinfo_15_1_x *)__get_free_page(GFP_KERNEL);
+	info = tl_info;
+	store_topology(info);
+	pr_info("The CPU configuration topology of the machine is:");
+	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
+		printk(KERN_CONT " %d", info->mag[i]);
+	printk(KERN_CONT " / %d\n", info->mnest);
+	alloc_masks(info, &socket_info, 1);
+	alloc_masks(info, &book_info, 2);
+	set_sched_topology(s390_topology);
+	return 0;
+}
+early_initcall(s390_topology_init);
+
 static int __init topology_init(void)
 {
 	if (MACHINE_HAS_TOPOLOGY)
@@ -498,10 +499,3 @@ static int __init topology_init(void)
 	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
 }
 device_initcall(topology_init);
-
-static int __init early_topology_init(void)
-{
-	set_sched_topology(s390_topology);
-	return 0;
-}
-early_initcall(early_topology_init);
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 7699e735ae28..61541fb93dc6 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -25,9 +25,7 @@ __kernel_clock_gettime:
 	je	4f
 	cghi	%r2,__CLOCK_REALTIME
 	je	5f
-	cghi	%r2,__CLOCK_THREAD_CPUTIME_ID
-	je	9f
-	cghi	%r2,-2		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
+	cghi	%r2,-3		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
 	je	9f
 	cghi	%r2,__CLOCK_MONOTONIC_COARSE
 	je	3f
@@ -106,7 +104,7 @@ __kernel_clock_gettime:
 	aghi	%r15,16
 	br	%r14
 
-	/* CLOCK_THREAD_CPUTIME_ID for this thread */
+	/* CPUCLOCK_VIRT for this thread */
 9:	icm	%r0,15,__VDSO_ECTG_OK(%r5)
 	jz	12f
 	ear	%r2,%a4
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index d008f638b2cd..179a2c20b01f 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -183,7 +183,10 @@ unsigned long randomize_et_dyn(void)
 {
 	unsigned long base;
 
-	base = (STACK_TOP / 3 * 2) & (~mmap_align_mask << PAGE_SHIFT);
+	base = STACK_TOP / 3 * 2;
+	if (!is_32bit_task())
+		/* Align to 4GB */
+		base &= ~((1UL << 32) - 1);
 	return base + mmap_rnd();
 }
 
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 3290f11ae1d9..753a56731951 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -259,7 +259,10 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
 }
 
 /* Create a virtual mapping cookie for a PCI BAR */
-void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
+void __iomem *pci_iomap_range(struct pci_dev *pdev,
+			      int bar,
+			      unsigned long offset,
+			      unsigned long max)
 {
 	struct zpci_dev *zdev = get_zdev(pdev);
 	u64 addr;
@@ -270,14 +273,27 @@ void __iomem *pci_iomap(struct pci_dev *pdev, int bar, unsigned long max)
 
 	idx = zdev->bars[bar].map_idx;
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = zdev->fh;
-	zpci_iomap_start[idx].bar = bar;
+	if (zpci_iomap_start[idx].count++) {
+		BUG_ON(zpci_iomap_start[idx].fh != zdev->fh ||
+		       zpci_iomap_start[idx].bar != bar);
+	} else {
+		zpci_iomap_start[idx].fh = zdev->fh;
+		zpci_iomap_start[idx].bar = bar;
+	}
+	/* Detect overrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
 	spin_unlock(&zpci_iomap_lock);
 
 	addr = ZPCI_IOMAP_ADDR_BASE | ((u64) idx << 48);
-	return (void __iomem *) addr;
+	return (void __iomem *) addr + offset;
 }
-EXPORT_SYMBOL_GPL(pci_iomap);
+EXPORT_SYMBOL_GPL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+	return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
 
 void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 {
@@ -285,8 +301,12 @@ void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
 
 	idx = (((__force u64) addr) & ~ZPCI_IOMAP_ADDR_BASE) >> 48;
 	spin_lock(&zpci_iomap_lock);
-	zpci_iomap_start[idx].fh = 0;
-	zpci_iomap_start[idx].bar = 0;
+	/* Detect underrun */
+	BUG_ON(!zpci_iomap_start[idx].count);
+	if (!--zpci_iomap_start[idx].count) {
+		zpci_iomap_start[idx].fh = 0;
+		zpci_iomap_start[idx].bar = 0;
+	}
 	spin_unlock(&zpci_iomap_lock);
 }
 EXPORT_SYMBOL_GPL(pci_iounmap);