Diffstat (limited to 'arch/s390/kernel/topology.c')
-rw-r--r--  arch/s390/kernel/topology.c | 281
1 file changed, 190 insertions, 91 deletions
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c
index fdb5b8cb260f..7370a41948ca 100644
--- a/arch/s390/kernel/topology.c
+++ b/arch/s390/kernel/topology.c
@@ -1,22 +1,22 @@
 /*
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2011
  * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
  */
 
 #define KMSG_COMPONENT "cpu"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include <linux/kernel.h>
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/workqueue.h>
 #include <linux/bootmem.h>
+#include <linux/cpuset.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
 #include <linux/sched.h>
-#include <linux/workqueue.h>
+#include <linux/init.h>
+#include <linux/delay.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
-#include <linux/cpuset.h>
-#include <asm/delay.h>
+#include <linux/mm.h>
 
 #define PTF_HORIZONTAL	(0UL)
 #define PTF_VERTICAL	(1UL)
@@ -31,7 +31,6 @@ struct mask_info {
 static int topology_enabled = 1;
 static void topology_work_fn(struct work_struct *work);
 static struct sysinfo_15_1_x *tl_info;
-static struct timer_list topology_timer;
 static void set_topology_timer(void);
 static DECLARE_WORK(topology_work, topology_work_fn);
 /* topology_lock protects the core linked list */
@@ -41,11 +40,12 @@ static struct mask_info core_info;
 cpumask_t cpu_core_map[NR_CPUS];
 unsigned char cpu_core_id[NR_CPUS];
 
-#ifdef CONFIG_SCHED_BOOK
 static struct mask_info book_info;
 cpumask_t cpu_book_map[NR_CPUS];
 unsigned char cpu_book_id[NR_CPUS];
-#endif
+
+/* smp_cpu_state_mutex must be held when accessing this array */
+int cpu_polarization[NR_CPUS];
 
 static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 {
@@ -71,7 +71,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
 static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 					  struct mask_info *book,
 					  struct mask_info *core,
-					  int z10)
+					  int one_core_per_cpu)
 {
 	unsigned int cpu;
 
@@ -85,18 +85,16 @@ static struct mask_info *add_cpus_to_mask(struct topology_cpu *tl_cpu,
 		for_each_present_cpu(lcpu) {
 			if (cpu_logical_map(lcpu) != rcpu)
 				continue;
-#ifdef CONFIG_SCHED_BOOK
 			cpumask_set_cpu(lcpu, &book->mask);
 			cpu_book_id[lcpu] = book->id;
-#endif
 			cpumask_set_cpu(lcpu, &core->mask);
-			if (z10) {
+			if (one_core_per_cpu) {
 				cpu_core_id[lcpu] = rcpu;
 				core = core->next;
 			} else {
 				cpu_core_id[lcpu] = core->id;
 			}
-			smp_cpu_polarization[lcpu] = tl_cpu->pp;
+			cpu_set_polarization(lcpu, tl_cpu->pp);
 		}
 	}
 	return core;
@@ -111,13 +109,11 @@ static void clear_masks(void)
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#ifdef CONFIG_SCHED_BOOK
 	info = &book_info;
 	while (info) {
 		cpumask_clear(&info->mask);
 		info = info->next;
 	}
-#endif
 }
 
 static union topology_entry *next_tle(union topology_entry *tle)
@@ -127,66 +123,75 @@ static union topology_entry *next_tle(union topology_entry *tle)
 	return (union topology_entry *)((struct topology_container *)tle + 1);
 }
 
-static void tl_to_cores(struct sysinfo_15_1_x *info)
+static void __tl_to_cores_generic(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
-	struct mask_info *book = &book_info;
-	struct cpuid cpu_id;
-#else
-	struct mask_info *book = NULL;
-#endif
 	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
 	union topology_entry *tle, *end;
-	int z10 = 0;
 
-#ifdef CONFIG_SCHED_BOOK
-	get_cpu_id(&cpu_id);
-	z10 = cpu_id.machine == 0x2097 || cpu_id.machine == 0x2098;
-#endif
-	spin_lock_irq(&topology_lock);
-	clear_masks();
 	tle = info->tle;
 	end = (union topology_entry *)((unsigned long)info + info->length);
 	while (tle < end) {
-#ifdef CONFIG_SCHED_BOOK
-		if (z10) {
-			switch (tle->nl) {
-			case 1:
-				book = book->next;
-				book->id = tle->container.id;
-				break;
-			case 0:
-				core = add_cpus_to_mask(&tle->cpu, book, core, z10);
-				break;
-			default:
-				clear_masks();
-				goto out;
-			}
-			tle = next_tle(tle);
-			continue;
-		}
-#endif
 		switch (tle->nl) {
-#ifdef CONFIG_SCHED_BOOK
 		case 2:
 			book = book->next;
 			book->id = tle->container.id;
 			break;
-#endif
 		case 1:
 			core = core->next;
 			core->id = tle->container.id;
 			break;
 		case 0:
-			add_cpus_to_mask(&tle->cpu, book, core, z10);
+			add_cpus_to_mask(&tle->cpu, book, core, 0);
 			break;
 		default:
 			clear_masks();
-			goto out;
+			return;
 		}
 		tle = next_tle(tle);
 	}
-out:
+}
+
+static void __tl_to_cores_z10(struct sysinfo_15_1_x *info)
+{
+	struct mask_info *core = &core_info;
+	struct mask_info *book = &book_info;
+	union topology_entry *tle, *end;
+
+	tle = info->tle;
+	end = (union topology_entry *)((unsigned long)info + info->length);
+	while (tle < end) {
+		switch (tle->nl) {
+		case 1:
+			book = book->next;
+			book->id = tle->container.id;
+			break;
+		case 0:
+			core = add_cpus_to_mask(&tle->cpu, book, core, 1);
+			break;
+		default:
+			clear_masks();
+			return;
+		}
+		tle = next_tle(tle);
+	}
+}
+
+static void tl_to_cores(struct sysinfo_15_1_x *info)
+{
+	struct cpuid cpu_id;
+
+	get_cpu_id(&cpu_id);
+	spin_lock_irq(&topology_lock);
+	clear_masks();
+	switch (cpu_id.machine) {
+	case 0x2097:
+	case 0x2098:
+		__tl_to_cores_z10(info);
+		break;
+	default:
+		__tl_to_cores_generic(info);
+	}
 	spin_unlock_irq(&topology_lock);
 }
 
@@ -196,7 +201,7 @@ static void topology_update_polarization_simple(void)
 
 	mutex_lock(&smp_cpu_state_mutex);
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+		cpu_set_polarization(cpu, POLARIZATION_HRZ);
 	mutex_unlock(&smp_cpu_state_mutex);
 }
 
@@ -215,8 +220,7 @@ static int ptf(unsigned long fc)
 
 int topology_set_cpu_management(int fc)
 {
-	int cpu;
-	int rc;
+	int cpu, rc;
 
 	if (!MACHINE_HAS_TOPOLOGY)
 		return -EOPNOTSUPP;
@@ -227,7 +231,7 @@ int topology_set_cpu_management(int fc)
 	if (rc)
 		return -EBUSY;
 	for_each_possible_cpu(cpu)
-		smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+		cpu_set_polarization(cpu, POLARIZATION_UNKNOWN);
 	return rc;
 }
 
@@ -239,29 +243,25 @@ static void update_cpu_core_map(void)
 	spin_lock_irqsave(&topology_lock, flags);
 	for_each_possible_cpu(cpu) {
 		cpu_core_map[cpu] = cpu_group_map(&core_info, cpu);
-#ifdef CONFIG_SCHED_BOOK
 		cpu_book_map[cpu] = cpu_group_map(&book_info, cpu);
-#endif
 	}
 	spin_unlock_irqrestore(&topology_lock, flags);
 }
 
 void store_topology(struct sysinfo_15_1_x *info)
 {
-#ifdef CONFIG_SCHED_BOOK
 	int rc;
 
 	rc = stsi(info, 15, 1, 3);
 	if (rc != -ENOSYS)
 		return;
-#endif
 	stsi(info, 15, 1, 2);
 }
 
 int arch_update_cpu_topology(void)
 {
 	struct sysinfo_15_1_x *info = tl_info;
-	struct sys_device *sysdev;
+	struct device *dev;
 	int cpu;
 
 	if (!MACHINE_HAS_TOPOLOGY) {
@@ -273,8 +273,8 @@ int arch_update_cpu_topology(void)
 	tl_to_cores(info);
 	update_cpu_core_map();
 	for_each_online_cpu(cpu) {
-		sysdev = get_cpu_sysdev(cpu);
-		kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+		dev = get_cpu_device(cpu);
+		kobject_uevent(&dev->kobj, KOBJ_CHANGE);
 	}
 	return 1;
 }
@@ -296,12 +296,30 @@ static void topology_timer_fn(unsigned long ignored)
 	set_topology_timer();
 }
 
+static struct timer_list topology_timer =
+	TIMER_DEFERRED_INITIALIZER(topology_timer_fn, 0, 0);
+
+static atomic_t topology_poll = ATOMIC_INIT(0);
+
 static void set_topology_timer(void)
 {
-	topology_timer.function = topology_timer_fn;
-	topology_timer.data = 0;
-	topology_timer.expires = jiffies + 60 * HZ;
-	add_timer(&topology_timer);
+	if (atomic_add_unless(&topology_poll, -1, 0))
+		mod_timer(&topology_timer, jiffies + HZ / 10);
+	else
+		mod_timer(&topology_timer, jiffies + HZ * 60);
+}
+
+void topology_expect_change(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY)
+		return;
+	/* This is racy, but it doesn't matter since it is just a heuristic.
+	 * Worst case is that we poll in a higher frequency for a bit longer.
+	 */
+	if (atomic_read(&topology_poll) > 60)
+		return;
+	atomic_add(60, &topology_poll);
+	set_topology_timer();
 }
 
 static int __init early_parse_topology(char *p)
@@ -313,23 +331,6 @@ static int __init early_parse_topology(char *p)
 }
 early_param("topology", early_parse_topology);
 
-static int __init init_topology_update(void)
-{
-	int rc;
-
-	rc = 0;
-	if (!MACHINE_HAS_TOPOLOGY) {
-		topology_update_polarization_simple();
-		goto out;
-	}
-	init_timer_deferrable(&topology_timer);
-	set_topology_timer();
-out:
-	update_cpu_core_map();
-	return rc;
-}
-__initcall(init_topology_update);
-
 static void __init alloc_masks(struct sysinfo_15_1_x *info,
 			       struct mask_info *mask, int offset)
 {
@@ -357,10 +358,108 @@ void __init s390_init_cpu_topology(void)
 	store_topology(info);
 	pr_info("The CPU configuration topology of the machine is:");
 	for (i = 0; i < TOPOLOGY_NR_MAG; i++)
-		printk(" %d", info->mag[i]);
-	printk(" / %d\n", info->mnest);
+		printk(KERN_CONT " %d", info->mag[i]);
+	printk(KERN_CONT " / %d\n", info->mnest);
 	alloc_masks(info, &core_info, 1);
-#ifdef CONFIG_SCHED_BOOK
 	alloc_masks(info, &book_info, 2);
-#endif
 }
+
+static int cpu_management;
+
+static ssize_t dispatching_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	count = sprintf(buf, "%d\n", cpu_management);
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+
+static ssize_t dispatching_store(struct device *dev,
+				 struct device_attribute *attr,
+				 const char *buf,
+				 size_t count)
+{
+	int val, rc;
+	char delim;
+
+	if (sscanf(buf, "%d %c", &val, &delim) != 1)
+		return -EINVAL;
+	if (val != 0 && val != 1)
+		return -EINVAL;
+	rc = 0;
+	get_online_cpus();
+	mutex_lock(&smp_cpu_state_mutex);
+	if (cpu_management == val)
+		goto out;
+	rc = topology_set_cpu_management(val);
+	if (rc)
+		goto out;
+	cpu_management = val;
+	topology_expect_change();
+out:
+	mutex_unlock(&smp_cpu_state_mutex);
+	put_online_cpus();
+	return rc ? rc : count;
+}
+static DEVICE_ATTR(dispatching, 0644, dispatching_show,
+		   dispatching_store);
+
+static ssize_t cpu_polarization_show(struct device *dev,
+				     struct device_attribute *attr, char *buf)
+{
+	int cpu = dev->id;
+	ssize_t count;
+
+	mutex_lock(&smp_cpu_state_mutex);
+	switch (cpu_read_polarization(cpu)) {
+	case POLARIZATION_HRZ:
+		count = sprintf(buf, "horizontal\n");
+		break;
+	case POLARIZATION_VL:
+		count = sprintf(buf, "vertical:low\n");
+		break;
+	case POLARIZATION_VM:
+		count = sprintf(buf, "vertical:medium\n");
+		break;
+	case POLARIZATION_VH:
+		count = sprintf(buf, "vertical:high\n");
+		break;
+	default:
+		count = sprintf(buf, "unknown\n");
+		break;
+	}
+	mutex_unlock(&smp_cpu_state_mutex);
+	return count;
+}
+static DEVICE_ATTR(polarization, 0444, cpu_polarization_show, NULL);
+
+static struct attribute *topology_cpu_attrs[] = {
+	&dev_attr_polarization.attr,
+	NULL,
+};
+
+static struct attribute_group topology_cpu_attr_group = {
+	.attrs = topology_cpu_attrs,
+};
+
+int topology_cpu_init(struct cpu *cpu)
+{
+	return sysfs_create_group(&cpu->dev.kobj, &topology_cpu_attr_group);
+}
+
+static int __init topology_init(void)
+{
+	if (!MACHINE_HAS_TOPOLOGY) {
+		topology_update_polarization_simple();
+		goto out;
+	}
+	set_topology_timer();
+out:
+	update_cpu_core_map();
+	return device_create_file(cpu_subsys.dev_root, &dev_attr_dispatching);
+}
+device_initcall(topology_init);
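
Note on the timer rework above: the patch replaces the fixed 60-second rearm with a credit-based backoff. topology_expect_change() deposits roughly 60 "fast poll" credits (capped so repeated calls don't accumulate), and set_topology_timer() consumes one credit per rearm, polling every HZ/10 (100ms) while credits remain and once per minute otherwise. Below is a minimal standalone userspace sketch of that heuristic using C11 atomics; the names next_poll_ms()/expect_change() and the demo loop are illustrative only and are not part of the kernel patch.

/* Userspace sketch of the adaptive-polling heuristic from the diff above.
 * Build with: cc -std=c11 -o poll_sketch poll_sketch.c
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int topology_poll;	/* fast-poll credits, like the kernel's */

/* Pick the next polling delay, consuming one credit if any remain.
 * The compare-exchange loop is the userspace spelling of the kernel's
 * atomic_add_unless(&topology_poll, -1, 0).
 */
static unsigned int next_poll_ms(void)
{
	int old = atomic_load(&topology_poll);

	while (old > 0) {
		/* On failure, 'old' is reloaded and the bound rechecked. */
		if (atomic_compare_exchange_weak(&topology_poll, &old, old - 1))
			return 100;		/* kernel: HZ / 10 */
	}
	return 60 * 1000;			/* kernel: HZ * 60 */
}

/* Grant ~60 fast polls; the >60 cap keeps repeated expect-change events
 * from stacking up unbounded fast polling (racy, but only a heuristic).
 */
static void expect_change(void)
{
	if (atomic_load(&topology_poll) > 60)
		return;
	atomic_fetch_add(&topology_poll, 60);
}

int main(void)
{
	expect_change();
	for (int i = 0; i < 5; i++) {
		unsigned int ms = next_poll_ms();

		printf("poll #%d: next poll in %u ms\n", i, ms);
		usleep(ms * 1000);
	}
	return 0;
}

With credits deposited, every iteration prints a 100ms delay; once the counter drains, next_poll_ms() falls back to the slow one-minute cadence, matching the behavior of set_topology_timer() after the dispatching mode changes.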