author		Linus Torvalds <torvalds@woody.osdl.org>	2006-12-08 14:21:55 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-08 14:21:55 -0500
commit		88032b322a38b37335c8cb2e3473a45c81d280eb
tree		cd722ab15b18a10f6b1aa50656b8da713ee8b799
parent		63f3861d2fbf8ccbad1386ac9ac8b822c036ea00
parent		028d9b3cc62cb9dd31f1b5929edb3c23612cfccc
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6:
  [S390] Poison init section before freeing it.
  [S390] Use add_active_range() and free_area_init_nodes().
  [S390] Virtual memmap for s390.
  [S390] Update documentation for dynamic subchannel mapping.
  [S390] Use dev->groups for adding/removing the subchannel attribute group.
  [S390] Support for disconnected devices reappearing on another subchannel.
  [S390] subchannel lock conversion.
  [S390] Some preparations for the dynamic subchannel mapping patch.
  [S390] runtime switch for qdio performance statistics
  [S390] New DASD feature for ERP related logging
  [S390] add reset call handler to the ap bus.
  [S390] more workqueue fixes.
  [S390] workqueue fixes.
  [S390] uaccess_pt: add missing down_read() and convert to is_init().
-rw-r--r--	Documentation/s390/driver-model.txt	   7
-rw-r--r--	arch/s390/Kconfig	  14
-rw-r--r--	arch/s390/defconfig	   1
-rw-r--r--	arch/s390/kernel/setup.c	  55
-rw-r--r--	arch/s390/lib/uaccess_pt.c	   5
-rw-r--r--	arch/s390/mm/Makefile	   2
-rw-r--r--	arch/s390/mm/extmem.c	 106
-rw-r--r--	arch/s390/mm/init.c	 184
-rw-r--r--	arch/s390/mm/vmem.c	 381
-rw-r--r--	drivers/s390/block/dasd.c	   8
-rw-r--r--	drivers/s390/block/dasd_3990_erp.c	  23
-rw-r--r--	drivers/s390/block/dasd_devmap.c	  49
-rw-r--r--	drivers/s390/block/dasd_int.h	   4
-rw-r--r--	drivers/s390/char/ctrlchar.c	   9
-rw-r--r--	drivers/s390/char/tape.h	   3
-rw-r--r--	drivers/s390/char/tape_34xx.c	  23
-rw-r--r--	drivers/s390/char/tape_3590.c	   7
-rw-r--r--	drivers/s390/char/tape_block.c	  14
-rw-r--r--	drivers/s390/char/tape_core.c	  14
-rw-r--r--	drivers/s390/cio/chsc.c	  28
-rw-r--r--	drivers/s390/cio/cio.c	  62
-rw-r--r--	drivers/s390/cio/cio.h	   6
-rw-r--r--	drivers/s390/cio/css.c	  69
-rw-r--r--	drivers/s390/cio/css.h	   9
-rw-r--r--	drivers/s390/cio/device.c	 456
-rw-r--r--	drivers/s390/cio/device.h	   6
-rw-r--r--	drivers/s390/cio/device_fsm.c	  58
-rw-r--r--	drivers/s390/cio/device_ops.c	  28
-rw-r--r--	drivers/s390/cio/qdio.c	 234
-rw-r--r--	drivers/s390/cio/qdio.h	  28
-rw-r--r--	drivers/s390/crypto/ap_bus.c	  17
-rw-r--r--	include/asm-s390/dasd.h	   2
-rw-r--r--	include/asm-s390/page.h	  22
-rw-r--r--	include/asm-s390/pgalloc.h	   3
-rw-r--r--	include/asm-s390/pgtable.h	  16
35 files changed, 1298 insertions, 655 deletions
diff --git a/Documentation/s390/driver-model.txt b/Documentation/s390/driver-model.txt
index 77bf450ec39b..e938c442277d 100644
--- a/Documentation/s390/driver-model.txt
+++ b/Documentation/s390/driver-model.txt
@@ -18,11 +18,18 @@ devices/
 - 0.0.0002/
 - 0.1.0000/0.1.1234/
 ...
+- defunct/
 
 In this example, device 0815 is accessed via subchannel 0 in subchannel set 0,
 device 4711 via subchannel 1 in subchannel set 0, and subchannel 2 is a non-I/O
 subchannel. Device 1234 is accessed via subchannel 0 in subchannel set 1.
 
+The subchannel named 'defunct' does not represent any real subchannel on the
+system; it is a pseudo subchannel where disconnected ccw devices are moved to
+if they are displaced by another ccw device becoming operational on their
+former subchannel. The ccw devices will be moved again to a proper subchannel
+if they become operational again on that subchannel.
+
 You should address a ccw device via its bus id (e.g. 0.0.4711); the device can
 be found under bus/ccw/devices/.
 
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 45e47bfb68a9..ff690564edbd 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -241,8 +241,14 @@ config WARN_STACK_SIZE
 	  This allows you to specify the maximum frame size a function may
 	  have without the compiler complaining about it.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 source "mm/Kconfig"
 
+config HOLES_IN_ZONE
+	def_bool y
+
 comment "I/O subsystem configuration"
 
 config MACHCHK_WARNING
@@ -266,14 +272,6 @@ config QDIO
 
 	  If unsure, say Y.
 
-config QDIO_PERF_STATS
-	bool "Performance statistics in /proc"
-	depends on QDIO
-	help
-	  Say Y here to get performance statistics in /proc/qdio_perf
-
-	  If unsure, say N.
-
 config QDIO_DEBUG
 	bool "Extended debugging information"
 	depends on QDIO
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 7cd51e73e274..a6ec919ba83f 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -134,7 +134,6 @@ CONFIG_RESOURCES_64BIT=y
 #
 CONFIG_MACHCHK_WARNING=y
 CONFIG_QDIO=y
-# CONFIG_QDIO_PERF_STATS is not set
 # CONFIG_QDIO_DEBUG is not set
 
 #
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index b928fecdc743..49ef206ec880 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -64,9 +64,8 @@ unsigned int console_devno = -1;
 unsigned int console_irq = -1;
 unsigned long machine_flags = 0;
 
-struct mem_chunk memory_chunk[MEMORY_CHUNKS];
+struct mem_chunk __initdata memory_chunk[MEMORY_CHUNKS];
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
-unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -354,21 +353,6 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
-static void __init
-add_memory_hole(unsigned long start, unsigned long end)
-{
-	unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
-
-	if (end <= dma_pfn)
-		zholes_size[ZONE_DMA] += end - start + 1;
-	else if (start > dma_pfn)
-		zholes_size[ZONE_NORMAL] += end - start + 1;
-	else {
-		zholes_size[ZONE_DMA] += dma_pfn - start + 1;
-		zholes_size[ZONE_NORMAL] += end - dma_pfn;
-	}
-}
-
 static int __init early_parse_mem(char *p)
 {
 	memory_end = memparse(p, &p);
@@ -521,7 +505,6 @@ setup_memory(void)
 {
 	unsigned long bootmap_size;
 	unsigned long start_pfn, end_pfn, init_pfn;
-	unsigned long last_rw_end;
 	int i;
 
 	/*
@@ -577,39 +560,27 @@ setup_memory(void)
 	/*
 	 * Register RAM areas with the bootmem allocator.
 	 */
-	last_rw_end = start_pfn;
 
 	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		unsigned long start_chunk, end_chunk;
+		unsigned long start_chunk, end_chunk, pfn;
 
 		if (memory_chunk[i].type != CHUNK_READ_WRITE)
 			continue;
-		start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
-		start_chunk >>= PAGE_SHIFT;
-		end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
-		end_chunk >>= PAGE_SHIFT;
-		if (start_chunk < start_pfn)
-			start_chunk = start_pfn;
-		if (end_chunk > end_pfn)
-			end_chunk = end_pfn;
-		if (start_chunk < end_chunk) {
-			/* Initialize storage key for RAM pages */
-			for (init_pfn = start_chunk ; init_pfn < end_chunk;
-			     init_pfn++)
-				page_set_storage_key(init_pfn << PAGE_SHIFT,
-						     PAGE_DEFAULT_KEY);
-			free_bootmem(start_chunk << PAGE_SHIFT,
-				     (end_chunk - start_chunk) << PAGE_SHIFT);
-			if (last_rw_end < start_chunk)
-				add_memory_hole(last_rw_end, start_chunk - 1);
-			last_rw_end = end_chunk;
-		}
+		start_chunk = PFN_DOWN(memory_chunk[i].addr);
+		end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
+		end_chunk = min(end_chunk, end_pfn);
+		if (start_chunk >= end_chunk)
+			continue;
+		add_active_range(0, start_chunk, end_chunk);
+		pfn = max(start_chunk, start_pfn);
+		for (; pfn <= end_chunk; pfn++)
+			page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
 	}
 
 	psw_set_key(PAGE_DEFAULT_KEY);
 
-	if (last_rw_end < end_pfn - 1)
-		add_memory_hole(last_rw_end, end_pfn - 1);
+	free_bootmem_with_active_regions(0, max_pfn);
+	reserve_bootmem(0, PFN_PHYS(start_pfn));
 
 	/*
 	 * Reserve the bootmem bitmap itself as well. We do this in two
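Note: the setup_memory() rewrite above drops the open-coded shift arithmetic in favor of the generic pfn helpers from <linux/pfn.h>, and hands the detected memory chunks to the arch-independent code via add_active_range()/free_bootmem_with_active_regions(). For reference, a simplified sketch of those helpers (the authoritative definitions live in include/linux/pfn.h):

/* Simplified sketch of the <linux/pfn.h> helpers used above. */
#define PFN_ALIGN(x)	(((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
#define PFN_UP(x)	(((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_DOWN(x)	((x) >> PAGE_SHIFT)
#define PFN_PHYS(x)	((x) << PAGE_SHIFT)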
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 8741bdc09299..633249c3ba91 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -8,8 +8,8 @@
  */
 
 #include <linux/errno.h>
-#include <asm/uaccess.h>
 #include <linux/mm.h>
+#include <asm/uaccess.h>
 #include <asm/futex.h>
 
 static inline int __handle_fault(struct mm_struct *mm, unsigned long address,
@@ -60,8 +60,9 @@ out:
 
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (current->pid == 1) {
+	if (is_init(current)) {
 		yield();
+		down_read(&mm->mmap_sem);
 		goto survive;
 	}
 	printk("VM: killing process %s\n", current->comm);
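Note: the second hunk fixes a genuine locking bug. The survive/retry path runs with mmap_sem held for reading, but the out-of-memory handler drops the semaphore before jumping back, so the retry previously executed unlocked. A minimal sketch of the corrected pattern, condensed from the function above:

	down_read(&mm->mmap_sem);
survive:
	/* ... look up the VMA and handle the fault; on OOM: ... */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_init(current)) {			/* never OOM-kill init */
		yield();
		down_read(&mm->mmap_sem);	/* re-take before retrying */
		goto survive;
	}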
diff --git a/arch/s390/mm/Makefile b/arch/s390/mm/Makefile
index aa9a42b6e62d..8e09db1edbb9 100644
--- a/arch/s390/mm/Makefile
+++ b/arch/s390/mm/Makefile
@@ -2,6 +2,6 @@
 # Makefile for the linux s390-specific parts of the memory manager.
 #
 
-obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o
+obj-y	 := init.o fault.o ioremap.o extmem.o mmap.o vmem.o
 obj-$(CONFIG_CMM) += cmm.o
 
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 9e9bc48463a5..775bf19e742b 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -16,6 +16,7 @@
 #include <linux/bootmem.h>
 #include <linux/ctype.h>
 #include <asm/page.h>
+#include <asm/pgtable.h>
 #include <asm/ebcdic.h>
 #include <asm/errno.h>
 #include <asm/extmem.h>
@@ -238,65 +239,6 @@ query_segment_type (struct dcss_segment *seg)
 }
 
 /*
- * check if the given segment collides with guest storage.
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_storage(struct dcss_segment *seg)
-{
-	int i;
-
-	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-		if (memory_chunk[i].type != CHUNK_READ_WRITE)
-			continue;
-		if ((memory_chunk[i].addr >> 20) > (seg->end >> 20))
-			continue;
-		if (((memory_chunk[i].addr + memory_chunk[i].size - 1) >> 20)
-		    < (seg->start_addr >> 20))
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment collides with other segments that are currently loaded
- * returns 1 if this is the case, 0 if no collision was found
- */
-static int
-segment_overlaps_others (struct dcss_segment *seg)
-{
-	struct list_head *l;
-	struct dcss_segment *tmp;
-
-	BUG_ON(!mutex_is_locked(&dcss_lock));
-	list_for_each(l, &dcss_list) {
-		tmp = list_entry(l, struct dcss_segment, list);
-		if ((tmp->start_addr >> 20) > (seg->end >> 20))
-			continue;
-		if ((tmp->end >> 20) < (seg->start_addr >> 20))
-			continue;
-		if (seg == tmp)
-			continue;
-		return 1;
-	}
-	return 0;
-}
-
-/*
- * check if segment exceeds the kernel mapping range (detected or set via mem=)
- * returns 1 if this is the case, 0 if segment fits into the range
- */
-static inline int
-segment_exceeds_range (struct dcss_segment *seg)
-{
-	int seg_last_pfn = (seg->end) >> PAGE_SHIFT;
-	if (seg_last_pfn > max_pfn)
-		return 1;
-	return 0;
-}
-
-/*
  * get info about a segment
  * possible return values:
  * -ENOSYS  : we are not running on VM
@@ -341,24 +283,26 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 	rc = query_segment_type (seg);
 	if (rc < 0)
 		goto out_free;
-	if (segment_exceeds_range(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - exceeds"
-			" kernel mapping range\n",name);
-		rc = -ERANGE;
+
+	rc = add_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
+
+	switch (rc) {
+	case 0:
+		break;
+	case -ENOSPC:
+		PRINT_WARN("segment_load: not loading segment %s - overlaps "
+			   "storage/segment\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_storage(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" storage\n",name);
-		rc = -ENOSPC;
+	case -ERANGE:
+		PRINT_WARN("segment_load: not loading segment %s - exceeds "
+			   "kernel mapping range\n", name);
 		goto out_free;
-	}
-	if (segment_overlaps_others(seg)) {
-		PRINT_WARN ("segment_load: not loading segment %s - overlaps"
-			" other segments\n",name);
-		rc = -EBUSY;
+	default:
+		PRINT_WARN("segment_load: not loading segment %s (rc: %d)\n",
+			   name, rc);
 		goto out_free;
 	}
+
 	if (do_nonshared)
 		dcss_command = DCSS_LOADNSR;
 	else
@@ -372,7 +316,7 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 		rc = dcss_diag_translate_rc (seg->end);
 		dcss_diag(DCSS_PURGESEG, seg->dcss_name,
 				&seg->start_addr, &seg->end);
-		goto out_free;
+		goto out_shared;
 	}
 	seg->do_nonshared = do_nonshared;
 	atomic_set(&seg->ref_count, 1);
@@ -391,6 +335,8 @@ __segment_load (char *name, int do_nonshared, unsigned long *addr, unsigned long
 			(void*)seg->start_addr, (void*)seg->end,
 			segtype_string[seg->vm_segtype]);
 	goto out;
+ out_shared:
+	remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
 out_free:
 	kfree(seg);
 out:
@@ -530,12 +476,12 @@ segment_unload(char *name)
530 "please report to linux390@de.ibm.com\n",name); 476 "please report to linux390@de.ibm.com\n",name);
531 goto out_unlock; 477 goto out_unlock;
532 } 478 }
533 if (atomic_dec_return(&seg->ref_count) == 0) { 479 if (atomic_dec_return(&seg->ref_count) != 0)
534 list_del(&seg->list); 480 goto out_unlock;
535 dcss_diag(DCSS_PURGESEG, seg->dcss_name, 481 remove_shared_memory(seg->start_addr, seg->end - seg->start_addr + 1);
536 &dummy, &dummy); 482 list_del(&seg->list);
537 kfree(seg); 483 dcss_diag(DCSS_PURGESEG, seg->dcss_name, &dummy, &dummy);
538 } 484 kfree(seg);
539out_unlock: 485out_unlock:
540 mutex_unlock(&dcss_lock); 486 mutex_unlock(&dcss_lock);
541} 487}
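Note: __segment_load() now delegates its range and overlap checking to add_shared_memory(), introduced in the new arch/s390/mm/vmem.c below. As used here, the contract is: 0 on success, -ENOSPC if the segment overlaps RAM or an already loaded segment, -ERANGE if it ends beyond the detected (or mem=-limited) memory size, and a matching remove_shared_memory() call on unload. A minimal caller sketch under those assumptions (load_the_segment() is a hypothetical placeholder for the DCSS load step):

	/* Sketch: back a DCSS at [start, end] before loading it. */
	rc = add_shared_memory(start, end - start + 1);
	if (rc)			/* -ENOSPC: overlap, -ERANGE: beyond max_pfn */
		return rc;
	rc = load_the_segment();	/* hypothetical loader step */
	if (rc)
		remove_shared_memory(start, end - start + 1);
	return rc;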
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index e1881c31b1cb..4bb21be3b007 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -24,6 +24,7 @@
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
 #include <linux/pfn.h>
+#include <linux/poison.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -69,6 +70,8 @@ void show_mem(void)
69 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); 70 printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
70 i = max_mapnr; 71 i = max_mapnr;
71 while (i-- > 0) { 72 while (i-- > 0) {
73 if (!pfn_valid(i))
74 continue;
72 page = pfn_to_page(i); 75 page = pfn_to_page(i);
73 total++; 76 total++;
74 if (PageReserved(page)) 77 if (PageReserved(page))
@@ -84,150 +87,52 @@ void show_mem(void)
84 printk("%d pages swap cached\n",cached); 87 printk("%d pages swap cached\n",cached);
85} 88}
86 89
87extern unsigned long __initdata zholes_size[]; 90static void __init setup_ro_region(void)
88/*
89 * paging_init() sets up the page tables
90 */
91
92#ifndef CONFIG_64BIT
93void __init paging_init(void)
94{ 91{
95 pgd_t * pg_dir; 92 pgd_t *pgd;
96 pte_t * pg_table; 93 pmd_t *pmd;
97 pte_t pte; 94 pte_t *pte;
98 int i; 95 pte_t new_pte;
99 unsigned long tmp; 96 unsigned long address, end;
100 unsigned long pfn = 0; 97
101 unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE; 98 address = ((unsigned long)&__start_rodata) & PAGE_MASK;
102 static const int ssm_mask = 0x04000000L; 99 end = PFN_ALIGN((unsigned long)&__end_rodata);
103 unsigned long ro_start_pfn, ro_end_pfn; 100
104 unsigned long zones_size[MAX_NR_ZONES]; 101 for (; address < end; address += PAGE_SIZE) {
105 102 pgd = pgd_offset_k(address);
106 ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata); 103 pmd = pmd_offset(pgd, address);
107 ro_end_pfn = PFN_UP((unsigned long)&__end_rodata); 104 pte = pte_offset_kernel(pmd, address);
108 105 new_pte = mk_pte_phys(address, __pgprot(_PAGE_RO));
109 memset(zones_size, 0, sizeof(zones_size)); 106 set_pte(pte, new_pte);
110 zones_size[ZONE_DMA] = max_low_pfn; 107 }
111 free_area_init_node(0, &contig_page_data, zones_size,
112 __pa(PAGE_OFFSET) >> PAGE_SHIFT,
113 zholes_size);
114
115 /* unmap whole virtual address space */
116
117 pg_dir = swapper_pg_dir;
118
119 for (i = 0; i < PTRS_PER_PGD; i++)
120 pmd_clear((pmd_t *) pg_dir++);
121
122 /*
123 * map whole physical memory to virtual memory (identity mapping)
124 */
125
126 pg_dir = swapper_pg_dir;
127
128 while (pfn < max_low_pfn) {
129 /*
130 * pg_table is physical at this point
131 */
132 pg_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
133
134 pmd_populate_kernel(&init_mm, (pmd_t *) pg_dir, pg_table);
135 pg_dir++;
136
137 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
138 if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
139 pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
140 else
141 pte = pfn_pte(pfn, PAGE_KERNEL);
142 if (pfn >= max_low_pfn)
143 pte_val(pte) = _PAGE_TYPE_EMPTY;
144 set_pte(pg_table, pte);
145 pfn++;
146 }
147 }
148
149 S390_lowcore.kernel_asce = pgdir_k;
150
151 /* enable virtual mapping in kernel mode */
152 __ctl_load(pgdir_k, 1, 1);
153 __ctl_load(pgdir_k, 7, 7);
154 __ctl_load(pgdir_k, 13, 13);
155 __raw_local_irq_ssm(ssm_mask);
156
157 local_flush_tlb();
158} 108}
159 109
160#else /* CONFIG_64BIT */ 110extern void vmem_map_init(void);
161 111
112/*
113 * paging_init() sets up the page tables
114 */
162void __init paging_init(void) 115void __init paging_init(void)
163{ 116{
164 pgd_t * pg_dir; 117 pgd_t *pg_dir;
165 pmd_t * pm_dir; 118 int i;
166 pte_t * pt_dir; 119 unsigned long pgdir_k;
167 pte_t pte;
168 int i,j,k;
169 unsigned long pfn = 0;
170 unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
171 _KERN_REGION_TABLE;
172 static const int ssm_mask = 0x04000000L; 120 static const int ssm_mask = 0x04000000L;
173 unsigned long zones_size[MAX_NR_ZONES]; 121 unsigned long max_zone_pfns[MAX_NR_ZONES];
174 unsigned long dma_pfn, high_pfn;
175 unsigned long ro_start_pfn, ro_end_pfn;
176
177 memset(zones_size, 0, sizeof(zones_size));
178 dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
179 high_pfn = max_low_pfn;
180 ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
181 ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
182
183 if (dma_pfn > high_pfn)
184 zones_size[ZONE_DMA] = high_pfn;
185 else {
186 zones_size[ZONE_DMA] = dma_pfn;
187 zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
188 }
189
190 /* Initialize mem_map[]. */
191 free_area_init_node(0, &contig_page_data, zones_size,
192 __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
193 122
194 /* 123 pg_dir = swapper_pg_dir;
195 * map whole physical memory to virtual memory (identity mapping)
196 */
197
198 pg_dir = swapper_pg_dir;
199
200 for (i = 0 ; i < PTRS_PER_PGD ; i++,pg_dir++) {
201 124
202 if (pfn >= max_low_pfn) { 125#ifdef CONFIG_64BIT
203 pgd_clear(pg_dir); 126 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERN_REGION_TABLE;
204 continue; 127 for (i = 0; i < PTRS_PER_PGD; i++)
205 } 128 pgd_clear(pg_dir + i);
206 129#else
207 pm_dir = (pmd_t *) alloc_bootmem_pages(PAGE_SIZE * 4); 130 pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
208 pgd_populate(&init_mm, pg_dir, pm_dir); 131 for (i = 0; i < PTRS_PER_PGD; i++)
209 132 pmd_clear((pmd_t *)(pg_dir + i));
210 for (j = 0 ; j < PTRS_PER_PMD ; j++,pm_dir++) { 133#endif
211 if (pfn >= max_low_pfn) { 134 vmem_map_init();
212 pmd_clear(pm_dir); 135 setup_ro_region();
213 continue;
214 }
215
216 pt_dir = (pte_t *) alloc_bootmem_pages(PAGE_SIZE);
217 pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
218
219 for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
220 if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
221 pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
222 else
223 pte = pfn_pte(pfn, PAGE_KERNEL);
224 if (pfn >= max_low_pfn)
225 pte_val(pte) = _PAGE_TYPE_EMPTY;
226 set_pte(pt_dir, pte);
227 pfn++;
228 }
229 }
230 }
231 136
232 S390_lowcore.kernel_asce = pgdir_k; 137 S390_lowcore.kernel_asce = pgdir_k;
233 138
@@ -237,9 +142,11 @@ void __init paging_init(void)
 	__ctl_load(pgdir_k, 13, 13);
 	__raw_local_irq_ssm(ssm_mask);
 
-	local_flush_tlb();
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+	free_area_init_nodes(max_zone_pfns);
 }
-#endif /* CONFIG_64BIT */
 
 void __init mem_init(void)
 {
@@ -269,6 +176,8 @@ void __init mem_init(void)
269 printk("Write protected kernel read-only data: %#lx - %#lx\n", 176 printk("Write protected kernel read-only data: %#lx - %#lx\n",
270 (unsigned long)&__start_rodata, 177 (unsigned long)&__start_rodata,
271 PFN_ALIGN((unsigned long)&__end_rodata) - 1); 178 PFN_ALIGN((unsigned long)&__end_rodata) - 1);
179 printk("Virtual memmap size: %ldk\n",
180 (max_pfn * sizeof(struct page)) >> 10);
272} 181}
273 182
274void free_initmem(void) 183void free_initmem(void)
@@ -279,6 +188,7 @@ void free_initmem(void)
 	for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
 		ClearPageReserved(virt_to_page(addr));
 		init_page_count(virt_to_page(addr));
+		memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
 		free_page(addr);
 		totalram_pages++;
 	}
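Note: the new "Virtual memmap size" message prints max_pfn * sizeof(struct page). A worked example under a hypothetical configuration (4 KB pages, 56-byte struct page; the real size depends on kernel config):

	/* 2 GB of RAM with 4 KB pages -> 524288 page frames. */
	unsigned long max_pfn = (2UL << 30) >> 12;
	unsigned long memmap_k = (max_pfn * 56) >> 10;	/* 56-byte struct page assumed */
	/* = 524288 * 56 / 1024 = 28672k reported; only the stretches that
	 * back real memory chunks are actually populated with pages. */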
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
new file mode 100644
index 000000000000..7f2944d3ec2a
--- /dev/null
+++ b/arch/s390/mm/vmem.c
@@ -0,0 +1,381 @@
+/*
+ *  arch/s390/mm/vmem.c
+ *
+ *    Copyright IBM Corp. 2006
+ *    Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>
+ */
+
+#include <linux/bootmem.h>
+#include <linux/pfn.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/setup.h>
+#include <asm/tlbflush.h>
+
+unsigned long vmalloc_end;
+EXPORT_SYMBOL(vmalloc_end);
+
+static struct page *vmem_map;
+static DEFINE_MUTEX(vmem_mutex);
+
+struct memory_segment {
+	struct list_head list;
+	unsigned long start;
+	unsigned long size;
+};
+
+static LIST_HEAD(mem_segs);
+
+void memmap_init(unsigned long size, int nid, unsigned long zone,
+		 unsigned long start_pfn)
+{
+	struct page *start, *end;
+	struct page *map_start, *map_end;
+	int i;
+
+	start = pfn_to_page(start_pfn);
+	end = start + size;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		unsigned long cstart, cend;
+
+		cstart = PFN_DOWN(memory_chunk[i].addr);
+		cend = cstart + PFN_DOWN(memory_chunk[i].size);
+
+		map_start = mem_map + cstart;
+		map_end = mem_map + cend;
+
+		if (map_start < start)
+			map_start = start;
+		if (map_end > end)
+			map_end = end;
+
+		map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1))
+			/ sizeof(struct page);
+		map_end += ((PFN_ALIGN((unsigned long) map_end)
+			     - (unsigned long) map_end)
+			    / sizeof(struct page));
+
+		if (map_start < map_end)
+			memmap_init_zone((unsigned long)(map_end - map_start),
+					 nid, zone, page_to_pfn(map_start));
+	}
+}
+
+static inline void *vmem_alloc_pages(unsigned int order)
+{
+	if (slab_is_available())
+		return (void *)__get_free_pages(GFP_KERNEL, order);
+	return alloc_bootmem_pages((1 << order) * PAGE_SIZE);
+}
+
+static inline pmd_t *vmem_pmd_alloc(void)
+{
+	pmd_t *pmd;
+	int i;
+
+	pmd = vmem_alloc_pages(PMD_ALLOC_ORDER);
+	if (!pmd)
+		return NULL;
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		pmd_clear(pmd + i);
+	return pmd;
+}
+
+static inline pte_t *vmem_pte_alloc(void)
+{
+	pte_t *pte;
+	pte_t empty_pte;
+	int i;
+
+	pte = vmem_alloc_pages(PTE_ALLOC_ORDER);
+	if (!pte)
+		return NULL;
+	pte_val(empty_pte) = _PAGE_TYPE_EMPTY;
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, empty_pte);
+	return pte;
+}
+
+/*
+ * Add a physical memory range to the 1:1 mapping.
+ */
+static int vmem_add_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		pte = pfn_pte(address >> PAGE_SHIFT, PAGE_KERNEL);
+		set_pte(pt_dir, pte);
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start, start + size);
+	return ret;
+}
+
+/*
+ * Remove a physical memory range from the 1:1 mapping.
+ * Currently only invalidates page table entries.
+ */
+static void vmem_remove_range(unsigned long start, unsigned long size)
+{
+	unsigned long address;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+
+	pte_val(pte) = _PAGE_TYPE_EMPTY;
+	for (address = start; address < start + size; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir))
+			continue;
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir))
+			continue;
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		set_pte(pt_dir, pte);
+	}
+	flush_tlb_kernel_range(start, start + size);
+}
+
+/*
+ * Add a backed mem_map array to the virtual mem_map array.
+ */
+static int vmem_add_mem_map(unsigned long start, unsigned long size)
+{
+	unsigned long address, start_addr, end_addr;
+	struct page *map_start, *map_end;
+	pgd_t *pg_dir;
+	pmd_t *pm_dir;
+	pte_t *pt_dir;
+	pte_t  pte;
+	int ret = -ENOMEM;
+
+	map_start = vmem_map + PFN_DOWN(start);
+	map_end	= vmem_map + PFN_DOWN(start + size);
+
+	start_addr = (unsigned long) map_start & PAGE_MASK;
+	end_addr = PFN_ALIGN((unsigned long) map_end);
+
+	for (address = start_addr; address < end_addr; address += PAGE_SIZE) {
+		pg_dir = pgd_offset_k(address);
+		if (pgd_none(*pg_dir)) {
+			pm_dir = vmem_pmd_alloc();
+			if (!pm_dir)
+				goto out;
+			pgd_populate(&init_mm, pg_dir, pm_dir);
+		}
+
+		pm_dir = pmd_offset(pg_dir, address);
+		if (pmd_none(*pm_dir)) {
+			pt_dir = vmem_pte_alloc();
+			if (!pt_dir)
+				goto out;
+			pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
+		}
+
+		pt_dir = pte_offset_kernel(pm_dir, address);
+		if (pte_none(*pt_dir)) {
+			unsigned long new_page;
+
+			new_page =__pa(vmem_alloc_pages(0));
+			if (!new_page)
+				goto out;
+			pte = pfn_pte(new_page >> PAGE_SHIFT, PAGE_KERNEL);
+			set_pte(pt_dir, pte);
+		}
+	}
+	ret = 0;
+out:
+	flush_tlb_kernel_range(start_addr, end_addr);
+	return ret;
+}
+
+static int vmem_add_mem(unsigned long start, unsigned long size)
+{
+	int ret;
+
+	ret = vmem_add_range(start, size);
+	if (ret)
+		return ret;
+	return vmem_add_mem_map(start, size);
+}
+
+/*
+ * Add memory segment to the segment list if it doesn't overlap with
+ * an already present segment.
+ */
+static int insert_memory_segment(struct memory_segment *seg)
+{
+	struct memory_segment *tmp;
+
+	if (PFN_DOWN(seg->start + seg->size) > max_pfn ||
+	    seg->start + seg->size < seg->start)
+		return -ERANGE;
+
+	list_for_each_entry(tmp, &mem_segs, list) {
+		if (seg->start >= tmp->start + tmp->size)
+			continue;
+		if (seg->start + seg->size <= tmp->start)
+			continue;
+		return -ENOSPC;
+	}
+	list_add(&seg->list, &mem_segs);
+	return 0;
+}
+
+/*
+ * Remove memory segment from the segment list.
+ */
+static void remove_memory_segment(struct memory_segment *seg)
+{
+	list_del(&seg->list);
+}
+
+static void __remove_shared_memory(struct memory_segment *seg)
+{
+	remove_memory_segment(seg);
+	vmem_remove_range(seg->start, seg->size);
+}
+
+int remove_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+
+	ret = -ENOENT;
+	list_for_each_entry(seg, &mem_segs, list) {
+		if (seg->start == start && seg->size == size)
+			break;
+	}
+
+	if (seg->start != start || seg->size != size)
+		goto out;
+
+	ret = 0;
+	__remove_shared_memory(seg);
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+int add_shared_memory(unsigned long start, unsigned long size)
+{
+	struct memory_segment *seg;
+	struct page *page;
+	unsigned long pfn, num_pfn, end_pfn;
+	int ret;
+
+	mutex_lock(&vmem_mutex);
+	ret = -ENOMEM;
+	seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+	if (!seg)
+		goto out;
+	seg->start = start;
+	seg->size = size;
+
+	ret = insert_memory_segment(seg);
+	if (ret)
+		goto out_free;
+
+	ret = vmem_add_mem(start, size);
+	if (ret)
+		goto out_remove;
+
+	pfn = PFN_DOWN(start);
+	num_pfn = PFN_DOWN(size);
+	end_pfn = pfn + num_pfn;
+
+	page = pfn_to_page(pfn);
+	memset(page, 0, num_pfn * sizeof(struct page));
+
+	for (; pfn < end_pfn; pfn++) {
+		page = pfn_to_page(pfn);
+		init_page_count(page);
+		reset_page_mapcount(page);
+		SetPageReserved(page);
+		INIT_LIST_HEAD(&page->lru);
+	}
+	goto out;
+
+out_remove:
+	__remove_shared_memory(seg);
+out_free:
+	kfree(seg);
+out:
+	mutex_unlock(&vmem_mutex);
+	return ret;
+}
+
+/*
+ * map whole physical memory to virtual memory (identity mapping)
+ */
+void __init vmem_map_init(void)
+{
+	unsigned long map_size;
+	int i;
+
+	map_size = ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) * sizeof(struct page);
+	vmalloc_end = PFN_ALIGN(VMALLOC_END_INIT) - PFN_ALIGN(map_size);
+	vmem_map = (struct page *) vmalloc_end;
+	NODE_DATA(0)->node_mem_map = vmem_map;
+
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++)
+		vmem_add_mem(memory_chunk[i].addr, memory_chunk[i].size);
+}
+
+/*
+ * Convert memory chunk array to a memory segment list so there is a single
+ * list that contains both r/w memory and shared memory segments.
+ */
+static int __init vmem_convert_memory_chunk(void)
+{
+	struct memory_segment *seg;
+	int i;
+
+	mutex_lock(&vmem_mutex);
+	for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
+		if (!memory_chunk[i].size)
+			continue;
+		seg = kzalloc(sizeof(*seg), GFP_KERNEL);
+		if (!seg)
+			panic("Out of memory...\n");
+		seg->start = memory_chunk[i].addr;
+		seg->size = memory_chunk[i].size;
+		insert_memory_segment(seg);
+	}
+	mutex_unlock(&vmem_mutex);
+	return 0;
+}
+
+core_initcall(vmem_convert_memory_chunk);
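Note: with mem_map carved out of the top of the vmalloc area like this, the flat-memory pfn/page conversions stay simple pointer arithmetic against vmem_map; only the stretches of the array that correspond to real memory chunks are given backing pages by vmem_add_mem_map(). A sketch of the lookup, assuming the include/asm-s390/page.h changes in this merge follow the usual flat-memmap scheme on top of vmem_map:

/* Sketch, assuming a vmem_map-based flat memmap layout. */
extern struct page *vmem_map;
#define pfn_to_page(pfn)	(vmem_map + (pfn))
#define page_to_pfn(page)	((unsigned long)((page) - vmem_map))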
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 2af2d9b53d18..492b68bcd7cc 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1050,10 +1050,10 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 		}
 	} else {		/* error */
 		memcpy(&cqr->irb, irb, sizeof (struct irb));
-#ifdef ERP_DEBUG
+		if (device->features & DASD_FEATURE_ERPLOG) {
 			/* dump sense data */
 			dasd_log_sense(cqr, irb);
-#endif
+		}
 		switch (era) {
 		case dasd_era_fatal:
 			cqr->status = DASD_CQR_FAILED;
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 669805d4402d..4d01040c2c63 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2641,14 +2641,12 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 	struct dasd_ccw_req *erp = NULL;
 	struct dasd_device *device = cqr->device;
 	__u32 cpa = cqr->irb.scsw.cpa;
+	struct dasd_ccw_req *temp_erp = NULL;
 
-#ifdef ERP_DEBUG
+	if (device->features & DASD_FEATURE_ERPLOG) {
 		/* print current erp_chain */
 		DEV_MESSAGE(KERN_ERR, device, "%s",
 			    "ERP chain at BEGINNING of ERP-ACTION");
-	{
-		struct dasd_ccw_req *temp_erp = NULL;
-
 		for (temp_erp = cqr;
 		     temp_erp != NULL; temp_erp = temp_erp->refers) {
 
@@ -2658,7 +2656,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 				  temp_erp->refers);
 		}
 	}
-#endif /* ERP_DEBUG */
 
 	/* double-check if current erp/cqr was successfull */
 	if ((cqr->irb.scsw.cstat == 0x00) &&
@@ -2695,11 +2692,10 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 		erp = dasd_3990_erp_handle_match_erp(cqr, erp);
 	}
 
-#ifdef ERP_DEBUG
+	if (device->features & DASD_FEATURE_ERPLOG) {
 		/* print current erp_chain */
-	DEV_MESSAGE(KERN_ERR, device, "%s", "ERP chain at END of ERP-ACTION");
-	{
-		struct dasd_ccw_req *temp_erp = NULL;
+		DEV_MESSAGE(KERN_ERR, device, "%s",
+			    "ERP chain at END of ERP-ACTION");
 		for (temp_erp = erp;
 		     temp_erp != NULL; temp_erp = temp_erp->refers) {
 
@@ -2709,7 +2705,6 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
 				  temp_erp->refers);
 		}
 	}
-#endif /* ERP_DEBUG */
 
 	if (erp->status == DASD_CQR_FAILED)
 		dasd_log_ccw(erp, 1, cpa);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index cf28ccc57948..5943266152f5 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -202,6 +202,8 @@ dasd_feature_list(char *str, char **endp)
 		features |= DASD_FEATURE_READONLY;
 	else if (len == 4 && !strncmp(str, "diag", 4))
 		features |= DASD_FEATURE_USEDIAG;
+	else if (len == 6 && !strncmp(str, "erplog", 6))
+		features |= DASD_FEATURE_ERPLOG;
 	else {
 		MESSAGE(KERN_WARNING,
 			"unsupported feature: %*s, "
@@ -709,6 +711,52 @@ dasd_ro_store(struct device *dev, struct device_attribute *attr,
 }
 
 static DEVICE_ATTR(readonly, 0644, dasd_ro_show, dasd_ro_store);
+/*
+ * erplog controls the logging of ERP related data
+ * (e.g. failing channel programs).
+ */
+static ssize_t
+dasd_erplog_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct dasd_devmap *devmap;
+	int erplog;
+
+	devmap = dasd_find_busid(dev->bus_id);
+	if (!IS_ERR(devmap))
+		erplog = (devmap->features & DASD_FEATURE_ERPLOG) != 0;
+	else
+		erplog = (DASD_FEATURE_DEFAULT & DASD_FEATURE_ERPLOG) != 0;
+	return snprintf(buf, PAGE_SIZE, erplog ? "1\n" : "0\n");
+}
+
+static ssize_t
+dasd_erplog_store(struct device *dev, struct device_attribute *attr,
+		  const char *buf, size_t count)
+{
+	struct dasd_devmap *devmap;
+	int val;
+	char *endp;
+
+	devmap = dasd_devmap_from_cdev(to_ccwdev(dev));
+	if (IS_ERR(devmap))
+		return PTR_ERR(devmap);
+
+	val = simple_strtoul(buf, &endp, 0);
+	if (((endp + 1) < (buf + count)) || (val > 1))
+		return -EINVAL;
+
+	spin_lock(&dasd_devmap_lock);
+	if (val)
+		devmap->features |= DASD_FEATURE_ERPLOG;
+	else
+		devmap->features &= ~DASD_FEATURE_ERPLOG;
+	if (devmap->device)
+		devmap->device->features = devmap->features;
+	spin_unlock(&dasd_devmap_lock);
+	return count;
+}
+
+static DEVICE_ATTR(erplog, 0644, dasd_erplog_show, dasd_erplog_store);
 
 /*
  * use_diag controls whether the driver should use diag rather than ssch
@@ -896,6 +944,7 @@ static struct attribute * dasd_attrs[] = {
 	&dev_attr_uid.attr,
 	&dev_attr_use_diag.attr,
 	&dev_attr_eer_enabled.attr,
+	&dev_attr_erplog.attr,
 	NULL,
 };
 
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index dc5dd509434d..fb725e3b08fe 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -13,10 +13,6 @@
 
 #ifdef __KERNEL__
 
-/* erp debugging in dasd.c and dasd_3990_erp.c */
-#define ERP_DEBUG
-
-
 /* we keep old device allocation scheme; IOW, minors are still in 0..255 */
 #define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
 #define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 49e9628d9297..c6cbcb3f925e 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -16,14 +16,15 @@
 
 #ifdef CONFIG_MAGIC_SYSRQ
 static int ctrlchar_sysrq_key;
+static struct tty_struct *sysrq_tty;
 
 static void
-ctrlchar_handle_sysrq(void *tty)
+ctrlchar_handle_sysrq(struct work_struct *work)
 {
-	handle_sysrq(ctrlchar_sysrq_key, (struct tty_struct *) tty);
+	handle_sysrq(ctrlchar_sysrq_key, sysrq_tty);
 }
 
-static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL);
+static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq);
 #endif
 
 
@@ -53,7 +54,7 @@ ctrlchar_handle(const unsigned char *buf, int len, struct tty_struct *tty)
 		/* racy */
 		if (len == 3 && buf[1] == '-') {
 			ctrlchar_sysrq_key = buf[2];
-			ctrlchar_work.data = tty;
+			sysrq_tty = tty;
 			schedule_work(&ctrlchar_work);
 			return CTRLCHAR_SYSRQ;
 		}
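Note: this hunk, and the tape driver hunks that follow, all track the same 2.6.20-era workqueue interface change. Work handlers now receive the work_struct pointer itself rather than an opaque void *, and DECLARE_WORK()/INIT_WORK() lose their data argument; per-work context therefore either moves into a containing structure recovered with container_of(), or, as in ctrlchar.c above, into a static variable. A condensed before/after sketch (my_ctx/my_handler are hypothetical names for illustration):

/* Old API: context passed as an opaque pointer. */
static void my_handler(void *data);
DECLARE_WORK(my_work, my_handler, &my_ctx);

/* New API: handler gets the work_struct; recover context with
 * container_of() when the work item is embedded in a structure. */
struct my_ctx {
	struct work_struct work;
	/* ... per-work state ... */
};
static void my_handler(struct work_struct *work)
{
	struct my_ctx *ctx = container_of(work, struct my_ctx, work);
	/* ... use ctx ... */
}
INIT_WORK(&ctx->work, my_handler);

Delayed submissions split off into struct delayed_work with INIT_DELAYED_WORK() and schedule_delayed_work(), which is why struct tape_device's tape_dnr member changes type in the tape.h hunk below.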
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 1f4c89967be4..c9f1c4c8bb13 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -179,6 +179,7 @@ struct tape_char_data {
 /* Block Frontend Data */
 struct tape_blk_data
 {
+	struct tape_device *	device;
 	/* Block device request queue. */
 	request_queue_t *	request_queue;
 	spinlock_t		request_queue_lock;
@@ -240,7 +241,7 @@ struct tape_device {
 #endif
 
 	/* Function to start or stop the next request later. */
-	struct work_struct	tape_dnr;
+	struct delayed_work	tape_dnr;
 };
 
 /* Externals from tape_core.c */
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c
index 7b95dab913d0..e765875e8db2 100644
--- a/drivers/s390/char/tape_34xx.c
+++ b/drivers/s390/char/tape_34xx.c
@@ -95,6 +95,12 @@ tape_34xx_medium_sense(struct tape_device *device)
 	return rc;
 }
 
+struct tape_34xx_work {
+	struct tape_device	*device;
+	enum tape_op		op;
+	struct work_struct	work;
+};
+
 /*
  * These functions are currently used only to schedule a medium_sense for
  * later execution. This is because we get an interrupt whenever a medium
@@ -103,13 +109,10 @@ tape_34xx_medium_sense(struct tape_device *device)
  * interrupt handler.
  */
 static void
-tape_34xx_work_handler(void *data)
+tape_34xx_work_handler(struct work_struct *work)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p = data;
+	struct tape_34xx_work *p =
+		container_of(work, struct tape_34xx_work, work);
 
 	switch(p->op) {
 	case TO_MSEN:
@@ -126,17 +129,13 @@ tape_34xx_work_handler(void *data)
 static int
 tape_34xx_schedule_work(struct tape_device *device, enum tape_op op)
 {
-	struct {
-		struct tape_device	*device;
-		enum tape_op		op;
-		struct work_struct	work;
-	} *p;
+	struct tape_34xx_work *p;
 
 	if ((p = kmalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
 	memset(p, 0, sizeof(*p));
-	INIT_WORK(&p->work, tape_34xx_work_handler, p);
+	INIT_WORK(&p->work, tape_34xx_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op = op;
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 928cbefc49d5..9df912f63188 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -236,9 +236,10 @@ struct work_handler_data {
 };
 
 static void
-tape_3590_work_handler(void *data)
+tape_3590_work_handler(struct work_struct *work)
 {
-	struct work_handler_data *p = data;
+	struct work_handler_data *p =
+		container_of(work, struct work_handler_data, work);
 
 	switch (p->op) {
 	case TO_MSEN:
@@ -263,7 +264,7 @@ tape_3590_schedule_work(struct tape_device *device, enum tape_op op)
 	if ((p = kzalloc(sizeof(*p), GFP_ATOMIC)) == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&p->work, tape_3590_work_handler, p);
+	INIT_WORK(&p->work, tape_3590_work_handler);
 
 	p->device = tape_get_device_reference(device);
 	p->op = op;
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index 3225fcd1dcb4..c8a89b3b87d4 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -15,6 +15,7 @@
 #include <linux/blkdev.h>
 #include <linux/interrupt.h>
 #include <linux/buffer_head.h>
+#include <linux/kernel.h>
 
 #include <asm/debug.h>
 
@@ -143,7 +144,8 @@ tapeblock_start_request(struct tape_device *device, struct request *req)
  * queue.
  */
 static void
-tapeblock_requeue(void *data) {
+tapeblock_requeue(struct work_struct *work) {
+	struct tape_blk_data *	blkdat;
 	struct tape_device *	device;
 	request_queue_t *	queue;
 	int			nr_queued;
@@ -151,7 +153,8 @@ tapeblock_requeue(void *data) {
 	struct list_head *	l;
 	int			rc;
 
-	device = (struct tape_device *) data;
+	blkdat = container_of(work, struct tape_blk_data, requeue_task);
+	device = blkdat->device;
 	if (!device)
 		return;
 
@@ -212,6 +215,7 @@ tapeblock_setup_device(struct tape_device * device)
 	int rc;
 
 	blkdat = &device->blk_data;
+	blkdat->device = device;
 	spin_lock_init(&blkdat->request_queue_lock);
 	atomic_set(&blkdat->requeue_scheduled, 0);
 
@@ -255,8 +259,8 @@ tapeblock_setup_device(struct tape_device * device)
 
 	add_disk(disk);
 
-	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue,
-		  tape_get_device_reference(device));
+	tape_get_device_reference(device);
+	INIT_WORK(&blkdat->requeue_task, tapeblock_requeue);
 
 	return 0;
 
@@ -271,7 +275,7 @@ void
 tapeblock_cleanup_device(struct tape_device *device)
 {
 	flush_scheduled_work();
-	device->blk_data.requeue_task.data = tape_put_device(device);
+	tape_put_device(device);
 
 	if (!device->blk_data.disk) {
 		PRINT_ERR("(%s): No gendisk to clean up!\n",
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 2826aed91043..c6c2e918b990 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -28,7 +28,7 @@
 #define PRINTK_HEADER "TAPE_CORE: "
 
 static void __tape_do_irq (struct ccw_device *, unsigned long, struct irb *);
-static void tape_delayed_next_request(void * data);
+static void tape_delayed_next_request(struct work_struct *);
 
 /*
  * One list to contain all tape devices of all disciplines, so
@@ -272,7 +272,7 @@ __tape_cancel_io(struct tape_device *device, struct tape_request *request)
 		return 0;
 	case -EBUSY:
 		request->status = TAPE_REQUEST_CANCEL;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		return 0;
 	case -ENODEV:
 		DBF_EXCEPTION(2, "device gone, retry\n");
@@ -470,7 +470,7 @@ tape_alloc_device(void)
 	*device->modeset_byte = 0;
 	device->first_minor = -1;
 	atomic_set(&device->ref_count, 1);
-	INIT_WORK(&device->tape_dnr, tape_delayed_next_request, device);
+	INIT_DELAYED_WORK(&device->tape_dnr, tape_delayed_next_request);
 
 	return device;
 }
@@ -724,7 +724,7 @@ __tape_start_io(struct tape_device *device, struct tape_request *request)
 	} else if (rc == -EBUSY) {
 		/* The common I/O subsystem is currently busy. Retry later. */
 		request->status = TAPE_REQUEST_QUEUED;
-		schedule_work(&device->tape_dnr);
+		schedule_delayed_work(&device->tape_dnr, 0);
 		rc = 0;
 	} else {
 		/* Start failed. Remove request and indicate failure. */
@@ -790,11 +790,11 @@ __tape_start_next_request(struct tape_device *device)
 }
 
 static void
-tape_delayed_next_request(void *data)
+tape_delayed_next_request(struct work_struct *work)
 {
-	struct tape_device *	device;
+	struct tape_device *device =
+		container_of(work, struct tape_device, tape_dnr.work);
 
-	device = (struct tape_device *) data;
 	DBF_LH(6, "tape_delayed_next_request(%p)\n", device);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	__tape_start_next_request(device);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index dbfb77b03928..cbab8d2ce5cf 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
 	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 	if (!page)
 		return -ENOMEM;
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	ret = chsc_get_sch_desc_irq(sch, page);
 	if (ret) {
 		static int cio_chsc_err_msg;
@@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch)
 			cio_chsc_err_msg = 1;
 		}
 	}
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	free_page((unsigned long)page);
 	if (!ret) {
 		int j, chpid, mask;
@@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	if (j >= 8)
 		return 0;
 
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 
 	stsch(sch->schid, &schib);
 	if (!schib.pmcw.dnv)
@@ -265,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	else if (sch->lpm == mask)
 		goto out_unreg;
 out_unlock:
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	return 0;
 out_unreg:
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	sch->lpm = 0;
 	if (css_enqueue_subchannel_slow(sch->schid)) {
 		css_clear_subchannel_slow_list();
@@ -378,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 		/* Check if a subchannel is newly available. */
 		return s390_process_res_acc_new_sch(schid);
 
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 
 	chp_mask = s390_process_res_acc_sch(res_data, sch);
 
 	if (chp_mask == 0) {
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		put_device(&sch->dev);
 		return 0;
 	}
@@ -397,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 	else if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
 
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	put_device(&sch->dev);
 	return 0;
 }
@@ -635,21 +635,21 @@ __chp_add(struct subchannel_id schid, void *data)
 	if (!sch)
 		/* Check if the subchannel is now available. */
 		return __chp_add_new_sch(schid);
-	spin_lock_irq(&sch->lock);
+	spin_lock_irq(sch->lock);
 	for (i=0; i<8; i++) {
 		mask = 0x80 >> i;
 		if ((sch->schib.pmcw.pim & mask) &&
 		    (sch->schib.pmcw.chpid[i] == chp->id)) {
 			if (stsch(sch->schid, &sch->schib) != 0) {
 				/* Endgame. */
-				spin_unlock_irq(&sch->lock);
+				spin_unlock_irq(sch->lock);
 				return -ENXIO;
 			}
 			break;
 		}
 	}
 	if (i==8) {
-		spin_unlock_irq(&sch->lock);
+		spin_unlock_irq(sch->lock);
 		return 0;
 	}
 	sch->lpm = ((sch->schib.pmcw.pim &
@@ -660,7 +660,7 @@ __chp_add(struct subchannel_id schid, void *data)
 	if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
 
-	spin_unlock_irq(&sch->lock);
+	spin_unlock_irq(sch->lock);
 	put_device(&sch->dev);
 	return 0;
 }
@@ -750,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 	if (!sch->ssd_info.valid)
 		return;
 
-	spin_lock_irqsave(&sch->lock, flags);
+	spin_lock_irqsave(sch->lock, flags);
 	old_lpm = sch->lpm;
 	for (chp = 0; chp < 8; chp++) {
 		if (sch->ssd_info.chpid[chp] != chpid)
@@ -785,7 +785,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 			sch->driver->verify(&sch->dev);
 		break;
 	}
-	spin_unlock_irqrestore(&sch->lock, flags);
+	spin_unlock_irqrestore(sch->lock, flags);
 }
 
 static int
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 20aee2783847..7835a714a405 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -143,11 +143,11 @@ cio_tpi(void)
143 return 1; 143 return 1;
144 local_bh_disable(); 144 local_bh_disable();
145 irq_enter (); 145 irq_enter ();
146 spin_lock(&sch->lock); 146 spin_lock(sch->lock);
147 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw)); 147 memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
148 if (sch->driver && sch->driver->irq) 148 if (sch->driver && sch->driver->irq)
149 sch->driver->irq(&sch->dev); 149 sch->driver->irq(&sch->dev);
150 spin_unlock(&sch->lock); 150 spin_unlock(sch->lock);
151 irq_exit (); 151 irq_exit ();
152 _local_bh_enable(); 152 _local_bh_enable();
153 return 1; 153 return 1;
@@ -415,6 +415,8 @@ cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
415 CIO_TRACE_EVENT (2, "ensch"); 415 CIO_TRACE_EVENT (2, "ensch");
416 CIO_TRACE_EVENT (2, sch->dev.bus_id); 416 CIO_TRACE_EVENT (2, sch->dev.bus_id);
417 417
418 if (sch_is_pseudo_sch(sch))
419 return -EINVAL;
418 ccode = stsch (sch->schid, &sch->schib); 420 ccode = stsch (sch->schid, &sch->schib);
419 if (ccode) 421 if (ccode)
420 return -ENODEV; 422 return -ENODEV;
@@ -462,6 +464,8 @@ cio_disable_subchannel (struct subchannel *sch)
462 CIO_TRACE_EVENT (2, "dissch"); 464 CIO_TRACE_EVENT (2, "dissch");
463 CIO_TRACE_EVENT (2, sch->dev.bus_id); 465 CIO_TRACE_EVENT (2, sch->dev.bus_id);
464 466
467 if (sch_is_pseudo_sch(sch))
468 return 0;
465 ccode = stsch (sch->schid, &sch->schib); 469 ccode = stsch (sch->schid, &sch->schib);
466 if (ccode == 3) /* Not operational. */ 470 if (ccode == 3) /* Not operational. */
467 return -ENODEV; 471 return -ENODEV;
@@ -496,6 +500,15 @@ cio_disable_subchannel (struct subchannel *sch)
496 return ret; 500 return ret;
497} 501}
498 502
503int cio_create_sch_lock(struct subchannel *sch)
504{
505 sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
506 if (!sch->lock)
507 return -ENOMEM;
508 spin_lock_init(sch->lock);
509 return 0;
510}
511
499/* 512/*
500 * cio_validate_subchannel() 513 * cio_validate_subchannel()
501 * 514 *
@@ -513,6 +526,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
513{ 526{
514 char dbf_txt[15]; 527 char dbf_txt[15];
515 int ccode; 528 int ccode;
529 int err;
516 530
517 sprintf (dbf_txt, "valsch%x", schid.sch_no); 531 sprintf (dbf_txt, "valsch%x", schid.sch_no);
518 CIO_TRACE_EVENT (4, dbf_txt); 532 CIO_TRACE_EVENT (4, dbf_txt);
@@ -520,9 +534,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
520 /* Nuke all fields. */ 534 /* Nuke all fields. */
521 memset(sch, 0, sizeof(struct subchannel)); 535 memset(sch, 0, sizeof(struct subchannel));
522 536
523 spin_lock_init(&sch->lock); 537 sch->schid = schid;
538 if (cio_is_console(schid)) {
539 sch->lock = cio_get_console_lock();
540 } else {
541 err = cio_create_sch_lock(sch);
542 if (err)
543 goto out;
544 }
524 mutex_init(&sch->reg_mutex); 545 mutex_init(&sch->reg_mutex);
525
526 /* Set a name for the subchannel */ 546 /* Set a name for the subchannel */
527 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, 547 snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
528 schid.sch_no); 548 schid.sch_no);
@@ -534,10 +554,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
534 * is not valid. 554 * is not valid.
535 */ 555 */
536 ccode = stsch_err (schid, &sch->schib); 556 ccode = stsch_err (schid, &sch->schib);
537 if (ccode) 557 if (ccode) {
538 return (ccode == 3) ? -ENXIO : ccode; 558 err = (ccode == 3) ? -ENXIO : ccode;
539 559 goto out;
540 sch->schid = schid; 560 }
541 /* Copy subchannel type from path management control word. */ 561 /* Copy subchannel type from path management control word. */
542 sch->st = sch->schib.pmcw.st; 562 sch->st = sch->schib.pmcw.st;
543 563
@@ -550,14 +570,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
550 "non-I/O subchannel type %04X\n", 570 "non-I/O subchannel type %04X\n",
551 sch->schid.ssid, sch->schid.sch_no, sch->st); 571 sch->schid.ssid, sch->schid.sch_no, sch->st);
552 /* We stop here for non-io subchannels. */ 572 /* We stop here for non-io subchannels. */
553 return sch->st; 573 err = sch->st;
574 goto out;
554 } 575 }
555 576
556 /* Initialization for io subchannels. */ 577 /* Initialization for io subchannels. */
557 if (!sch->schib.pmcw.dnv) 578 if (!sch->schib.pmcw.dnv) {
558 /* io subchannel but device number is invalid. */ 579 /* io subchannel but device number is invalid. */
559 return -ENODEV; 580 err = -ENODEV;
560 581 goto out;
582 }
561 /* Devno is valid. */ 583 /* Devno is valid. */
562 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) { 584 if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
563 /* 585 /*
@@ -567,7 +589,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
567 CIO_MSG_EVENT(0, "Blacklisted device detected " 589 CIO_MSG_EVENT(0, "Blacklisted device detected "
568 "at devno %04X, subchannel set %x\n", 590 "at devno %04X, subchannel set %x\n",
569 sch->schib.pmcw.dev, sch->schid.ssid); 591 sch->schib.pmcw.dev, sch->schid.ssid);
570 return -ENODEV; 592 err = -ENODEV;
593 goto out;
571 } 594 }
572 sch->opm = 0xff; 595 sch->opm = 0xff;
573 if (!cio_is_console(sch->schid)) 596 if (!cio_is_console(sch->schid))
@@ -595,6 +618,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
595 if ((sch->lpm & (sch->lpm - 1)) != 0) 618 if ((sch->lpm & (sch->lpm - 1)) != 0)
596 sch->schib.pmcw.mp = 1; /* multipath mode */ 619 sch->schib.pmcw.mp = 1; /* multipath mode */
597 return 0; 620 return 0;
621out:
622 if (!cio_is_console(schid))
623 kfree(sch->lock);
624 sch->lock = NULL;
625 return err;
598} 626}
599 627
600/* 628/*
@@ -637,7 +665,7 @@ do_IRQ (struct pt_regs *regs)
637 } 665 }
638 sch = (struct subchannel *)(unsigned long)tpi_info->intparm; 666 sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
639 if (sch) 667 if (sch)
640 spin_lock(&sch->lock); 668 spin_lock(sch->lock);
641 /* Store interrupt response block to lowcore. */ 669 /* Store interrupt response block to lowcore. */
642 if (tsch (tpi_info->schid, irb) == 0 && sch) { 670 if (tsch (tpi_info->schid, irb) == 0 && sch) {
643 /* Keep subchannel information word up to date. */ 671 /* Keep subchannel information word up to date. */
@@ -648,7 +676,7 @@ do_IRQ (struct pt_regs *regs)
648 sch->driver->irq(&sch->dev); 676 sch->driver->irq(&sch->dev);
649 } 677 }
650 if (sch) 678 if (sch)
651 spin_unlock(&sch->lock); 679 spin_unlock(sch->lock);
652 /* 680 /*
653 * Are more interrupts pending? 681 * Are more interrupts pending?
654 * If so, the tpi instruction will update the lowcore 682 * If so, the tpi instruction will update the lowcore
@@ -687,10 +715,10 @@ wait_cons_dev (void)
687 __ctl_load (cr6, 6, 6); 715 __ctl_load (cr6, 6, 6);
688 716
689 do { 717 do {
690 spin_unlock(&console_subchannel.lock); 718 spin_unlock(console_subchannel.lock);
691 if (!cio_tpi()) 719 if (!cio_tpi())
692 cpu_relax(); 720 cpu_relax();
693 spin_lock(&console_subchannel.lock); 721 spin_lock(console_subchannel.lock);
694 } while (console_subchannel.schib.scsw.actl != 0); 722 } while (console_subchannel.schib.scsw.actl != 0);
695 /* 723 /*
696 * restore previous isc value 724 * restore previous isc value
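These hunks are the visible half of the subchannel lock conversion: the lock moves from an embedded spinlock_t to a heap-allocated pointer, which is why every spin_lock_irq(&sch->lock) caller becomes spin_lock_irq(sch->lock). A condensed sketch of the resulting lock lifetime, with the struct abbreviated for illustration (the real code is cio_create_sch_lock() above plus the release paths in css.c below):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct sketch_subchannel {
	spinlock_t *lock;		/* was: spinlock_t lock */
};

/* Mirrors cio_create_sch_lock(): with the lock on the heap, a ccw
 * device's cdev->ccwlock can simply be re-pointed at another
 * subchannel's lock when the device is moved. */
static int sketch_create_lock(struct sketch_subchannel *sch)
{
	sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
	if (!sch->lock)
		return -ENOMEM;
	spin_lock_init(sch->lock);
	return 0;
}

/* Whoever frees the subchannel must now free its lock as well; the
 * console subchannel points at a static lock instead and is skipped. */
static void sketch_release(struct sketch_subchannel *sch)
{
	kfree(sch->lock);
	sch->lock = NULL;
}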
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index 4541c1af4b66..35154a210357 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -87,7 +87,7 @@ struct orb {
87/* subchannel data structure used by I/O subroutines */ 87/* subchannel data structure used by I/O subroutines */
88struct subchannel { 88struct subchannel {
89 struct subchannel_id schid; 89 struct subchannel_id schid;
90 spinlock_t lock; /* subchannel lock */ 90 spinlock_t *lock; /* subchannel lock */
91 struct mutex reg_mutex; 91 struct mutex reg_mutex;
92 enum { 92 enum {
93 SUBCHANNEL_TYPE_IO = 0, 93 SUBCHANNEL_TYPE_IO = 0,
@@ -131,15 +131,19 @@ extern int cio_set_options (struct subchannel *, int);
131extern int cio_get_options (struct subchannel *); 131extern int cio_get_options (struct subchannel *);
132extern int cio_modify (struct subchannel *); 132extern int cio_modify (struct subchannel *);
133 133
134int cio_create_sch_lock(struct subchannel *);
135
134/* Use with care. */ 136/* Use with care. */
135#ifdef CONFIG_CCW_CONSOLE 137#ifdef CONFIG_CCW_CONSOLE
136extern struct subchannel *cio_probe_console(void); 138extern struct subchannel *cio_probe_console(void);
137extern void cio_release_console(void); 139extern void cio_release_console(void);
138extern int cio_is_console(struct subchannel_id); 140extern int cio_is_console(struct subchannel_id);
139extern struct subchannel *cio_get_console_subchannel(void); 141extern struct subchannel *cio_get_console_subchannel(void);
142extern spinlock_t * cio_get_console_lock(void);
140#else 143#else
141#define cio_is_console(schid) 0 144#define cio_is_console(schid) 0
142#define cio_get_console_subchannel() NULL 145#define cio_get_console_subchannel() NULL
 146#define cio_get_console_lock() NULL

143#endif 147#endif
144 148
145extern int cio_show_msg; 149extern int cio_show_msg;
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 26cf2f5ae2e7..4c81d890791e 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
91 /* Reset intparm to zeroes. */ 91 /* Reset intparm to zeroes. */
92 sch->schib.pmcw.intparm = 0; 92 sch->schib.pmcw.intparm = 0;
93 cio_modify(sch); 93 cio_modify(sch);
94 kfree(sch->lock);
94 kfree(sch); 95 kfree(sch);
95 } 96 }
96
97} 97}
98 98
99static void 99static void
@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
102 struct subchannel *sch; 102 struct subchannel *sch;
103 103
104 sch = to_subchannel(dev); 104 sch = to_subchannel(dev);
105 if (!cio_is_console(sch->schid)) 105 if (!cio_is_console(sch->schid)) {
106 kfree(sch->lock);
106 kfree(sch); 107 kfree(sch);
108 }
107} 109}
108 110
109extern int css_get_ssd_info(struct subchannel *sch); 111extern int css_get_ssd_info(struct subchannel *sch);
@@ -135,14 +137,16 @@ css_register_subchannel(struct subchannel *sch)
135 sch->dev.parent = &css[0]->device; 137 sch->dev.parent = &css[0]->device;
136 sch->dev.bus = &css_bus_type; 138 sch->dev.bus = &css_bus_type;
137 sch->dev.release = &css_subchannel_release; 139 sch->dev.release = &css_subchannel_release;
138 140 sch->dev.groups = subch_attr_groups;
141
139 /* make it known to the system */ 142 /* make it known to the system */
140 ret = css_sch_device_register(sch); 143 ret = css_sch_device_register(sch);
141 if (ret) 144 if (ret) {
142 printk (KERN_WARNING "%s: could not register %s\n", 145 printk (KERN_WARNING "%s: could not register %s\n",
143 __func__, sch->dev.bus_id); 146 __func__, sch->dev.bus_id);
144 else 147 return ret;
145 css_get_ssd_info(sch); 148 }
149 css_get_ssd_info(sch);
146 return ret; 150 return ret;
147} 151}
148 152
@@ -201,18 +205,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
201 unsigned long flags; 205 unsigned long flags;
202 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action; 206 enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
203 207
204 spin_lock_irqsave(&sch->lock, flags); 208 spin_lock_irqsave(sch->lock, flags);
205 disc = device_is_disconnected(sch); 209 disc = device_is_disconnected(sch);
206 if (disc && slow) { 210 if (disc && slow) {
207 /* Disconnected devices are evaluated directly only.*/ 211 /* Disconnected devices are evaluated directly only.*/
208 spin_unlock_irqrestore(&sch->lock, flags); 212 spin_unlock_irqrestore(sch->lock, flags);
209 return 0; 213 return 0;
210 } 214 }
211 /* No interrupt after machine check - kill pending timers. */ 215 /* No interrupt after machine check - kill pending timers. */
212 device_kill_pending_timer(sch); 216 device_kill_pending_timer(sch);
213 if (!disc && !slow) { 217 if (!disc && !slow) {
214 /* Non-disconnected devices are evaluated on the slow path. */ 218 /* Non-disconnected devices are evaluated on the slow path. */
215 spin_unlock_irqrestore(&sch->lock, flags); 219 spin_unlock_irqrestore(sch->lock, flags);
216 return -EAGAIN; 220 return -EAGAIN;
217 } 221 }
218 event = css_get_subchannel_status(sch); 222 event = css_get_subchannel_status(sch);
@@ -237,9 +241,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
237 /* Ask driver what to do with device. */ 241 /* Ask driver what to do with device. */
238 action = UNREGISTER; 242 action = UNREGISTER;
239 if (sch->driver && sch->driver->notify) { 243 if (sch->driver && sch->driver->notify) {
240 spin_unlock_irqrestore(&sch->lock, flags); 244 spin_unlock_irqrestore(sch->lock, flags);
241 ret = sch->driver->notify(&sch->dev, event); 245 ret = sch->driver->notify(&sch->dev, event);
242 spin_lock_irqsave(&sch->lock, flags); 246 spin_lock_irqsave(sch->lock, flags);
243 if (ret) 247 if (ret)
244 action = NONE; 248 action = NONE;
245 } 249 }
@@ -264,9 +268,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
264 case UNREGISTER: 268 case UNREGISTER:
265 case UNREGISTER_PROBE: 269 case UNREGISTER_PROBE:
266 /* Unregister device (will use subchannel lock). */ 270 /* Unregister device (will use subchannel lock). */
267 spin_unlock_irqrestore(&sch->lock, flags); 271 spin_unlock_irqrestore(sch->lock, flags);
268 css_sch_device_unregister(sch); 272 css_sch_device_unregister(sch);
269 spin_lock_irqsave(&sch->lock, flags); 273 spin_lock_irqsave(sch->lock, flags);
270 274
271 /* Reset intparm to zeroes. */ 275 /* Reset intparm to zeroes. */
272 sch->schib.pmcw.intparm = 0; 276 sch->schib.pmcw.intparm = 0;
@@ -278,7 +282,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
278 default: 282 default:
279 break; 283 break;
280 } 284 }
281 spin_unlock_irqrestore(&sch->lock, flags); 285 spin_unlock_irqrestore(sch->lock, flags);
282 /* Probe if necessary. */ 286 /* Probe if necessary. */
283 if (action == UNREGISTER_PROBE) 287 if (action == UNREGISTER_PROBE)
284 ret = css_probe_device(sch->schid); 288 ret = css_probe_device(sch->schid);
@@ -573,12 +577,24 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
573 577
574static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store); 578static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
575 579
576static inline void __init 580static inline int __init setup_css(int nr)
577setup_css(int nr)
578{ 581{
579 u32 tod_high; 582 u32 tod_high;
583 int ret;
580 584
581 memset(css[nr], 0, sizeof(struct channel_subsystem)); 585 memset(css[nr], 0, sizeof(struct channel_subsystem));
586 css[nr]->pseudo_subchannel =
587 kzalloc(sizeof(*css[nr]->pseudo_subchannel), GFP_KERNEL);
588 if (!css[nr]->pseudo_subchannel)
589 return -ENOMEM;
590 css[nr]->pseudo_subchannel->dev.parent = &css[nr]->device;
591 css[nr]->pseudo_subchannel->dev.release = css_subchannel_release;
592 sprintf(css[nr]->pseudo_subchannel->dev.bus_id, "defunct");
593 ret = cio_create_sch_lock(css[nr]->pseudo_subchannel);
594 if (ret) {
595 kfree(css[nr]->pseudo_subchannel);
596 return ret;
597 }
582 mutex_init(&css[nr]->mutex); 598 mutex_init(&css[nr]->mutex);
583 css[nr]->valid = 1; 599 css[nr]->valid = 1;
584 css[nr]->cssid = nr; 600 css[nr]->cssid = nr;
@@ -586,6 +602,7 @@ setup_css(int nr)
586 css[nr]->device.release = channel_subsystem_release; 602 css[nr]->device.release = channel_subsystem_release;
587 tod_high = (u32) (get_clock() >> 32); 603 tod_high = (u32) (get_clock() >> 32);
588 css_generate_pgid(css[nr], tod_high); 604 css_generate_pgid(css[nr], tod_high);
605 return 0;
589} 606}
590 607
591/* 608/*
@@ -622,10 +639,12 @@ init_channel_subsystem (void)
622 ret = -ENOMEM; 639 ret = -ENOMEM;
623 goto out_unregister; 640 goto out_unregister;
624 } 641 }
625 setup_css(i); 642 ret = setup_css(i);
626 ret = device_register(&css[i]->device);
627 if (ret) 643 if (ret)
628 goto out_free; 644 goto out_free;
645 ret = device_register(&css[i]->device);
646 if (ret)
647 goto out_free_all;
629 if (css_characteristics_avail && 648 if (css_characteristics_avail &&
630 css_chsc_characteristics.secm) { 649 css_chsc_characteristics.secm) {
631 ret = device_create_file(&css[i]->device, 650 ret = device_create_file(&css[i]->device,
@@ -633,6 +652,9 @@ init_channel_subsystem (void)
633 if (ret) 652 if (ret)
634 goto out_device; 653 goto out_device;
635 } 654 }
655 ret = device_register(&css[i]->pseudo_subchannel->dev);
656 if (ret)
657 goto out_file;
636 } 658 }
637 css_init_done = 1; 659 css_init_done = 1;
638 660
@@ -640,13 +662,19 @@ init_channel_subsystem (void)
640 662
641 for_each_subchannel(__init_channel_subsystem, NULL); 663 for_each_subchannel(__init_channel_subsystem, NULL);
642 return 0; 664 return 0;
665out_file:
666 device_remove_file(&css[i]->device, &dev_attr_cm_enable);
643out_device: 667out_device:
644 device_unregister(&css[i]->device); 668 device_unregister(&css[i]->device);
669out_free_all:
670 kfree(css[i]->pseudo_subchannel->lock);
671 kfree(css[i]->pseudo_subchannel);
645out_free: 672out_free:
646 kfree(css[i]); 673 kfree(css[i]);
647out_unregister: 674out_unregister:
648 while (i > 0) { 675 while (i > 0) {
649 i--; 676 i--;
677 device_unregister(&css[i]->pseudo_subchannel->dev);
650 if (css_characteristics_avail && css_chsc_characteristics.secm) 678 if (css_characteristics_avail && css_chsc_characteristics.secm)
651 device_remove_file(&css[i]->device, 679 device_remove_file(&css[i]->device,
652 &dev_attr_cm_enable); 680 &dev_attr_cm_enable);
@@ -658,6 +686,11 @@ out:
658 return ret; 686 return ret;
659} 687}
660 688
689int sch_is_pseudo_sch(struct subchannel *sch)
690{
691 return sch == to_css(sch->dev.parent)->pseudo_subchannel;
692}
693
661/* 694/*
662 * find a driver for a subchannel. They identify by the subchannel 695 * find a driver for a subchannel. They identify by the subchannel
663 * type with the exception that the console subchannel driver has its own 696 * type with the exception that the console subchannel driver has its own
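Each channel subsystem now owns a pseudo subchannel, registered under the bus id 'defunct', which serves as the parent for orphaned ccw devices; sch_is_pseudo_sch() lets hardware paths bail out early, as the cio_enable_subchannel()/cio_disable_subchannel() hunks earlier in this series show. A hypothetical view of the resulting device tree plus the guard pattern, assuming the declarations this patch adds to cio.h and css.h:

/*
 * css0 (channel subsystem)
 * |-- 0.0.0000/, 0.0.0001/, ...   real subchannels and their devices
 * `-- defunct/                    pseudo subchannel; orphan parent
 */
#include <linux/errno.h>

static int sketch_hw_op(struct subchannel *sch)
{
	if (sch_is_pseudo_sch(sch))
		return -EINVAL;	/* no hardware behind "defunct" */
	/* ... stsch()/msch() against a real subchannel ... */
	return 0;
}

Note the unwind order in init_channel_subsystem(): the out_free_all path frees the pseudo subchannel's lock and the subchannel itself only while its device was never registered; once device_register() has succeeded, cleanup goes through device_unregister() and css_subchannel_release() instead.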
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 9ff064e71767..3464c5b875c4 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -73,6 +73,8 @@ struct senseid {
73} __attribute__ ((packed,aligned(4))); 73} __attribute__ ((packed,aligned(4)));
74 74
75struct ccw_device_private { 75struct ccw_device_private {
76 struct ccw_device *cdev;
77 struct subchannel *sch;
76 int state; /* device state */ 78 int state; /* device state */
77 atomic_t onoff; 79 atomic_t onoff;
78 unsigned long registered; 80 unsigned long registered;
@@ -158,6 +160,8 @@ struct channel_subsystem {
158 int cm_enabled; 160 int cm_enabled;
159 void *cub_addr1; 161 void *cub_addr1;
160 void *cub_addr2; 162 void *cub_addr2;
163 /* for orphaned ccw devices */
164 struct subchannel *pseudo_subchannel;
161}; 165};
162#define to_css(dev) container_of(dev, struct channel_subsystem, device) 166#define to_css(dev) container_of(dev, struct channel_subsystem, device)
163 167
@@ -185,6 +189,11 @@ void css_clear_subchannel_slow_list(void);
185int css_slow_subchannels_exist(void); 189int css_slow_subchannels_exist(void);
186extern int need_rescan; 190extern int need_rescan;
187 191
192int sch_is_pseudo_sch(struct subchannel *);
193
188extern struct workqueue_struct *slow_path_wq; 194extern struct workqueue_struct *slow_path_wq;
189extern struct work_struct slow_path_work; 195extern struct work_struct slow_path_work;
196
197int subchannel_add_files (struct device *);
198extern struct attribute_group *subch_attr_groups[];
190#endif 199#endif
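css.h also starts exporting subch_attr_groups, which replaces the old subchannel_add_files() helper: css_register_subchannel() assigns the array to sch->dev.groups before registering, and the driver core then creates and removes the sysfs group together with the device. A minimal sketch of that declarative pattern (attribute names elided; the real array is defined in the device.c hunk below):

#include <linux/device.h>

static struct attribute *subch_attrs_sketch[] = {
	/* &dev_attr_chpids.attr, &dev_attr_pimpampom.attr, ... */
	NULL,
};

static struct attribute_group subch_attr_group_sketch = {
	.attrs = subch_attrs_sketch,
};

struct attribute_group *subch_attr_groups_sketch[] = {
	&subch_attr_group_sketch,
	NULL,
};

/* Registration side (cf. css_register_subchannel()):
 *	sch->dev.groups = subch_attr_groups;
 *	ret = css_sch_device_register(sch);
 * The attributes now exist before the add uevent fires and disappear
 * automatically on unregister, closing the race the manual
 * sysfs_create_group() call after registration left open. */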
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d3d3716ff84b..803579053c2f 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -23,6 +23,7 @@
23#include <asm/param.h> /* HZ */ 23#include <asm/param.h> /* HZ */
24 24
25#include "cio.h" 25#include "cio.h"
26#include "cio_debug.h"
26#include "css.h" 27#include "css.h"
27#include "device.h" 28#include "device.h"
28#include "ioasm.h" 29#include "ioasm.h"
@@ -234,9 +235,11 @@ chpids_show (struct device * dev, struct device_attribute *attr, char * buf)
234 ssize_t ret = 0; 235 ssize_t ret = 0;
235 int chp; 236 int chp;
236 237
237 for (chp = 0; chp < 8; chp++) 238 if (ssd)
238 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); 239 for (chp = 0; chp < 8; chp++)
239 240 ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]);
241 else
242 ret += sprintf (buf, "n/a");
240 ret += sprintf (buf+ret, "\n"); 243 ret += sprintf (buf+ret, "\n");
241 return min((ssize_t)PAGE_SIZE, ret); 244 return min((ssize_t)PAGE_SIZE, ret);
242} 245}
@@ -294,14 +297,44 @@ online_show (struct device *dev, struct device_attribute *attr, char *buf)
294 return sprintf(buf, cdev->online ? "1\n" : "0\n"); 297 return sprintf(buf, cdev->online ? "1\n" : "0\n");
295} 298}
296 299
300int ccw_device_is_orphan(struct ccw_device *cdev)
301{
302 return sch_is_pseudo_sch(to_subchannel(cdev->dev.parent));
303}
304
305static void ccw_device_unregister(struct work_struct *work)
306{
307 struct ccw_device_private *priv;
308 struct ccw_device *cdev;
309
310 priv = container_of(work, struct ccw_device_private, kick_work);
311 cdev = priv->cdev;
312 if (test_and_clear_bit(1, &cdev->private->registered))
313 device_unregister(&cdev->dev);
314 put_device(&cdev->dev);
315}
316
297static void 317static void
298ccw_device_remove_disconnected(struct ccw_device *cdev) 318ccw_device_remove_disconnected(struct ccw_device *cdev)
299{ 319{
300 struct subchannel *sch; 320 struct subchannel *sch;
321 unsigned long flags;
301 /* 322 /*
302 * Forced offline in disconnected state means 323 * Forced offline in disconnected state means
303 * 'throw away device'. 324 * 'throw away device'.
304 */ 325 */
326 if (ccw_device_is_orphan(cdev)) {
327 /* Deregister ccw device. */
328 spin_lock_irqsave(cdev->ccwlock, flags);
329 cdev->private->state = DEV_STATE_NOT_OPER;
330 spin_unlock_irqrestore(cdev->ccwlock, flags);
331 if (get_device(&cdev->dev)) {
332 PREPARE_WORK(&cdev->private->kick_work,
333 ccw_device_unregister);
334 queue_work(ccw_device_work, &cdev->private->kick_work);
335 }
336 return ;
337 }
305 sch = to_subchannel(cdev->dev.parent); 338 sch = to_subchannel(cdev->dev.parent);
306 css_sch_device_unregister(sch); 339 css_sch_device_unregister(sch);
307 /* Reset intparm to zeroes. */ 340 /* Reset intparm to zeroes. */
@@ -462,6 +495,8 @@ available_show (struct device *dev, struct device_attribute *attr, char *buf)
462 struct ccw_device *cdev = to_ccwdev(dev); 495 struct ccw_device *cdev = to_ccwdev(dev);
463 struct subchannel *sch; 496 struct subchannel *sch;
464 497
498 if (ccw_device_is_orphan(cdev))
499 return sprintf(buf, "no device\n");
465 switch (cdev->private->state) { 500 switch (cdev->private->state) {
466 case DEV_STATE_BOXED: 501 case DEV_STATE_BOXED:
467 return sprintf(buf, "boxed\n"); 502 return sprintf(buf, "boxed\n");
@@ -498,11 +533,10 @@ static struct attribute_group subch_attr_group = {
498 .attrs = subch_attrs, 533 .attrs = subch_attrs,
499}; 534};
500 535
501static inline int 536struct attribute_group *subch_attr_groups[] = {
502subchannel_add_files (struct device *dev) 537 &subch_attr_group,
503{ 538 NULL,
504 return sysfs_create_group(&dev->kobj, &subch_attr_group); 539};
505}
506 540
507static struct attribute * ccwdev_attrs[] = { 541static struct attribute * ccwdev_attrs[] = {
508 &dev_attr_devtype.attr, 542 &dev_attr_devtype.attr,
@@ -563,11 +597,10 @@ match_devno(struct device * dev, void * data)
563 597
564 cdev = to_ccwdev(dev); 598 cdev = to_ccwdev(dev);
565 if ((cdev->private->state == DEV_STATE_DISCONNECTED) && 599 if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
600 !ccw_device_is_orphan(cdev) &&
566 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) && 601 ccw_dev_id_is_equal(&cdev->private->dev_id, &d->dev_id) &&
567 (cdev != d->sibling)) { 602 (cdev != d->sibling))
568 cdev->private->state = DEV_STATE_NOT_OPER;
569 return 1; 603 return 1;
570 }
571 return 0; 604 return 0;
572} 605}
573 606
@@ -584,13 +617,36 @@ static struct ccw_device * get_disc_ccwdev_by_dev_id(struct ccw_dev_id *dev_id,
584 return dev ? to_ccwdev(dev) : NULL; 617 return dev ? to_ccwdev(dev) : NULL;
585} 618}
586 619
587static void 620static int match_orphan(struct device *dev, void *data)
588ccw_device_add_changed(void *data) 621{
622 struct ccw_dev_id *dev_id;
623 struct ccw_device *cdev;
624
625 dev_id = data;
626 cdev = to_ccwdev(dev);
627 return ccw_dev_id_is_equal(&cdev->private->dev_id, dev_id);
628}
629
630static struct ccw_device *
631get_orphaned_ccwdev_by_dev_id(struct channel_subsystem *css,
632 struct ccw_dev_id *dev_id)
589{ 633{
634 struct device *dev;
590 635
636 dev = device_find_child(&css->pseudo_subchannel->dev, dev_id,
637 match_orphan);
638
639 return dev ? to_ccwdev(dev) : NULL;
640}
641
642static void
643ccw_device_add_changed(struct work_struct *work)
644{
645 struct ccw_device_private *priv;
591 struct ccw_device *cdev; 646 struct ccw_device *cdev;
592 647
593 cdev = data; 648 priv = container_of(work, struct ccw_device_private, kick_work);
649 cdev = priv->cdev;
594 if (device_add(&cdev->dev)) { 650 if (device_add(&cdev->dev)) {
595 put_device(&cdev->dev); 651 put_device(&cdev->dev);
596 return; 652 return;
@@ -602,64 +658,21 @@ ccw_device_add_changed(void *data)
602 } 658 }
603} 659}
604 660
605extern int css_get_ssd_info(struct subchannel *sch); 661void ccw_device_do_unreg_rereg(struct work_struct *work)
606
607void
608ccw_device_do_unreg_rereg(void *data)
609{ 662{
663 struct ccw_device_private *priv;
610 struct ccw_device *cdev; 664 struct ccw_device *cdev;
611 struct subchannel *sch; 665 struct subchannel *sch;
612 int need_rename;
613 666
614 cdev = data; 667 priv = container_of(work, struct ccw_device_private, kick_work);
668 cdev = priv->cdev;
615 sch = to_subchannel(cdev->dev.parent); 669 sch = to_subchannel(cdev->dev.parent);
616 if (cdev->private->dev_id.devno != sch->schib.pmcw.dev) { 670
617 /*
618 * The device number has changed. This is usually only when
619 * a device has been detached under VM and then re-appeared
620 * on another subchannel because of a different attachment
 621 * order than before. Ideally, we should just switch
622 * subchannels, but unfortunately, this is not possible with
623 * the current implementation.
624 * Instead, we search for the old subchannel for this device
625 * number and deregister so there are no collisions with the
626 * newly registered ccw_device.
627 * FIXME: Find another solution so the block layer doesn't
628 * get possibly sick...
629 */
630 struct ccw_device *other_cdev;
631 struct ccw_dev_id dev_id;
632
633 need_rename = 1;
634 dev_id.devno = sch->schib.pmcw.dev;
635 dev_id.ssid = sch->schid.ssid;
636 other_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
637 if (other_cdev) {
638 struct subchannel *other_sch;
639
640 other_sch = to_subchannel(other_cdev->dev.parent);
641 if (get_device(&other_sch->dev)) {
642 stsch(other_sch->schid, &other_sch->schib);
643 if (other_sch->schib.pmcw.dnv) {
644 other_sch->schib.pmcw.intparm = 0;
645 cio_modify(other_sch);
646 }
647 css_sch_device_unregister(other_sch);
648 }
649 }
650 /* Update ssd info here. */
651 css_get_ssd_info(sch);
652 cdev->private->dev_id.devno = sch->schib.pmcw.dev;
653 } else
654 need_rename = 0;
655 device_remove_files(&cdev->dev); 671 device_remove_files(&cdev->dev);
656 if (test_and_clear_bit(1, &cdev->private->registered)) 672 if (test_and_clear_bit(1, &cdev->private->registered))
657 device_del(&cdev->dev); 673 device_del(&cdev->dev);
658 if (need_rename)
659 snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x",
660 sch->schid.ssid, sch->schib.pmcw.dev);
661 PREPARE_WORK(&cdev->private->kick_work, 674 PREPARE_WORK(&cdev->private->kick_work,
662 ccw_device_add_changed, cdev); 675 ccw_device_add_changed);
663 queue_work(ccw_device_work, &cdev->private->kick_work); 676 queue_work(ccw_device_work, &cdev->private->kick_work);
664} 677}
665 678
@@ -673,18 +686,194 @@ ccw_device_release(struct device *dev)
673 kfree(cdev); 686 kfree(cdev);
674} 687}
675 688
689static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
690{
691 struct ccw_device *cdev;
692
693 cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
694 if (cdev) {
695 cdev->private = kzalloc(sizeof(struct ccw_device_private),
696 GFP_KERNEL | GFP_DMA);
697 if (cdev->private)
698 return cdev;
699 }
700 kfree(cdev);
701 return ERR_PTR(-ENOMEM);
702}
703
704static int io_subchannel_initialize_dev(struct subchannel *sch,
705 struct ccw_device *cdev)
706{
707 cdev->private->cdev = cdev;
708 atomic_set(&cdev->private->onoff, 0);
709 cdev->dev.parent = &sch->dev;
710 cdev->dev.release = ccw_device_release;
711 INIT_LIST_HEAD(&cdev->private->kick_work.entry);
712 /* Do first half of device_register. */
713 device_initialize(&cdev->dev);
714 if (!get_device(&sch->dev)) {
715 if (cdev->dev.release)
716 cdev->dev.release(&cdev->dev);
717 return -ENODEV;
718 }
719 return 0;
720}
721
722static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
723{
724 struct ccw_device *cdev;
725 int ret;
726
727 cdev = io_subchannel_allocate_dev(sch);
728 if (!IS_ERR(cdev)) {
729 ret = io_subchannel_initialize_dev(sch, cdev);
730 if (ret) {
731 kfree(cdev);
732 cdev = ERR_PTR(ret);
733 }
734 }
735 return cdev;
736}
737
738static int io_subchannel_recog(struct ccw_device *, struct subchannel *);
739
740static void sch_attach_device(struct subchannel *sch,
741 struct ccw_device *cdev)
742{
743 spin_lock_irq(sch->lock);
744 sch->dev.driver_data = cdev;
745 cdev->private->schid = sch->schid;
746 cdev->ccwlock = sch->lock;
747 device_trigger_reprobe(sch);
748 spin_unlock_irq(sch->lock);
749}
750
751static void sch_attach_disconnected_device(struct subchannel *sch,
752 struct ccw_device *cdev)
753{
754 struct subchannel *other_sch;
755 int ret;
756
757 other_sch = to_subchannel(get_device(cdev->dev.parent));
758 ret = device_move(&cdev->dev, &sch->dev);
759 if (ret) {
760 CIO_MSG_EVENT(2, "Moving disconnected device 0.%x.%04x failed "
761 "(ret=%d)!\n", cdev->private->dev_id.ssid,
762 cdev->private->dev_id.devno, ret);
763 put_device(&other_sch->dev);
764 return;
765 }
766 other_sch->dev.driver_data = NULL;
767 /* No need to keep a subchannel without ccw device around. */
768 css_sch_device_unregister(other_sch);
769 put_device(&other_sch->dev);
770 sch_attach_device(sch, cdev);
771}
772
773static void sch_attach_orphaned_device(struct subchannel *sch,
774 struct ccw_device *cdev)
775{
776 int ret;
777
778 /* Try to move the ccw device to its new subchannel. */
779 ret = device_move(&cdev->dev, &sch->dev);
780 if (ret) {
781 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage "
782 "failed (ret=%d)!\n",
783 cdev->private->dev_id.ssid,
784 cdev->private->dev_id.devno, ret);
785 return;
786 }
787 sch_attach_device(sch, cdev);
788}
789
790static void sch_create_and_recog_new_device(struct subchannel *sch)
791{
792 struct ccw_device *cdev;
793
794 /* Need to allocate a new ccw device. */
795 cdev = io_subchannel_create_ccwdev(sch);
796 if (IS_ERR(cdev)) {
797 /* OK, we did everything we could... */
798 css_sch_device_unregister(sch);
799 return;
800 }
801 spin_lock_irq(sch->lock);
802 sch->dev.driver_data = cdev;
803 spin_unlock_irq(sch->lock);
804 /* Start recognition for the new ccw device. */
805 if (io_subchannel_recog(cdev, sch)) {
806 spin_lock_irq(sch->lock);
807 sch->dev.driver_data = NULL;
808 spin_unlock_irq(sch->lock);
809 if (cdev->dev.release)
810 cdev->dev.release(&cdev->dev);
811 css_sch_device_unregister(sch);
812 }
813}
814
815
816void ccw_device_move_to_orphanage(struct work_struct *work)
817{
818 struct ccw_device_private *priv;
819 struct ccw_device *cdev;
820 struct ccw_device *replacing_cdev;
821 struct subchannel *sch;
822 int ret;
823 struct channel_subsystem *css;
824 struct ccw_dev_id dev_id;
825
826 priv = container_of(work, struct ccw_device_private, kick_work);
827 cdev = priv->cdev;
828 sch = to_subchannel(cdev->dev.parent);
829 css = to_css(sch->dev.parent);
830 dev_id.devno = sch->schib.pmcw.dev;
831 dev_id.ssid = sch->schid.ssid;
832
833 /*
834 * Move the orphaned ccw device to the orphanage so the replacing
835 * ccw device can take its place on the subchannel.
836 */
837 ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev);
838 if (ret) {
839 CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed "
840 "(ret=%d)!\n", cdev->private->dev_id.ssid,
841 cdev->private->dev_id.devno, ret);
842 return;
843 }
844 cdev->ccwlock = css->pseudo_subchannel->lock;
845 /*
846 * Search for the replacing ccw device
847 * - among the disconnected devices
848 * - in the orphanage
849 */
850 replacing_cdev = get_disc_ccwdev_by_dev_id(&dev_id, cdev);
851 if (replacing_cdev) {
852 sch_attach_disconnected_device(sch, replacing_cdev);
853 return;
854 }
855 replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id);
856 if (replacing_cdev) {
857 sch_attach_orphaned_device(sch, replacing_cdev);
858 return;
859 }
860 sch_create_and_recog_new_device(sch);
861}
862
676/* 863/*
677 * Register recognized device. 864 * Register recognized device.
678 */ 865 */
679static void 866static void
680io_subchannel_register(void *data) 867io_subchannel_register(struct work_struct *work)
681{ 868{
869 struct ccw_device_private *priv;
682 struct ccw_device *cdev; 870 struct ccw_device *cdev;
683 struct subchannel *sch; 871 struct subchannel *sch;
684 int ret; 872 int ret;
685 unsigned long flags; 873 unsigned long flags;
686 874
687 cdev = data; 875 priv = container_of(work, struct ccw_device_private, kick_work);
876 cdev = priv->cdev;
688 sch = to_subchannel(cdev->dev.parent); 877 sch = to_subchannel(cdev->dev.parent);
689 878
690 /* 879 /*
@@ -709,9 +898,9 @@ io_subchannel_register(void *data)
709 printk (KERN_WARNING "%s: could not register %s\n", 898 printk (KERN_WARNING "%s: could not register %s\n",
710 __func__, cdev->dev.bus_id); 899 __func__, cdev->dev.bus_id);
711 put_device(&cdev->dev); 900 put_device(&cdev->dev);
712 spin_lock_irqsave(&sch->lock, flags); 901 spin_lock_irqsave(sch->lock, flags);
713 sch->dev.driver_data = NULL; 902 sch->dev.driver_data = NULL;
714 spin_unlock_irqrestore(&sch->lock, flags); 903 spin_unlock_irqrestore(sch->lock, flags);
715 kfree (cdev->private); 904 kfree (cdev->private);
716 kfree (cdev); 905 kfree (cdev);
717 put_device(&sch->dev); 906 put_device(&sch->dev);
@@ -719,11 +908,6 @@ io_subchannel_register(void *data)
719 wake_up(&ccw_device_init_wq); 908 wake_up(&ccw_device_init_wq);
720 return; 909 return;
721 } 910 }
722
723 ret = subchannel_add_files(cdev->dev.parent);
724 if (ret)
725 printk(KERN_WARNING "%s: could not add attributes to %s\n",
726 __func__, sch->dev.bus_id);
727 put_device(&cdev->dev); 911 put_device(&cdev->dev);
728out: 912out:
729 cdev->private->flags.recog_done = 1; 913 cdev->private->flags.recog_done = 1;
@@ -734,11 +918,14 @@ out:
734} 918}
735 919
736void 920void
737ccw_device_call_sch_unregister(void *data) 921ccw_device_call_sch_unregister(struct work_struct *work)
738{ 922{
739 struct ccw_device *cdev = data; 923 struct ccw_device_private *priv;
924 struct ccw_device *cdev;
740 struct subchannel *sch; 925 struct subchannel *sch;
741 926
927 priv = container_of(work, struct ccw_device_private, kick_work);
928 cdev = priv->cdev;
742 sch = to_subchannel(cdev->dev.parent); 929 sch = to_subchannel(cdev->dev.parent);
743 css_sch_device_unregister(sch); 930 css_sch_device_unregister(sch);
744 /* Reset intparm to zeroes. */ 931 /* Reset intparm to zeroes. */
@@ -768,7 +955,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
768 break; 955 break;
769 sch = to_subchannel(cdev->dev.parent); 956 sch = to_subchannel(cdev->dev.parent);
770 PREPARE_WORK(&cdev->private->kick_work, 957 PREPARE_WORK(&cdev->private->kick_work,
771 ccw_device_call_sch_unregister, cdev); 958 ccw_device_call_sch_unregister);
772 queue_work(slow_path_wq, &cdev->private->kick_work); 959 queue_work(slow_path_wq, &cdev->private->kick_work);
773 if (atomic_dec_and_test(&ccw_device_init_count)) 960 if (atomic_dec_and_test(&ccw_device_init_count))
774 wake_up(&ccw_device_init_wq); 961 wake_up(&ccw_device_init_wq);
@@ -783,7 +970,7 @@ io_subchannel_recog_done(struct ccw_device *cdev)
783 if (!get_device(&cdev->dev)) 970 if (!get_device(&cdev->dev))
784 break; 971 break;
785 PREPARE_WORK(&cdev->private->kick_work, 972 PREPARE_WORK(&cdev->private->kick_work,
786 io_subchannel_register, cdev); 973 io_subchannel_register);
787 queue_work(slow_path_wq, &cdev->private->kick_work); 974 queue_work(slow_path_wq, &cdev->private->kick_work);
788 break; 975 break;
789 } 976 }
@@ -797,7 +984,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
797 984
798 sch->dev.driver_data = cdev; 985 sch->dev.driver_data = cdev;
799 sch->driver = &io_subchannel_driver; 986 sch->driver = &io_subchannel_driver;
800 cdev->ccwlock = &sch->lock; 987 cdev->ccwlock = sch->lock;
801 988
802 /* Init private data. */ 989 /* Init private data. */
803 priv = cdev->private; 990 priv = cdev->private;
@@ -817,9 +1004,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
817 atomic_inc(&ccw_device_init_count); 1004 atomic_inc(&ccw_device_init_count);
818 1005
819 /* Start async. device sensing. */ 1006 /* Start async. device sensing. */
820 spin_lock_irq(&sch->lock); 1007 spin_lock_irq(sch->lock);
821 rc = ccw_device_recognition(cdev); 1008 rc = ccw_device_recognition(cdev);
822 spin_unlock_irq(&sch->lock); 1009 spin_unlock_irq(sch->lock);
823 if (rc) { 1010 if (rc) {
824 if (atomic_dec_and_test(&ccw_device_init_count)) 1011 if (atomic_dec_and_test(&ccw_device_init_count))
825 wake_up(&ccw_device_init_wq); 1012 wake_up(&ccw_device_init_wq);
@@ -827,12 +1014,55 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
827 return rc; 1014 return rc;
828} 1015}
829 1016
1017static void ccw_device_move_to_sch(struct work_struct *work)
1018{
1019 struct ccw_device_private *priv;
1020 int rc;
1021 struct subchannel *sch;
1022 struct ccw_device *cdev;
1023 struct subchannel *former_parent;
1024
1025 priv = container_of(work, struct ccw_device_private, kick_work);
1026 sch = priv->sch;
1027 cdev = priv->cdev;
1028 former_parent = ccw_device_is_orphan(cdev) ?
1029 NULL : to_subchannel(get_device(cdev->dev.parent));
1030 mutex_lock(&sch->reg_mutex);
1031 /* Try to move the ccw device to its new subchannel. */
1032 rc = device_move(&cdev->dev, &sch->dev);
1033 mutex_unlock(&sch->reg_mutex);
1034 if (rc) {
1035 CIO_MSG_EVENT(2, "Moving device 0.%x.%04x to subchannel "
1036 "0.%x.%04x failed (ret=%d)!\n",
1037 cdev->private->dev_id.ssid,
1038 cdev->private->dev_id.devno, sch->schid.ssid,
1039 sch->schid.sch_no, rc);
1040 css_sch_device_unregister(sch);
1041 goto out;
1042 }
1043 if (former_parent) {
1044 spin_lock_irq(former_parent->lock);
1045 former_parent->dev.driver_data = NULL;
1046 spin_unlock_irq(former_parent->lock);
1047 css_sch_device_unregister(former_parent);
1048 /* Reset intparm to zeroes. */
1049 former_parent->schib.pmcw.intparm = 0;
1050 cio_modify(former_parent);
1051 }
1052 sch_attach_device(sch, cdev);
1053out:
1054 if (former_parent)
1055 put_device(&former_parent->dev);
1056 put_device(&cdev->dev);
1057}
1058
830static int 1059static int
831io_subchannel_probe (struct subchannel *sch) 1060io_subchannel_probe (struct subchannel *sch)
832{ 1061{
833 struct ccw_device *cdev; 1062 struct ccw_device *cdev;
834 int rc; 1063 int rc;
835 unsigned long flags; 1064 unsigned long flags;
1065 struct ccw_dev_id dev_id;
836 1066
837 if (sch->dev.driver_data) { 1067 if (sch->dev.driver_data) {
838 /* 1068 /*
@@ -843,7 +1073,6 @@ io_subchannel_probe (struct subchannel *sch)
843 cdev = sch->dev.driver_data; 1073 cdev = sch->dev.driver_data;
844 device_initialize(&cdev->dev); 1074 device_initialize(&cdev->dev);
845 ccw_device_register(cdev); 1075 ccw_device_register(cdev);
846 subchannel_add_files(&sch->dev);
847 /* 1076 /*
848 * Check if the device is already online. If it is 1077 * Check if the device is already online. If it is
849 * the reference count needs to be corrected 1078 * the reference count needs to be corrected
@@ -856,33 +1085,37 @@ io_subchannel_probe (struct subchannel *sch)
856 get_device(&cdev->dev); 1085 get_device(&cdev->dev);
857 return 0; 1086 return 0;
858 } 1087 }
859 cdev = kzalloc (sizeof(*cdev), GFP_KERNEL); 1088 /*
1089 * First check if a fitting device may be found amongst the
1090 * disconnected devices or in the orphanage.
1091 */
1092 dev_id.devno = sch->schib.pmcw.dev;
1093 dev_id.ssid = sch->schid.ssid;
1094 cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL);
860 if (!cdev) 1095 if (!cdev)
861 return -ENOMEM; 1096 cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent),
862 cdev->private = kzalloc(sizeof(struct ccw_device_private), 1097 &dev_id);
863 GFP_KERNEL | GFP_DMA); 1098 if (cdev) {
864 if (!cdev->private) { 1099 /*
865 kfree(cdev); 1100 * Schedule moving the device until when we have a registered
866 return -ENOMEM; 1101 * subchannel to move to and succeed the probe. We can
867 } 1102 * unregister later again, when the probe is through.
868 atomic_set(&cdev->private->onoff, 0); 1103 */
869 cdev->dev.parent = &sch->dev; 1104 cdev->private->sch = sch;
870 cdev->dev.release = ccw_device_release; 1105 PREPARE_WORK(&cdev->private->kick_work,
871 INIT_LIST_HEAD(&cdev->private->kick_work.entry); 1106 ccw_device_move_to_sch);
872 /* Do first half of device_register. */ 1107 queue_work(slow_path_wq, &cdev->private->kick_work);
873 device_initialize(&cdev->dev); 1108 return 0;
874
875 if (!get_device(&sch->dev)) {
876 if (cdev->dev.release)
877 cdev->dev.release(&cdev->dev);
878 return -ENODEV;
879 } 1109 }
1110 cdev = io_subchannel_create_ccwdev(sch);
1111 if (IS_ERR(cdev))
1112 return PTR_ERR(cdev);
880 1113
881 rc = io_subchannel_recog(cdev, sch); 1114 rc = io_subchannel_recog(cdev, sch);
882 if (rc) { 1115 if (rc) {
883 spin_lock_irqsave(&sch->lock, flags); 1116 spin_lock_irqsave(sch->lock, flags);
884 sch->dev.driver_data = NULL; 1117 sch->dev.driver_data = NULL;
885 spin_unlock_irqrestore(&sch->lock, flags); 1118 spin_unlock_irqrestore(sch->lock, flags);
886 if (cdev->dev.release) 1119 if (cdev->dev.release)
887 cdev->dev.release(&cdev->dev); 1120 cdev->dev.release(&cdev->dev);
888 } 1121 }
@@ -890,17 +1123,6 @@ io_subchannel_probe (struct subchannel *sch)
890 return rc; 1123 return rc;
891} 1124}
892 1125
893static void
894ccw_device_unregister(void *data)
895{
896 struct ccw_device *cdev;
897
898 cdev = (struct ccw_device *)data;
899 if (test_and_clear_bit(1, &cdev->private->registered))
900 device_unregister(&cdev->dev);
901 put_device(&cdev->dev);
902}
903
904static int 1126static int
905io_subchannel_remove (struct subchannel *sch) 1127io_subchannel_remove (struct subchannel *sch)
906{ 1128{
@@ -921,7 +1143,7 @@ io_subchannel_remove (struct subchannel *sch)
921 */ 1143 */
922 if (get_device(&cdev->dev)) { 1144 if (get_device(&cdev->dev)) {
923 PREPARE_WORK(&cdev->private->kick_work, 1145 PREPARE_WORK(&cdev->private->kick_work,
924 ccw_device_unregister, cdev); 1146 ccw_device_unregister);
925 queue_work(ccw_device_work, &cdev->private->kick_work); 1147 queue_work(ccw_device_work, &cdev->private->kick_work);
926 } 1148 }
927 return 0; 1149 return 0;
@@ -1003,6 +1225,13 @@ static struct ccw_device console_cdev;
1003static struct ccw_device_private console_private; 1225static struct ccw_device_private console_private;
1004static int console_cdev_in_use; 1226static int console_cdev_in_use;
1005 1227
1228static DEFINE_SPINLOCK(ccw_console_lock);
1229
1230spinlock_t * cio_get_console_lock(void)
1231{
1232 return &ccw_console_lock;
1233}
1234
1006static int 1235static int
1007ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) 1236ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
1008{ 1237{
@@ -1048,6 +1277,7 @@ ccw_device_probe_console(void)
1048 memset(&console_cdev, 0, sizeof(struct ccw_device)); 1277 memset(&console_cdev, 0, sizeof(struct ccw_device));
1049 memset(&console_private, 0, sizeof(struct ccw_device_private)); 1278 memset(&console_private, 0, sizeof(struct ccw_device_private));
1050 console_cdev.private = &console_private; 1279 console_cdev.private = &console_private;
1280 console_private.cdev = &console_cdev;
1051 ret = ccw_device_console_enable(&console_cdev, sch); 1281 ret = ccw_device_console_enable(&console_cdev, sch);
1052 if (ret) { 1282 if (ret) {
1053 cio_release_console(); 1283 cio_release_console();
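Much of the churn in device.c stems from the workqueue API change: work functions now receive the work_struct itself instead of a void * argument, so struct ccw_device_private gains a cdev backpointer and every handler recovers its context via container_of(). A self-contained sketch of the pattern (field names follow the patch; the struct is reduced to the relevant members):

#include <linux/workqueue.h>

struct ccw_device;	/* opaque here; fully defined in ccwdev.h */

struct sketch_private {
	struct ccw_device *cdev;	/* backpointer added by this patch */
	struct work_struct kick_work;
};

static void sketch_work_fn(struct work_struct *work)
{
	struct sketch_private *priv =
		container_of(work, struct sketch_private, kick_work);
	struct ccw_device *cdev = priv->cdev;

	/* ... operate on cdev, e.g. device_unregister(&cdev->dev) ... */
	(void)cdev;
}

/* Scheduling side -- PREPARE_WORK() now takes only the function:
 *	PREPARE_WORK(&priv->kick_work, sketch_work_fn);
 *	queue_work(ccw_device_work, &priv->kick_work);
 */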
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 9233b5c0bcc8..29db6341d632 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -78,8 +78,10 @@ void io_subchannel_recog_done(struct ccw_device *cdev);
78 78
79int ccw_device_cancel_halt_clear(struct ccw_device *); 79int ccw_device_cancel_halt_clear(struct ccw_device *);
80 80
81void ccw_device_do_unreg_rereg(void *); 81void ccw_device_do_unreg_rereg(struct work_struct *);
82void ccw_device_call_sch_unregister(void *); 82void ccw_device_call_sch_unregister(struct work_struct *);
83void ccw_device_move_to_orphanage(struct work_struct *);
84int ccw_device_is_orphan(struct ccw_device *);
83 85
84int ccw_device_recognition(struct ccw_device *); 86int ccw_device_recognition(struct ccw_device *);
85int ccw_device_online(struct ccw_device *); 87int ccw_device_online(struct ccw_device *);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 09c7672eb3f3..eed14572fc3b 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -186,15 +186,14 @@ ccw_device_handle_oper(struct ccw_device *cdev)
186 /* 186 /*
187 * Check if cu type and device type still match. If 187 * Check if cu type and device type still match. If
188 * not, it is certainly another device and we have to 188 * not, it is certainly another device and we have to
189 * de- and re-register. Also check here for non-matching devno. 189 * de- and re-register.
190 */ 190 */
191 if (cdev->id.cu_type != cdev->private->senseid.cu_type || 191 if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
192 cdev->id.cu_model != cdev->private->senseid.cu_model || 192 cdev->id.cu_model != cdev->private->senseid.cu_model ||
193 cdev->id.dev_type != cdev->private->senseid.dev_type || 193 cdev->id.dev_type != cdev->private->senseid.dev_type ||
194 cdev->id.dev_model != cdev->private->senseid.dev_model || 194 cdev->id.dev_model != cdev->private->senseid.dev_model) {
195 cdev->private->dev_id.devno != sch->schib.pmcw.dev) {
196 PREPARE_WORK(&cdev->private->kick_work, 195 PREPARE_WORK(&cdev->private->kick_work,
197 ccw_device_do_unreg_rereg, cdev); 196 ccw_device_do_unreg_rereg);
198 queue_work(ccw_device_work, &cdev->private->kick_work); 197 queue_work(ccw_device_work, &cdev->private->kick_work);
199 return 0; 198 return 0;
200 } 199 }
@@ -329,19 +328,21 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
329} 328}
330 329
331static void 330static void
332ccw_device_oper_notify(void *data) 331ccw_device_oper_notify(struct work_struct *work)
333{ 332{
333 struct ccw_device_private *priv;
334 struct ccw_device *cdev; 334 struct ccw_device *cdev;
335 struct subchannel *sch; 335 struct subchannel *sch;
336 int ret; 336 int ret;
337 337
338 cdev = data; 338 priv = container_of(work, struct ccw_device_private, kick_work);
339 cdev = priv->cdev;
339 sch = to_subchannel(cdev->dev.parent); 340 sch = to_subchannel(cdev->dev.parent);
340 ret = (sch->driver && sch->driver->notify) ? 341 ret = (sch->driver && sch->driver->notify) ?
341 sch->driver->notify(&sch->dev, CIO_OPER) : 0; 342 sch->driver->notify(&sch->dev, CIO_OPER) : 0;
342 if (!ret) 343 if (!ret)
343 /* Driver doesn't want device back. */ 344 /* Driver doesn't want device back. */
344 ccw_device_do_unreg_rereg(cdev); 345 ccw_device_do_unreg_rereg(work);
345 else { 346 else {
346 /* Reenable channel measurements, if needed. */ 347 /* Reenable channel measurements, if needed. */
347 cmf_reenable(cdev); 348 cmf_reenable(cdev);
@@ -377,8 +378,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
377 378
378 if (cdev->private->flags.donotify) { 379 if (cdev->private->flags.donotify) {
379 cdev->private->flags.donotify = 0; 380 cdev->private->flags.donotify = 0;
380 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify, 381 PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify);
381 cdev);
382 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 382 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
383 } 383 }
384 wake_up(&cdev->private->wait_q); 384 wake_up(&cdev->private->wait_q);
@@ -528,13 +528,15 @@ ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
528 528
529 529
530static void 530static void
531ccw_device_nopath_notify(void *data) 531ccw_device_nopath_notify(struct work_struct *work)
532{ 532{
533 struct ccw_device_private *priv;
533 struct ccw_device *cdev; 534 struct ccw_device *cdev;
534 struct subchannel *sch; 535 struct subchannel *sch;
535 int ret; 536 int ret;
536 537
537 cdev = data; 538 priv = container_of(work, struct ccw_device_private, kick_work);
539 cdev = priv->cdev;
538 sch = to_subchannel(cdev->dev.parent); 540 sch = to_subchannel(cdev->dev.parent);
539 /* Extra sanity. */ 541 /* Extra sanity. */
540 if (sch->lpm) 542 if (sch->lpm)
@@ -547,8 +549,7 @@ ccw_device_nopath_notify(void *data)
547 cio_disable_subchannel(sch); 549 cio_disable_subchannel(sch);
548 if (get_device(&cdev->dev)) { 550 if (get_device(&cdev->dev)) {
549 PREPARE_WORK(&cdev->private->kick_work, 551 PREPARE_WORK(&cdev->private->kick_work,
550 ccw_device_call_sch_unregister, 552 ccw_device_call_sch_unregister);
551 cdev);
552 queue_work(ccw_device_work, 553 queue_work(ccw_device_work,
553 &cdev->private->kick_work); 554 &cdev->private->kick_work);
554 } else 555 } else
@@ -607,7 +608,7 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
607 /* Reset oper notify indication after verify error. */ 608 /* Reset oper notify indication after verify error. */
608 cdev->private->flags.donotify = 0; 609 cdev->private->flags.donotify = 0;
609 PREPARE_WORK(&cdev->private->kick_work, 610 PREPARE_WORK(&cdev->private->kick_work,
610 ccw_device_nopath_notify, cdev); 611 ccw_device_nopath_notify);
611 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 612 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
612 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 613 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
613 break; 614 break;
@@ -674,6 +675,10 @@ ccw_device_offline(struct ccw_device *cdev)
674{ 675{
675 struct subchannel *sch; 676 struct subchannel *sch;
676 677
678 if (ccw_device_is_orphan(cdev)) {
679 ccw_device_done(cdev, DEV_STATE_OFFLINE);
680 return 0;
681 }
677 sch = to_subchannel(cdev->dev.parent); 682 sch = to_subchannel(cdev->dev.parent);
678 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) 683 if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv)
679 return -ENODEV; 684 return -ENODEV;
@@ -738,7 +743,7 @@ ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
738 sch = to_subchannel(cdev->dev.parent); 743 sch = to_subchannel(cdev->dev.parent);
739 if (get_device(&cdev->dev)) { 744 if (get_device(&cdev->dev)) {
740 PREPARE_WORK(&cdev->private->kick_work, 745 PREPARE_WORK(&cdev->private->kick_work,
741 ccw_device_call_sch_unregister, cdev); 746 ccw_device_call_sch_unregister);
742 queue_work(ccw_device_work, &cdev->private->kick_work); 747 queue_work(ccw_device_work, &cdev->private->kick_work);
743 } 748 }
744 wake_up(&cdev->private->wait_q); 749 wake_up(&cdev->private->wait_q);
@@ -769,7 +774,7 @@ ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
769 } 774 }
770 if (get_device(&cdev->dev)) { 775 if (get_device(&cdev->dev)) {
771 PREPARE_WORK(&cdev->private->kick_work, 776 PREPARE_WORK(&cdev->private->kick_work,
772 ccw_device_call_sch_unregister, cdev); 777 ccw_device_call_sch_unregister);
773 queue_work(ccw_device_work, &cdev->private->kick_work); 778 queue_work(ccw_device_work, &cdev->private->kick_work);
774 } 779 }
775 wake_up(&cdev->private->wait_q); 780 wake_up(&cdev->private->wait_q);
@@ -874,7 +879,7 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
874 sch = to_subchannel(cdev->dev.parent); 879 sch = to_subchannel(cdev->dev.parent);
875 if (!sch->lpm) { 880 if (!sch->lpm) {
876 PREPARE_WORK(&cdev->private->kick_work, 881 PREPARE_WORK(&cdev->private->kick_work,
877 ccw_device_nopath_notify, cdev); 882 ccw_device_nopath_notify);
878 queue_work(ccw_device_notify_work, 883 queue_work(ccw_device_notify_work,
879 &cdev->private->kick_work); 884 &cdev->private->kick_work);
880 } else 885 } else
@@ -969,7 +974,7 @@ ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
969 ERR_PTR(-EIO)); 974 ERR_PTR(-EIO));
970 if (!sch->lpm) { 975 if (!sch->lpm) {
971 PREPARE_WORK(&cdev->private->kick_work, 976 PREPARE_WORK(&cdev->private->kick_work,
972 ccw_device_nopath_notify, cdev); 977 ccw_device_nopath_notify);
973 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 978 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
974 } else if (cdev->private->flags.doverify) 979 } else if (cdev->private->flags.doverify)
975 /* Start delayed path verification. */ 980 /* Start delayed path verification. */
@@ -992,7 +997,7 @@ ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
992 sch = to_subchannel(cdev->dev.parent); 997 sch = to_subchannel(cdev->dev.parent);
993 if (!sch->lpm) { 998 if (!sch->lpm) {
994 PREPARE_WORK(&cdev->private->kick_work, 999 PREPARE_WORK(&cdev->private->kick_work,
995 ccw_device_nopath_notify, cdev); 1000 ccw_device_nopath_notify);
996 queue_work(ccw_device_notify_work, 1001 queue_work(ccw_device_notify_work,
997 &cdev->private->kick_work); 1002 &cdev->private->kick_work);
998 } else 1003 } else
@@ -1021,7 +1026,7 @@ void device_kill_io(struct subchannel *sch)
1021 if (ret == -ENODEV) { 1026 if (ret == -ENODEV) {
1022 if (!sch->lpm) { 1027 if (!sch->lpm) {
1023 PREPARE_WORK(&cdev->private->kick_work, 1028 PREPARE_WORK(&cdev->private->kick_work,
1024 ccw_device_nopath_notify, cdev); 1029 ccw_device_nopath_notify);
1025 queue_work(ccw_device_notify_work, 1030 queue_work(ccw_device_notify_work,
1026 &cdev->private->kick_work); 1031 &cdev->private->kick_work);
1027 } else 1032 } else
@@ -1033,7 +1038,7 @@ void device_kill_io(struct subchannel *sch)
1033 ERR_PTR(-EIO)); 1038 ERR_PTR(-EIO));
1034 if (!sch->lpm) { 1039 if (!sch->lpm) {
1035 PREPARE_WORK(&cdev->private->kick_work, 1040 PREPARE_WORK(&cdev->private->kick_work,
1036 ccw_device_nopath_notify, cdev); 1041 ccw_device_nopath_notify);
1037 queue_work(ccw_device_notify_work, &cdev->private->kick_work); 1042 queue_work(ccw_device_notify_work, &cdev->private->kick_work);
1038 } else 1043 } else
1039 /* Start delayed path verification. */ 1044 /* Start delayed path verification. */
@@ -1104,7 +1109,8 @@ device_trigger_reprobe(struct subchannel *sch)
1104 /* Update some values. */ 1109 /* Update some values. */
1105 if (stsch(sch->schid, &sch->schib)) 1110 if (stsch(sch->schid, &sch->schib))
1106 return; 1111 return;
1107 1112 if (!sch->schib.pmcw.dnv)
1113 return;
1108 /* 1114 /*
1109 * The pim, pam, pom values may not be accurate, but they are the best 1115 * The pim, pam, pom values may not be accurate, but they are the best
1110 * we have before performing device selection :/ 1116 * we have before performing device selection :/
@@ -1118,7 +1124,13 @@ device_trigger_reprobe(struct subchannel *sch)
1118 sch->schib.pmcw.mp = 1; 1124 sch->schib.pmcw.mp = 1;
1119 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch; 1125 sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
 1120 /* We should also update ssd info, but this has to wait. */ 1126 /* We should also update ssd info, but this has to wait. */
1121 ccw_device_start_id(cdev, 0); 1127 /* Check if this is another device which appeared on the same sch. */
1128 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1129 PREPARE_WORK(&cdev->private->kick_work,
1130 ccw_device_move_to_orphanage);
1131 queue_work(ccw_device_work, &cdev->private->kick_work);
1132 } else
1133 ccw_device_start_id(cdev, 0);
1122} 1134}
1123 1135
1124static void 1136static void
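device_trigger_reprobe() now copes with a different device surfacing on a known subchannel: if the devno read back from the schib no longer matches the recognized device, the old ccw device is queued for ccw_device_move_to_orphanage() instead of restarting sense ID. A simplified sketch of the replacement search that follows, written as if inlined into device.c where the static helpers live (the real paths additionally device_move() the found cdev and unregister its former subchannel):

/* dev_id holds the devno/ssid now visible on the subchannel. */
static void sketch_find_replacement(struct channel_subsystem *css,
				    struct subchannel *sch,
				    struct ccw_dev_id *dev_id)
{
	struct ccw_device *cdev;

	/* 1. a disconnected device that reappeared here? */
	cdev = get_disc_ccwdev_by_dev_id(dev_id, NULL);
	/* 2. a device parked in the orphanage? */
	if (!cdev)
		cdev = get_orphaned_ccwdev_by_dev_id(css, dev_id);
	if (cdev)
		sch_attach_device(sch, cdev);	/* re-point lock, reprobe */
	else
		/* 3. nothing known: allocate and recognize a new device */
		sch_create_and_recog_new_device(sch);
}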
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index b39c1fa48acd..d269607336ec 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
316 ccw_device_set_timeout(cdev, 0); 316 ccw_device_set_timeout(cdev, 0);
317 if (ret == -EBUSY) { 317 if (ret == -EBUSY) {
318 /* Try again later. */ 318 /* Try again later. */
319 spin_unlock_irq(&sch->lock); 319 spin_unlock_irq(sch->lock);
320 msleep(10); 320 msleep(10);
321 spin_lock_irq(&sch->lock); 321 spin_lock_irq(sch->lock);
322 continue; 322 continue;
323 } 323 }
324 if (ret != 0) 324 if (ret != 0)
@@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
326 break; 326 break;
327 /* Wait for end of request. */ 327 /* Wait for end of request. */
328 cdev->private->intparm = magic; 328 cdev->private->intparm = magic;
329 spin_unlock_irq(&sch->lock); 329 spin_unlock_irq(sch->lock);
330 wait_event(cdev->private->wait_q, 330 wait_event(cdev->private->wait_q,
331 (cdev->private->intparm == -EIO) || 331 (cdev->private->intparm == -EIO) ||
332 (cdev->private->intparm == -EAGAIN) || 332 (cdev->private->intparm == -EAGAIN) ||
333 (cdev->private->intparm == 0)); 333 (cdev->private->intparm == 0));
334 spin_lock_irq(&sch->lock); 334 spin_lock_irq(sch->lock);
335 /* Check at least for channel end / device end */ 335 /* Check at least for channel end / device end */
336 if (cdev->private->intparm == -EIO) { 336 if (cdev->private->intparm == -EIO) {
337 /* Non-retryable error. */ 337 /* Non-retryable error. */
@@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
342 /* Success. */ 342 /* Success. */
343 break; 343 break;
344 /* Try again later. */ 344 /* Try again later. */
345 spin_unlock_irq(&sch->lock); 345 spin_unlock_irq(sch->lock);
346 msleep(10); 346 msleep(10);
347 spin_lock_irq(&sch->lock); 347 spin_lock_irq(sch->lock);
348 } while (1); 348 } while (1);
349 349
350 return ret; 350 return ret;
@@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
389 return ret; 389 return ret;
390 } 390 }
391 391
392 spin_lock_irq(&sch->lock); 392 spin_lock_irq(sch->lock);
393 /* Save interrupt handler. */ 393 /* Save interrupt handler. */
394 handler = cdev->handler; 394 handler = cdev->handler;
395 /* Temporarily install own handler. */ 395 /* Temporarily install own handler. */
@@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
406 406
407 /* Restore interrupt handler. */ 407 /* Restore interrupt handler. */
408 cdev->handler = handler; 408 cdev->handler = handler;
409 spin_unlock_irq(&sch->lock); 409 spin_unlock_irq(sch->lock);
410 410
411 clear_normalized_cda (rdc_ccw); 411 clear_normalized_cda (rdc_ccw);
412 kfree(rdc_ccw); 412 kfree(rdc_ccw);
@@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
463 rcd_ccw->count = ciw->count; 463 rcd_ccw->count = ciw->count;
464 rcd_ccw->flags = CCW_FLAG_SLI; 464 rcd_ccw->flags = CCW_FLAG_SLI;
465 465
466 spin_lock_irq(&sch->lock); 466 spin_lock_irq(sch->lock);
467 /* Save interrupt handler. */ 467 /* Save interrupt handler. */
468 handler = cdev->handler; 468 handler = cdev->handler;
469 /* Temporarily install own handler. */ 469 /* Temporarily install own handler. */
@@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
480 480
481 /* Restore interrupt handler. */ 481 /* Restore interrupt handler. */
482 cdev->handler = handler; 482 cdev->handler = handler;
483 spin_unlock_irq(&sch->lock); 483 spin_unlock_irq(sch->lock);
484 484
485 /* 485 /*
486 * on success we update the user input parms 486 * on success we update the user input parms
@@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
537 kfree(buf); 537 kfree(buf);
538 return -ENOMEM; 538 return -ENOMEM;
539 } 539 }
540 spin_lock_irqsave(&sch->lock, flags); 540 spin_lock_irqsave(sch->lock, flags);
541 ret = cio_enable_subchannel(sch, 3); 541 ret = cio_enable_subchannel(sch, 3);
542 if (ret) 542 if (ret)
543 goto out_unlock; 543 goto out_unlock;
@@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
559 goto out_unlock; 559 goto out_unlock;
560 } 560 }
561 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; 561 cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
562 spin_unlock_irqrestore(&sch->lock, flags); 562 spin_unlock_irqrestore(sch->lock, flags);
563 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); 563 wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
564 spin_lock_irqsave(&sch->lock, flags); 564 spin_lock_irqsave(sch->lock, flags);
565 cio_disable_subchannel(sch); //FIXME: return code? 565 cio_disable_subchannel(sch); //FIXME: return code?
566 if ((cdev->private->irb.scsw.dstat != 566 if ((cdev->private->irb.scsw.dstat !=
567 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || 567 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
572out_unlock: 572out_unlock:
573 kfree(buf); 573 kfree(buf);
574 kfree(buf2); 574 kfree(buf2);
575 spin_unlock_irqrestore(&sch->lock, flags); 575 spin_unlock_irqrestore(sch->lock, flags);
576 return ret; 576 return ret;
577} 577}
578 578
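Every hunk in device_ops.c is the same mechanical substitution, spin_lock_irq(&sch->lock) becoming spin_lock_irq(sch->lock): the subchannel lock has been converted from a spinlock_t embedded in struct subchannel to a separately allocated spinlock_t * (the "subchannel lock conversion" item of this merge, presumably so the lock can stay with a device that moves between subchannels). A compilable sketch of the shape of the change, with abbreviated, illustrative struct layouts:

	#include <linux/spinlock.h>

	struct subchannel_old { spinlock_t lock; };	/* lock embedded   */
	struct subchannel_new { spinlock_t *lock; };	/* lock allocated  */

	static void lock_demo(struct subchannel_old *o, struct subchannel_new *n)
	{
		spin_lock_irq(&o->lock);	/* old call sites take the address */
		spin_unlock_irq(&o->lock);

		spin_lock_irq(n->lock);		/* new call sites pass the pointer */
		spin_unlock_irq(n->lock);
	}
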
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 8d5fa1b4d11f..9d4ea449a608 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -46,6 +46,7 @@
46#include <asm/timex.h> 46#include <asm/timex.h>
47 47
48#include <asm/debug.h> 48#include <asm/debug.h>
49#include <asm/s390_rdev.h>
49#include <asm/qdio.h> 50#include <asm/qdio.h>
50 51
51#include "cio.h" 52#include "cio.h"
@@ -65,12 +66,12 @@ MODULE_LICENSE("GPL");
65/******************** HERE WE GO ***********************************/ 66/******************** HERE WE GO ***********************************/
66 67
67static const char version[] = "QDIO base support version 2"; 68static const char version[] = "QDIO base support version 2";
69extern struct bus_type ccw_bus_type;
68 70
69#ifdef QDIO_PERFORMANCE_STATS 71static int qdio_performance_stats = 0;
70static int proc_perf_file_registration; 72static int proc_perf_file_registration;
71static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc; 73static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc;
72static struct qdio_perf_stats perf_stats; 74static struct qdio_perf_stats perf_stats;
73#endif /* QDIO_PERFORMANCE_STATS */
74 75
75static int hydra_thinints; 76static int hydra_thinints;
76static int is_passthrough = 0; 77static int is_passthrough = 0;
@@ -275,9 +276,8 @@ qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
275 QDIO_DBF_TEXT4(0,trace,"sigasync"); 276 QDIO_DBF_TEXT4(0,trace,"sigasync");
276 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 277 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
277 278
278#ifdef QDIO_PERFORMANCE_STATS 279 if (qdio_performance_stats)
279 perf_stats.siga_syncs++; 280 perf_stats.siga_syncs++;
280#endif /* QDIO_PERFORMANCE_STATS */
281 281
282 cc = do_siga_sync(q->schid, gpr2, gpr3); 282 cc = do_siga_sync(q->schid, gpr2, gpr3);
283 if (cc) 283 if (cc)
@@ -322,9 +322,8 @@ qdio_siga_output(struct qdio_q *q)
322 __u32 busy_bit; 322 __u32 busy_bit;
323 __u64 start_time=0; 323 __u64 start_time=0;
324 324
325#ifdef QDIO_PERFORMANCE_STATS 325 if (qdio_performance_stats)
326 perf_stats.siga_outs++; 326 perf_stats.siga_outs++;
327#endif /* QDIO_PERFORMANCE_STATS */
328 327
329 QDIO_DBF_TEXT4(0,trace,"sigaout"); 328 QDIO_DBF_TEXT4(0,trace,"sigaout");
330 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 329 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
@@ -358,9 +357,8 @@ qdio_siga_input(struct qdio_q *q)
358 QDIO_DBF_TEXT4(0,trace,"sigain"); 357 QDIO_DBF_TEXT4(0,trace,"sigain");
359 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); 358 QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
360 359
361#ifdef QDIO_PERFORMANCE_STATS 360 if (qdio_performance_stats)
362 perf_stats.siga_ins++; 361 perf_stats.siga_ins++;
363#endif /* QDIO_PERFORMANCE_STATS */
364 362
365 cc = do_siga_input(q->schid, q->mask); 363 cc = do_siga_input(q->schid, q->mask);
366 364
@@ -954,9 +952,8 @@ __qdio_outbound_processing(struct qdio_q *q)
954 952
955 if (unlikely(qdio_reserve_q(q))) { 953 if (unlikely(qdio_reserve_q(q))) {
956 qdio_release_q(q); 954 qdio_release_q(q);
957#ifdef QDIO_PERFORMANCE_STATS 955 if (qdio_performance_stats)
958 o_p_c++; 956 o_p_c++;
959#endif /* QDIO_PERFORMANCE_STATS */
960 /* as we're sissies, we'll check next time */ 957 /* as we're sissies, we'll check next time */
961 if (likely(!atomic_read(&q->is_in_shutdown))) { 958 if (likely(!atomic_read(&q->is_in_shutdown))) {
962 qdio_mark_q(q); 959 qdio_mark_q(q);
@@ -964,10 +961,10 @@ __qdio_outbound_processing(struct qdio_q *q)
964 } 961 }
965 return; 962 return;
966 } 963 }
967#ifdef QDIO_PERFORMANCE_STATS 964 if (qdio_performance_stats) {
968 o_p_nc++; 965 o_p_nc++;
969 perf_stats.tl_runs++; 966 perf_stats.tl_runs++;
970#endif /* QDIO_PERFORMANCE_STATS */ 967 }
971 968
972 /* see comment in qdio_kick_outbound_q */ 969 /* see comment in qdio_kick_outbound_q */
973 siga_attempts=atomic_read(&q->busy_siga_counter); 970 siga_attempts=atomic_read(&q->busy_siga_counter);
@@ -1142,15 +1139,16 @@ qdio_has_inbound_q_moved(struct qdio_q *q)
1142{ 1139{
1143 int i; 1140 int i;
1144 1141
1145#ifdef QDIO_PERFORMANCE_STATS
1146 static int old_pcis=0; 1142 static int old_pcis=0;
1147 static int old_thinints=0; 1143 static int old_thinints=0;
1148 1144
1149 if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints)) 1145 if (qdio_performance_stats) {
1150 perf_stats.start_time_inbound=NOW; 1146 if ((old_pcis==perf_stats.pcis)&&
1151 else 1147 (old_thinints==perf_stats.thinints))
1152 old_pcis=perf_stats.pcis; 1148 perf_stats.start_time_inbound=NOW;
1153#endif /* QDIO_PERFORMANCE_STATS */ 1149 else
1150 old_pcis=perf_stats.pcis;
1151 }
1154 1152
1155 i=qdio_get_inbound_buffer_frontier(q); 1153 i=qdio_get_inbound_buffer_frontier(q);
1156 if ( (i!=GET_SAVED_FRONTIER(q)) || 1154 if ( (i!=GET_SAVED_FRONTIER(q)) ||
@@ -1340,10 +1338,10 @@ qdio_kick_inbound_handler(struct qdio_q *q)
1340 q->siga_error=0; 1338 q->siga_error=0;
1341 q->error_status_flags=0; 1339 q->error_status_flags=0;
1342 1340
1343#ifdef QDIO_PERFORMANCE_STATS 1341 if (qdio_performance_stats) {
1344 perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound; 1342 perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
1345 perf_stats.inbound_cnt++; 1343 perf_stats.inbound_cnt++;
1346#endif /* QDIO_PERFORMANCE_STATS */ 1344 }
1347} 1345}
1348 1346
1349static inline void 1347static inline void
@@ -1363,9 +1361,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1363 */ 1361 */
1364 if (unlikely(qdio_reserve_q(q))) { 1362 if (unlikely(qdio_reserve_q(q))) {
1365 qdio_release_q(q); 1363 qdio_release_q(q);
1366#ifdef QDIO_PERFORMANCE_STATS 1364 if (qdio_performance_stats)
1367 ii_p_c++; 1365 ii_p_c++;
1368#endif /* QDIO_PERFORMANCE_STATS */
1369 /* 1366 /*
1370 * as we might just be about to stop polling, we make 1367 * as we might just be about to stop polling, we make
1371 * sure that we check again at least once more 1368 * sure that we check again at least once more
@@ -1373,9 +1370,8 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1373 tiqdio_sched_tl(); 1370 tiqdio_sched_tl();
1374 return; 1371 return;
1375 } 1372 }
1376#ifdef QDIO_PERFORMANCE_STATS 1373 if (qdio_performance_stats)
1377 ii_p_nc++; 1374 ii_p_nc++;
1378#endif /* QDIO_PERFORMANCE_STATS */
1379 if (unlikely(atomic_read(&q->is_in_shutdown))) { 1375 if (unlikely(atomic_read(&q->is_in_shutdown))) {
1380 qdio_unmark_q(q); 1376 qdio_unmark_q(q);
1381 goto out; 1377 goto out;
@@ -1416,11 +1412,11 @@ __tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
1416 irq_ptr = (struct qdio_irq*)q->irq_ptr; 1412 irq_ptr = (struct qdio_irq*)q->irq_ptr;
1417 for (i=0;i<irq_ptr->no_output_qs;i++) { 1413 for (i=0;i<irq_ptr->no_output_qs;i++) {
1418 oq = irq_ptr->output_qs[i]; 1414 oq = irq_ptr->output_qs[i];
1419#ifdef QDIO_PERFORMANCE_STATS 1415 if (!qdio_is_outbound_q_done(oq)) {
1420 perf_stats.tl_runs--; 1416 if (qdio_performance_stats)
1421#endif /* QDIO_PERFORMANCE_STATS */ 1417 perf_stats.tl_runs--;
1422 if (!qdio_is_outbound_q_done(oq))
1423 __qdio_outbound_processing(oq); 1418 __qdio_outbound_processing(oq);
1419 }
1424 } 1420 }
1425 } 1421 }
1426 1422
@@ -1457,9 +1453,8 @@ __qdio_inbound_processing(struct qdio_q *q)
1457 1453
1458 if (unlikely(qdio_reserve_q(q))) { 1454 if (unlikely(qdio_reserve_q(q))) {
1459 qdio_release_q(q); 1455 qdio_release_q(q);
1460#ifdef QDIO_PERFORMANCE_STATS 1456 if (qdio_performance_stats)
1461 i_p_c++; 1457 i_p_c++;
1462#endif /* QDIO_PERFORMANCE_STATS */
1463 /* as we're sissies, we'll check next time */ 1458 /* as we're sissies, we'll check next time */
1464 if (likely(!atomic_read(&q->is_in_shutdown))) { 1459 if (likely(!atomic_read(&q->is_in_shutdown))) {
1465 qdio_mark_q(q); 1460 qdio_mark_q(q);
@@ -1467,10 +1462,10 @@ __qdio_inbound_processing(struct qdio_q *q)
1467 } 1462 }
1468 return; 1463 return;
1469 } 1464 }
1470#ifdef QDIO_PERFORMANCE_STATS 1465 if (qdio_performance_stats) {
1471 i_p_nc++; 1466 i_p_nc++;
1472 perf_stats.tl_runs++; 1467 perf_stats.tl_runs++;
1473#endif /* QDIO_PERFORMANCE_STATS */ 1468 }
1474 1469
1475again: 1470again:
1476 if (qdio_has_inbound_q_moved(q)) { 1471 if (qdio_has_inbound_q_moved(q)) {
@@ -1516,9 +1511,8 @@ tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
1516 1511
1517 if (unlikely(qdio_reserve_q(q))) { 1512 if (unlikely(qdio_reserve_q(q))) {
1518 qdio_release_q(q); 1513 qdio_release_q(q);
1519#ifdef QDIO_PERFORMANCE_STATS 1514 if (qdio_performance_stats)
1520 ii_p_c++; 1515 ii_p_c++;
1521#endif /* QDIO_PERFORMANCE_STATS */
1522 /* 1516 /*
1523 * as we might just be about to stop polling, we make 1517 * as we might just be about to stop polling, we make
1524 * sure that we check again at least once more 1518 * sure that we check again at least once more
@@ -1609,9 +1603,8 @@ tiqdio_tl(unsigned long data)
1609{ 1603{
1610 QDIO_DBF_TEXT4(0,trace,"iqdio_tl"); 1604 QDIO_DBF_TEXT4(0,trace,"iqdio_tl");
1611 1605
1612#ifdef QDIO_PERFORMANCE_STATS 1606 if (qdio_performance_stats)
1613 perf_stats.tl_runs++; 1607 perf_stats.tl_runs++;
1614#endif /* QDIO_PERFORMANCE_STATS */
1615 1608
1616 tiqdio_inbound_checks(); 1609 tiqdio_inbound_checks();
1617} 1610}
@@ -1918,10 +1911,10 @@ tiqdio_thinint_handler(void)
1918{ 1911{
1919 QDIO_DBF_TEXT4(0,trace,"thin_int"); 1912 QDIO_DBF_TEXT4(0,trace,"thin_int");
1920 1913
1921#ifdef QDIO_PERFORMANCE_STATS 1914 if (qdio_performance_stats) {
1922 perf_stats.thinints++; 1915 perf_stats.thinints++;
1923 perf_stats.start_time_inbound=NOW; 1916 perf_stats.start_time_inbound=NOW;
1924#endif /* QDIO_PERFORMANCE_STATS */ 1917 }
1925 1918
1926 /* SVS only when needed: 1919 /* SVS only when needed:
1927 * issue SVS to benefit from iqdio interrupt avoidance 1920 * issue SVS to benefit from iqdio interrupt avoidance
@@ -1976,18 +1969,17 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
1976 int i; 1969 int i;
1977 struct qdio_q *q; 1970 struct qdio_q *q;
1978 1971
1979#ifdef QDIO_PERFORMANCE_STATS 1972 if (qdio_performance_stats) {
1980 perf_stats.pcis++; 1973 perf_stats.pcis++;
1981 perf_stats.start_time_inbound=NOW; 1974 perf_stats.start_time_inbound=NOW;
1982#endif /* QDIO_PERFORMANCE_STATS */ 1975 }
1983 for (i=0;i<irq_ptr->no_input_qs;i++) { 1976 for (i=0;i<irq_ptr->no_input_qs;i++) {
1984 q=irq_ptr->input_qs[i]; 1977 q=irq_ptr->input_qs[i];
1985 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) 1978 if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
1986 qdio_mark_q(q); 1979 qdio_mark_q(q);
1987 else { 1980 else {
1988#ifdef QDIO_PERFORMANCE_STATS 1981 if (qdio_performance_stats)
1989 perf_stats.tl_runs--; 1982 perf_stats.tl_runs--;
1990#endif /* QDIO_PERFORMANCE_STATS */
1991 __qdio_inbound_processing(q); 1983 __qdio_inbound_processing(q);
1992 } 1984 }
1993 } 1985 }
@@ -1995,11 +1987,10 @@ qdio_handle_pci(struct qdio_irq *irq_ptr)
1995 return; 1987 return;
1996 for (i=0;i<irq_ptr->no_output_qs;i++) { 1988 for (i=0;i<irq_ptr->no_output_qs;i++) {
1997 q=irq_ptr->output_qs[i]; 1989 q=irq_ptr->output_qs[i];
1998#ifdef QDIO_PERFORMANCE_STATS
1999 perf_stats.tl_runs--;
2000#endif /* QDIO_PERFORMANCE_STATS */
2001 if (qdio_is_outbound_q_done(q)) 1990 if (qdio_is_outbound_q_done(q))
2002 continue; 1991 continue;
1992 if (qdio_performance_stats)
1993 perf_stats.tl_runs--;
2003 if (!irq_ptr->sync_done_on_outb_pcis) 1994 if (!irq_ptr->sync_done_on_outb_pcis)
2004 SYNC_MEMORY; 1995 SYNC_MEMORY;
2005 __qdio_outbound_processing(q); 1996 __qdio_outbound_processing(q);
@@ -2045,11 +2036,13 @@ omit_handler_call:
2045} 2036}
2046 2037
2047static void 2038static void
2048qdio_call_shutdown(void *data) 2039qdio_call_shutdown(struct work_struct *work)
2049{ 2040{
2041 struct ccw_device_private *priv;
2050 struct ccw_device *cdev; 2042 struct ccw_device *cdev;
2051 2043
2052 cdev = (struct ccw_device *)data; 2044 priv = container_of(work, struct ccw_device_private, kick_work);
2045 cdev = priv->cdev;
2053 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); 2046 qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
2054 put_device(&cdev->dev); 2047 put_device(&cdev->dev);
2055} 2048}
@@ -2091,7 +2084,7 @@ qdio_timeout_handler(struct ccw_device *cdev)
2091 if (get_device(&cdev->dev)) { 2084 if (get_device(&cdev->dev)) {
2092 /* Can't call shutdown from interrupt context. */ 2085 /* Can't call shutdown from interrupt context. */
2093 PREPARE_WORK(&cdev->private->kick_work, 2086 PREPARE_WORK(&cdev->private->kick_work,
2094 qdio_call_shutdown, (void *)cdev); 2087 qdio_call_shutdown);
2095 queue_work(ccw_device_work, &cdev->private->kick_work); 2088 queue_work(ccw_device_work, &cdev->private->kick_work);
2096 } 2089 }
2097 break; 2090 break;
@@ -3458,19 +3451,18 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3458 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr; 3451 struct qdio_irq *irq = (struct qdio_irq *) q->irq_ptr;
3459 3452
3460 /* This is the outbound handling of queues */ 3453 /* This is the outbound handling of queues */
3461#ifdef QDIO_PERFORMANCE_STATS 3454 if (qdio_performance_stats)
3462 perf_stats.start_time_outbound=NOW; 3455 perf_stats.start_time_outbound=NOW;
3463#endif /* QDIO_PERFORMANCE_STATS */
3464 3456
3465 qdio_do_qdio_fill_output(q,qidx,count,buffers); 3457 qdio_do_qdio_fill_output(q,qidx,count,buffers);
3466 3458
3467 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count; 3459 used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
3468 3460
3469 if (callflags&QDIO_FLAG_DONT_SIGA) { 3461 if (callflags&QDIO_FLAG_DONT_SIGA) {
3470#ifdef QDIO_PERFORMANCE_STATS 3462 if (qdio_performance_stats) {
3471 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; 3463 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3472 perf_stats.outbound_cnt++; 3464 perf_stats.outbound_cnt++;
3473#endif /* QDIO_PERFORMANCE_STATS */ 3465 }
3474 return; 3466 return;
3475 } 3467 }
3476 if (q->is_iqdio_q) { 3468 if (q->is_iqdio_q) {
@@ -3500,9 +3492,8 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3500 qdio_kick_outbound_q(q); 3492 qdio_kick_outbound_q(q);
3501 } else { 3493 } else {
3502 QDIO_DBF_TEXT3(0,trace, "fast-req"); 3494 QDIO_DBF_TEXT3(0,trace, "fast-req");
3503#ifdef QDIO_PERFORMANCE_STATS 3495 if (qdio_performance_stats)
3504 perf_stats.fast_reqs++; 3496 perf_stats.fast_reqs++;
3505#endif /* QDIO_PERFORMANCE_STATS */
3506 } 3497 }
3507 } 3498 }
3508 /* 3499 /*
@@ -3513,10 +3504,10 @@ do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
3513 __qdio_outbound_processing(q); 3504 __qdio_outbound_processing(q);
3514 } 3505 }
3515 3506
3516#ifdef QDIO_PERFORMANCE_STATS 3507 if (qdio_performance_stats) {
3517 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound; 3508 perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
3518 perf_stats.outbound_cnt++; 3509 perf_stats.outbound_cnt++;
3519#endif /* QDIO_PERFORMANCE_STATS */ 3510 }
3520} 3511}
3521 3512
3522/* count must be 1 in iqdio */ 3513/* count must be 1 in iqdio */
@@ -3574,7 +3565,6 @@ do_QDIO(struct ccw_device *cdev,unsigned int callflags,
3574 return 0; 3565 return 0;
3575} 3566}
3576 3567
3577#ifdef QDIO_PERFORMANCE_STATS
3578static int 3568static int
3579qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset, 3569qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3580 int buffer_length, int *eof, void *data) 3570 int buffer_length, int *eof, void *data)
@@ -3590,29 +3580,29 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3590 _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c); 3580 _OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
3591 _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c); 3581 _OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
3592 _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c); 3582 _OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
3593 _OUTP_IT("Number of tasklet runs (total) : %u\n", 3583 _OUTP_IT("Number of tasklet runs (total) : %lu\n",
3594 perf_stats.tl_runs); 3584 perf_stats.tl_runs);
3595 _OUTP_IT("\n"); 3585 _OUTP_IT("\n");
3596 _OUTP_IT("Number of SIGA sync's issued : %u\n", 3586 _OUTP_IT("Number of SIGA sync's issued : %lu\n",
3597 perf_stats.siga_syncs); 3587 perf_stats.siga_syncs);
3598 _OUTP_IT("Number of SIGA in's issued : %u\n", 3588 _OUTP_IT("Number of SIGA in's issued : %lu\n",
3599 perf_stats.siga_ins); 3589 perf_stats.siga_ins);
3600 _OUTP_IT("Number of SIGA out's issued : %u\n", 3590 _OUTP_IT("Number of SIGA out's issued : %lu\n",
3601 perf_stats.siga_outs); 3591 perf_stats.siga_outs);
3602 _OUTP_IT("Number of PCIs caught : %u\n", 3592 _OUTP_IT("Number of PCIs caught : %lu\n",
3603 perf_stats.pcis); 3593 perf_stats.pcis);
3604 _OUTP_IT("Number of adapter interrupts caught : %u\n", 3594 _OUTP_IT("Number of adapter interrupts caught : %lu\n",
3605 perf_stats.thinints); 3595 perf_stats.thinints);
3606 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %u\n", 3596 _OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA) : %lu\n",
3607 perf_stats.fast_reqs); 3597 perf_stats.fast_reqs);
3608 _OUTP_IT("\n"); 3598 _OUTP_IT("\n");
3609 _OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n", 3599 _OUTP_IT("Total time of all inbound actions (us) incl. UL : %lu\n",
3610 perf_stats.inbound_time); 3600 perf_stats.inbound_time);
3611 _OUTP_IT("Number of inbound transfers : %u\n", 3601 _OUTP_IT("Number of inbound transfers : %lu\n",
3612 perf_stats.inbound_cnt); 3602 perf_stats.inbound_cnt);
3613 _OUTP_IT("Total time of all outbound do_QDIOs (us) : %u\n", 3603 _OUTP_IT("Total time of all outbound do_QDIOs (us) : %lu\n",
3614 perf_stats.outbound_time); 3604 perf_stats.outbound_time);
3615 _OUTP_IT("Number of do_QDIOs outbound : %u\n", 3605 _OUTP_IT("Number of do_QDIOs outbound : %lu\n",
3616 perf_stats.outbound_cnt); 3606 perf_stats.outbound_cnt);
3617 _OUTP_IT("\n"); 3607 _OUTP_IT("\n");
3618 3608
@@ -3620,12 +3610,10 @@ qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
3620} 3610}
3621 3611
3622static struct proc_dir_entry *qdio_perf_proc_file; 3612static struct proc_dir_entry *qdio_perf_proc_file;
3623#endif /* QDIO_PERFORMANCE_STATS */
3624 3613
3625static void 3614static void
3626qdio_add_procfs_entry(void) 3615qdio_add_procfs_entry(void)
3627{ 3616{
3628#ifdef QDIO_PERFORMANCE_STATS
3629 proc_perf_file_registration=0; 3617 proc_perf_file_registration=0;
3630 qdio_perf_proc_file=create_proc_entry(QDIO_PERF, 3618 qdio_perf_proc_file=create_proc_entry(QDIO_PERF,
3631 S_IFREG|0444,&proc_root); 3619 S_IFREG|0444,&proc_root);
@@ -3637,20 +3625,58 @@ qdio_add_procfs_entry(void)
3637 QDIO_PRINT_WARN("was not able to register perf. " \ 3625 QDIO_PRINT_WARN("was not able to register perf. " \
3638 "proc-file (%i).\n", 3626 "proc-file (%i).\n",
3639 proc_perf_file_registration); 3627 proc_perf_file_registration);
3640#endif /* QDIO_PERFORMANCE_STATS */
3641} 3628}
3642 3629
3643static void 3630static void
3644qdio_remove_procfs_entry(void) 3631qdio_remove_procfs_entry(void)
3645{ 3632{
3646#ifdef QDIO_PERFORMANCE_STATS
3647 perf_stats.tl_runs=0; 3633 perf_stats.tl_runs=0;
3648 3634
3649 if (!proc_perf_file_registration) /* means if it went ok earlier */ 3635 if (!proc_perf_file_registration) /* means if it went ok earlier */
3650 remove_proc_entry(QDIO_PERF,&proc_root); 3636 remove_proc_entry(QDIO_PERF,&proc_root);
3651#endif /* QDIO_PERFORMANCE_STATS */
3652} 3637}
3653 3638
3639/**
3640 * attributes in sysfs
3641 *****************************************************************************/
3642
3643static ssize_t
3644qdio_performance_stats_show(struct bus_type *bus, char *buf)
3645{
3646 return sprintf(buf, "%i\n", qdio_performance_stats ? 1 : 0);
3647}
3648
3649static ssize_t
3650qdio_performance_stats_store(struct bus_type *bus, const char *buf, size_t count)
3651{
3652 char *tmp;
3653 int i;
3654
3655 i = simple_strtoul(buf, &tmp, 16);
3656 if ((i == 0) || (i == 1)) {
3657 if (i == qdio_performance_stats)
3658 return count;
3659 qdio_performance_stats = i;
3660 if (i==0) {
3661 /* reset perf. stat. info */
3662 i_p_nc = 0;
3663 i_p_c = 0;
3664 ii_p_nc = 0;
3665 ii_p_c = 0;
3666 o_p_nc = 0;
3667 o_p_c = 0;
3668 memset(&perf_stats, 0, sizeof(struct qdio_perf_stats));
3669 }
3670 } else {
3671 QDIO_PRINT_WARN("QDIO performance_stats: write 0 or 1 to this file!\n");
3672 return -EINVAL;
3673 }
3674 return count;
3675}
3676
3677static BUS_ATTR(qdio_performance_stats, 0644, qdio_performance_stats_show,
3678 qdio_performance_stats_store);
3679
3654static void 3680static void
3655tiqdio_register_thinints(void) 3681tiqdio_register_thinints(void)
3656{ 3682{
@@ -3695,6 +3721,7 @@ qdio_release_qdio_memory(void)
3695 kfree(indicators); 3721 kfree(indicators);
3696} 3722}
3697 3723
3724
3698static void 3725static void
3699qdio_unregister_dbf_views(void) 3726qdio_unregister_dbf_views(void)
3700{ 3727{
@@ -3796,9 +3823,7 @@ static int __init
3796init_QDIO(void) 3823init_QDIO(void)
3797{ 3824{
3798 int res; 3825 int res;
3799#ifdef QDIO_PERFORMANCE_STATS
3800 void *ptr; 3826 void *ptr;
3801#endif /* QDIO_PERFORMANCE_STATS */
3802 3827
3803 printk("qdio: loading %s\n",version); 3828 printk("qdio: loading %s\n",version);
3804 3829
@@ -3811,13 +3836,12 @@ init_QDIO(void)
3811 return res; 3836 return res;
3812 3837
3813 QDIO_DBF_TEXT0(0,setup,"initQDIO"); 3838 QDIO_DBF_TEXT0(0,setup,"initQDIO");
3839 res = bus_create_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3814 3840
3815#ifdef QDIO_PERFORMANCE_STATS 3841 memset((void*)&perf_stats,0,sizeof(perf_stats));
3816 memset((void*)&perf_stats,0,sizeof(perf_stats));
3817 QDIO_DBF_TEXT0(0,setup,"perfstat"); 3842 QDIO_DBF_TEXT0(0,setup,"perfstat");
3818 ptr=&perf_stats; 3843 ptr=&perf_stats;
3819 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); 3844 QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*));
3820#endif /* QDIO_PERFORMANCE_STATS */
3821 3845
3822 qdio_add_procfs_entry(); 3846 qdio_add_procfs_entry();
3823 3847
@@ -3841,7 +3865,7 @@ cleanup_QDIO(void)
3841 qdio_release_qdio_memory(); 3865 qdio_release_qdio_memory();
3842 qdio_unregister_dbf_views(); 3866 qdio_unregister_dbf_views();
3843 mempool_destroy(qdio_mempool_scssc); 3867 mempool_destroy(qdio_mempool_scssc);
3844 3868 bus_remove_file(&ccw_bus_type, &bus_attr_qdio_performance_stats);
3845 printk("qdio: %s: module removed\n",version); 3869 printk("qdio: %s: module removed\n",version);
3846} 3870}
3847 3871
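Taken together, the qdio.c hunks replace the compile-time QDIO_PERFORMANCE_STATS blocks with tests of the new qdio_performance_stats integer and export that integer as a bus attribute, making the statistics a runtime switch: assuming the usual ccw bus name, echo 1 > /sys/bus/ccw/qdio_performance_stats enables them, and writing 0 disables them and clears the counters. The sysfs half is the standard 2.6.19-era BUS_ATTR() pattern; a minimal sketch with hypothetical names:

	#include <linux/kernel.h>
	#include <linux/device.h>

	static int my_stats_on;

	static ssize_t my_stats_show(struct bus_type *bus, char *buf)
	{
		return sprintf(buf, "%i\n", my_stats_on);
	}

	static ssize_t my_stats_store(struct bus_type *bus, const char *buf,
				      size_t count)
	{
		if (buf[0] != '0' && buf[0] != '1')
			return -EINVAL;
		my_stats_on = buf[0] - '0';
		return count;
	}

	/* Defines bus_attr_my_stats; hooked up with
	 * bus_create_file(&some_bus_type, &bus_attr_my_stats) and torn down
	 * with bus_remove_file(), as init_QDIO()/cleanup_QDIO() do above. */
	static BUS_ATTR(my_stats, 0644, my_stats_show, my_stats_store);
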
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 42927c1b7451..ec9af72b2afc 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -12,10 +12,6 @@
12#endif /* CONFIG_QDIO_DEBUG */ 12#endif /* CONFIG_QDIO_DEBUG */
13#define QDIO_USE_PROCESSING_STATE 13#define QDIO_USE_PROCESSING_STATE
14 14
15#ifdef CONFIG_QDIO_PERF_STATS
16#define QDIO_PERFORMANCE_STATS
17#endif /* CONFIG_QDIO_PERF_STATS */
18
19#define QDIO_MINIMAL_BH_RELIEF_TIME 16 15#define QDIO_MINIMAL_BH_RELIEF_TIME 16
20#define QDIO_TIMER_POLL_VALUE 1 16#define QDIO_TIMER_POLL_VALUE 1
21#define IQDIO_TIMER_POLL_VALUE 1 17#define IQDIO_TIMER_POLL_VALUE 1
@@ -409,25 +405,23 @@ do_clear_global_summary(void)
409#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 405#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
410#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 406#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
411 407
412#ifdef QDIO_PERFORMANCE_STATS
413struct qdio_perf_stats { 408struct qdio_perf_stats {
414 unsigned int tl_runs; 409 unsigned long tl_runs;
415 410
416 unsigned int siga_outs; 411 unsigned long siga_outs;
417 unsigned int siga_ins; 412 unsigned long siga_ins;
418 unsigned int siga_syncs; 413 unsigned long siga_syncs;
419 unsigned int pcis; 414 unsigned long pcis;
420 unsigned int thinints; 415 unsigned long thinints;
421 unsigned int fast_reqs; 416 unsigned long fast_reqs;
422 417
423 __u64 start_time_outbound; 418 __u64 start_time_outbound;
424 unsigned int outbound_cnt; 419 unsigned long outbound_cnt;
425 unsigned int outbound_time; 420 unsigned long outbound_time;
426 __u64 start_time_inbound; 421 __u64 start_time_inbound;
427 unsigned int inbound_cnt; 422 unsigned long inbound_cnt;
428 unsigned int inbound_time; 423 unsigned long inbound_time;
429}; 424};
430#endif /* QDIO_PERFORMANCE_STATS */
431 425
432/* unlikely as the later the better */ 426/* unlikely as the later the better */
433#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q) 427#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index e4dc947e74e9..ad60afe5dd11 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -33,6 +33,7 @@
33#include <linux/kthread.h> 33#include <linux/kthread.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35#include <asm/s390_rdev.h> 35#include <asm/s390_rdev.h>
36#include <asm/reset.h>
36 37
37#include "ap_bus.h" 38#include "ap_bus.h"
38 39
@@ -1128,6 +1129,19 @@ static void ap_poll_thread_stop(void)
1128 mutex_unlock(&ap_poll_thread_mutex); 1129 mutex_unlock(&ap_poll_thread_mutex);
1129} 1130}
1130 1131
1132static void ap_reset(void)
1133{
1134 int i, j;
1135
1136 for (i = 0; i < AP_DOMAINS; i++)
1137 for (j = 0; j < AP_DEVICES; j++)
1138 ap_reset_queue(AP_MKQID(j, i));
1139}
1140
1141static struct reset_call ap_reset_call = {
1142 .fn = ap_reset,
1143};
1144
1131/** 1145/**
1132 * The module initialization code. 1146 * The module initialization code.
1133 */ 1147 */
@@ -1144,6 +1158,7 @@ int __init ap_module_init(void)
1144 printk(KERN_WARNING "AP instructions not installed.\n"); 1158 printk(KERN_WARNING "AP instructions not installed.\n");
1145 return -ENODEV; 1159 return -ENODEV;
1146 } 1160 }
1161 register_reset_call(&ap_reset_call);
1147 1162
1148 /* Create /sys/bus/ap. */ 1163 /* Create /sys/bus/ap. */
1149 rc = bus_register(&ap_bus_type); 1164 rc = bus_register(&ap_bus_type);
@@ -1197,6 +1212,7 @@ out_bus:
1197 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1212 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1198 bus_unregister(&ap_bus_type); 1213 bus_unregister(&ap_bus_type);
1199out: 1214out:
1215 unregister_reset_call(&ap_reset_call);
1200 return rc; 1216 return rc;
1201} 1217}
1202 1218
@@ -1227,6 +1243,7 @@ void ap_module_exit(void)
1227 for (i = 0; ap_bus_attrs[i]; i++) 1243 for (i = 0; ap_bus_attrs[i]; i++)
1228 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); 1244 bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
1229 bus_unregister(&ap_bus_type); 1245 bus_unregister(&ap_bus_type);
1246 unregister_reset_call(&ap_reset_call);
1230} 1247}
1231 1248
1232#ifndef CONFIG_ZCRYPT_MONOLITHIC 1249#ifndef CONFIG_ZCRYPT_MONOLITHIC
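The ap_bus.c change hooks the AP bus into the s390 reset machinery: the reset_call registered here is run before a re-IPL or dump, so that every possible AP queue is reset first (the nested loop over AP_DOMAINS x AP_DEVICES above). The asm/reset.h interface, as used by this hunk, reduces to a callback struct; a generic sketch with illustrative names:

	#include <asm/reset.h>

	static void my_bus_reset(void)
	{
		/* quiesce the hardware before the machine is re-IPLed */
	}

	static struct reset_call my_reset_call = {
		.fn = my_bus_reset,
	};

	/* register_reset_call(&my_reset_call) during module init;
	 * unregister_reset_call(&my_reset_call) on exit and on the init
	 * error path, mirroring ap_module_init()/ap_module_exit() above. */
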
diff --git a/include/asm-s390/dasd.h b/include/asm-s390/dasd.h
index c042f9578081..604f68fa6f56 100644
--- a/include/asm-s390/dasd.h
+++ b/include/asm-s390/dasd.h
@@ -69,11 +69,13 @@ typedef struct dasd_information2_t {
69 * 0x01: readonly (ro) 69 * 0x01: readonly (ro)
70 * 0x02: use diag discipline (diag) 70 * 0x02: use diag discipline (diag)
71 * 0x04: set the device initially online (internal use only) 71 * 0x04: set the device initially online (internal use only)
72 * 0x08: enable ERP related logging
72 */ 73 */
73#define DASD_FEATURE_DEFAULT 0x00 74#define DASD_FEATURE_DEFAULT 0x00
74#define DASD_FEATURE_READONLY 0x01 75#define DASD_FEATURE_READONLY 0x01
75#define DASD_FEATURE_USEDIAG 0x02 76#define DASD_FEATURE_USEDIAG 0x02
76#define DASD_FEATURE_INITIAL_ONLINE 0x04 77#define DASD_FEATURE_INITIAL_ONLINE 0x04
78#define DASD_FEATURE_ERPLOG 0x08
77 79
78#define DASD_PARTN_BITS 2 80#define DASD_PARTN_BITS 2
79 81
diff --git a/include/asm-s390/page.h b/include/asm-s390/page.h
index 363ea761d5ee..05ea6f172786 100644
--- a/include/asm-s390/page.h
+++ b/include/asm-s390/page.h
@@ -127,6 +127,26 @@ page_get_storage_key(unsigned long addr)
127 return skey; 127 return skey;
128} 128}
129 129
130extern unsigned long max_pfn;
131
132static inline int pfn_valid(unsigned long pfn)
133{
134 unsigned long dummy;
135 int ccode;
136
137 if (pfn >= max_pfn)
138 return 0;
139
140 asm volatile(
141 " lra %0,0(%2)\n"
142 " ipm %1\n"
143 " srl %1,28\n"
144 : "=d" (dummy), "=d" (ccode)
145 : "a" (pfn << PAGE_SHIFT)
146 : "cc");
147 return !ccode;
148}
149
130#endif /* !__ASSEMBLY__ */ 150#endif /* !__ASSEMBLY__ */
131 151
132/* to align the pointer to the (next) page boundary */ 152/* to align the pointer to the (next) page boundary */
@@ -138,8 +158,6 @@ page_get_storage_key(unsigned long addr)
138#define __va(x) (void *)(unsigned long)(x) 158#define __va(x) (void *)(unsigned long)(x)
139#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 159#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
140#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT) 160#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
141
142#define pfn_valid(pfn) ((pfn) < max_mapnr)
143#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 161#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
144 162
145#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ 163#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
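The new pfn_valid() in page.h replaces the flat bound check against max_mapnr with a hardware probe, which is what a virtual memmap with holes requires: a pfn below max_pfn may still have no storage behind it. The inline assembly is a standard s390 condition-code capture idiom; an annotated reading (my interpretation of the sequence, not text from the patch):

	/*
	 *   lra  %0,0(%2)   load real address of (pfn << PAGE_SHIFT); the
	 *                   condition code is non-zero when the address
	 *                   cannot be resolved, i.e. the pfn falls into a
	 *                   memory hole left unmapped by the new vmem code
	 *   ipm  %1         insert program mask: the cc lands in bits 2-3
	 *                   of the 32-bit register
	 *   srl  %1,28      shift right 28 so ccode holds just the cc
	 *
	 * pfn_valid() then returns !ccode: true only if storage exists.
	 */
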
diff --git a/include/asm-s390/pgalloc.h b/include/asm-s390/pgalloc.h
index 28619de5ecae..0707a7e2fc16 100644
--- a/include/asm-s390/pgalloc.h
+++ b/include/asm-s390/pgalloc.h
@@ -25,8 +25,11 @@ extern void diag10(unsigned long addr);
25 * Page allocation orders. 25 * Page allocation orders.
26 */ 26 */
27#ifndef __s390x__ 27#ifndef __s390x__
28# define PTE_ALLOC_ORDER 0
29# define PMD_ALLOC_ORDER 0
28# define PGD_ALLOC_ORDER 1 30# define PGD_ALLOC_ORDER 1
29#else /* __s390x__ */ 31#else /* __s390x__ */
32# define PTE_ALLOC_ORDER 0
30# define PMD_ALLOC_ORDER 2 33# define PMD_ALLOC_ORDER 2
31# define PGD_ALLOC_ORDER 2 34# define PGD_ALLOC_ORDER 2
32#endif /* __s390x__ */ 35#endif /* __s390x__ */
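For reference, an allocation order n buys 2^n contiguous 4 KiB pages, so the new constants in pgalloc.h line up with the z/Architecture table sizes (the entry counts below are my arithmetic, not part of the patch):

	/*
	 *   PTE_ALLOC_ORDER 0 -> 1 page  =  4 KiB page table
	 *   PMD_ALLOC_ORDER 2 -> 4 pages = 16 KiB (2048 entries * 8 bytes)
	 *   PGD_ALLOC_ORDER 2 -> 4 pages = 16 KiB on 64 bit; the 31-bit
	 *   PGD_ALLOC_ORDER 1 is 8 KiB (2048 entries * 4 bytes)
	 */
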
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 2d968a69ed1f..ae61aca5d483 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -107,23 +107,25 @@ extern char empty_zero_page[PAGE_SIZE];
107 * The vmalloc() routines leaves a hole of 4kB between each vmalloced 107 * The vmalloc() routines leaves a hole of 4kB between each vmalloced
108 * area for the same reason. ;) 108 * area for the same reason. ;)
109 */ 109 */
110extern unsigned long vmalloc_end;
110#define VMALLOC_OFFSET (8*1024*1024) 111#define VMALLOC_OFFSET (8*1024*1024)
111#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \ 112#define VMALLOC_START (((unsigned long) high_memory + VMALLOC_OFFSET) \
112 & ~(VMALLOC_OFFSET-1)) 113 & ~(VMALLOC_OFFSET-1))
114#define VMALLOC_END vmalloc_end
113 115
114/* 116/*
115 * We need some free virtual space to be able to do vmalloc. 117 * We need some free virtual space to be able to do vmalloc.
116 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc 118 * VMALLOC_MIN_SIZE defines the minimum size of the vmalloc
117 * area. On a machine with 2GB memory we make sure that we 119 * area. On a machine with 2GB memory we make sure that we
118 * have at least 128MB free space for vmalloc. On a machine 120 * have at least 128MB free space for vmalloc. On a machine
119 * with 4TB we make sure we have at least 1GB. 121 * with 4TB we make sure we have at least 128GB.
120 */ 122 */
121#ifndef __s390x__ 123#ifndef __s390x__
122#define VMALLOC_MIN_SIZE 0x8000000UL 124#define VMALLOC_MIN_SIZE 0x8000000UL
123#define VMALLOC_END 0x80000000UL 125#define VMALLOC_END_INIT 0x80000000UL
124#else /* __s390x__ */ 126#else /* __s390x__ */
125#define VMALLOC_MIN_SIZE 0x40000000UL 127#define VMALLOC_MIN_SIZE 0x2000000000UL
126#define VMALLOC_END 0x40000000000UL 128#define VMALLOC_END_INIT 0x40000000000UL
127#endif /* __s390x__ */ 129#endif /* __s390x__ */
128 130
129/* 131/*
@@ -815,11 +817,17 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
815 817
816#define kern_addr_valid(addr) (1) 818#define kern_addr_valid(addr) (1)
817 819
820extern int add_shared_memory(unsigned long start, unsigned long size);
821extern int remove_shared_memory(unsigned long start, unsigned long size);
822
818/* 823/*
819 * No page table caches to initialise 824 * No page table caches to initialise
820 */ 825 */
821#define pgtable_cache_init() do { } while (0) 826#define pgtable_cache_init() do { } while (0)
822 827
828#define __HAVE_ARCH_MEMMAP_INIT
829extern void memmap_init(unsigned long, int, unsigned long, unsigned long);
830
823#define __HAVE_ARCH_PTEP_ESTABLISH 831#define __HAVE_ARCH_PTEP_ESTABLISH
824#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS 832#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
825#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG 833#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
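Finally, the pgtable.h changes turn the top of the vmalloc area into a boot-time variable: VMALLOC_END now reads the new vmalloc_end, which starts from VMALLOC_END_INIT and can be lowered during early setup so that the virtual memmap, and the DCSS segments behind add_shared_memory()/remove_shared_memory(), get address space above it. The revised constants match the corrected comment; spelling out the arithmetic:

	/*
	 *   VMALLOC_END_INIT = 0x40000000000UL = 2^42 bytes = 4 TiB
	 *   VMALLOC_MIN_SIZE = 0x2000000000UL  = 2^37 bytes = 128 GiB
	 * i.e. "on a machine with 4TB we make sure we have at least 128GB",
	 * exactly as the updated comment says; 31 bit keeps a 2 GiB initial
	 * end (0x80000000UL) with a 128 MiB minimum (0x8000000UL).
	 */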