 Documentation/kernel-parameters.txt |   7
 arch/s390/Kconfig                   |   1
 arch/s390/include/asm/pgtable.h     | 197
 arch/s390/include/asm/qdio.h        |   4
 arch/s390/include/asm/syscall.h     |   2
 arch/s390/kernel/irq.c              |  95
 arch/s390/kernel/setup.c            |   2
 arch/s390/mm/hugetlbpage.c          | 103
 arch/s390/mm/pgtable.c              |   8
 arch/s390/pci/pci.c                 |   4
 arch/s390/pci/pci_clp.c             |   4
 arch/s390/pci/pci_debug.c           |   4
 arch/s390/pci/pci_dma.c             |  50
 arch/s390/pci/pci_event.c           |   4
 arch/s390/pci/pci_sysfs.c           |   4
 drivers/pci/hotplug/s390_pci_hpc.c  |   4
 drivers/s390/block/dasd.c           | 196
 drivers/s390/block/dasd_eckd.c      |  30
 drivers/s390/block/dasd_int.h       |   5
 drivers/s390/block/dasd_ioctl.c     |  33
 drivers/s390/char/con3215.c         |  32
 drivers/s390/cio/qdio_setup.c       |  53
 drivers/s390/net/qeth_core.h        |   8
 drivers/s390/net/qeth_core_main.c   | 161
 drivers/s390/scsi/zfcp_qdio.c       |  49
 drivers/watchdog/Kconfig            |   2
 26 files changed, 619 insertions(+), 443 deletions(-)
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 9344d833b7ea..21ae0e4b9e7e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3058,6 +3058,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
 	S	[KNL] Run init in single mode
 
+	s390_iommu=	[HW,S390]
+			Set s390 IOTLB flushing mode
+		strict
+			With strict flushing every unmap operation will result in
+			an IOTLB flush. Default is lazy flushing before reuse,
+			which is faster.
+
 	sa1100ir	[NET]
 			See drivers/net/irda/sa1100_ir.c.
 
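
The option is consumed at boot by an early __setup() hook (added in arch/s390/pci/pci_dma.c below). To select strict flushing, append it to the kernel parameter line, e.g. in the boot loader configuration (illustrative placeholder line):

    root=/dev/dasda1 ... s390_iommu=strict

Leaving the option out keeps the faster lazy default, where the IOTLB is only flushed before a DMA address range is reused.
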
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 720a11d339eb..8ca60f8d5683 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -92,6 +92,7 @@ config S390
 	select ARCH_INLINE_WRITE_UNLOCK_IRQ
 	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE
 	select ARCH_SAVE_PAGE_KEYS if HIBERNATION
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index fcba5e03839f..b76317c1f3eb 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -287,7 +287,14 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
 #define _SEGMENT_ENTRY_COMMON	0x10	/* common segment bit */
 #define _SEGMENT_ENTRY_PTL	0x0f	/* page table length */
-#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_PROTECT
+
+#define _SEGMENT_ENTRY_DIRTY	0	/* No sw dirty bit for 31-bit */
+#define _SEGMENT_ENTRY_YOUNG	0	/* No sw young bit for 31-bit */
+#define _SEGMENT_ENTRY_READ	0	/* No sw read bit for 31-bit */
+#define _SEGMENT_ENTRY_WRITE	0	/* No sw write bit for 31-bit */
+#define _SEGMENT_ENTRY_LARGE	0	/* No large pages for 31-bit */
+#define _SEGMENT_ENTRY_BITS_LARGE 0
+#define _SEGMENT_ENTRY_ORIGIN_LARGE 0
 
 #define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
@@ -350,7 +357,7 @@ extern unsigned long MODULES_END;
 
 /* Bits in the segment table entry */
 #define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
-#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
+#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff0ff33UL
 #define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
 #define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin */
 #define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit */
@@ -359,30 +366,34 @@ extern unsigned long MODULES_END;
 #define _SEGMENT_ENTRY		(0)
 #define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)
 
-#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page */
-#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override */
-#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit */
-#define _SEGMENT_ENTRY_YOUNG	0x002	/* SW segment young bit */
-#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG
+#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
+#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
+#define _SEGMENT_ENTRY_SPLIT	0x0800	/* THP splitting bit */
+#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
+#define _SEGMENT_ENTRY_CO	0x0100	/* change-recording override */
+#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
+#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */
 
 /*
  * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
- *				..R...I...y.
- * prot-none, old		..0...1...1.
- * prot-none, young		..1...1...1.
- * read-only, old		..1...1...0.
- * read-only, young		..1...0...1.
- * read-write, old		..0...1...0.
- * read-write, young		..0...0...1.
+ *				dy..R...I...wr
+ * prot-none, clean, old	00..1...1...00
+ * prot-none, clean, young	01..1...1...00
+ * prot-none, dirty, old	10..1...1...00
+ * prot-none, dirty, young	11..1...1...00
+ * read-only, clean, old	00..1...1...01
+ * read-only, clean, young	01..1...0...01
+ * read-only, dirty, old	10..1...1...01
+ * read-only, dirty, young	11..1...0...01
+ * read-write, clean, old	00..1...1...11
+ * read-write, clean, young	01..1...0...11
+ * read-write, dirty, old	10..0...1...11
+ * read-write, dirty, young	11..0...0...11
  * The segment table origin is used to distinguish empty (origin==0) from
  * read-write, old segment table entries (origin!=0)
  */
 
-#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */
-
-/* Set of bits not changed in pmd_modify */
-#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
-				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)
+#define _SEGMENT_ENTRY_SPLIT_BIT 11	/* THP splitting bit number */
 
 /* Page status table bits for virtualization */
 #define PGSTE_ACC_BITS	0xf000000000000000UL
@@ -455,10 +466,11 @@ extern unsigned long MODULES_END;
  * Segment entry (large page) protection definitions.
  */
 #define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
-				 _SEGMENT_ENTRY_NONE)
-#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_INVALID | \
 				 _SEGMENT_ENTRY_PROTECT)
-#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_INVALID)
+#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_PROTECT | \
+				 _SEGMENT_ENTRY_READ)
+#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_READ | \
+				 _SEGMENT_ENTRY_WRITE)
 
 static inline int mm_has_pgste(struct mm_struct *mm)
 {
@@ -569,25 +581,23 @@ static inline int pmd_none(pmd_t pmd)
 
 static inline int pmd_large(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
 	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
-#else
-	return 0;
-#endif
 }
 
-static inline int pmd_prot_none(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
-		(pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
+	unsigned long origin_mask;
+
+	origin_mask = _SEGMENT_ENTRY_ORIGIN;
+	if (pmd_large(pmd))
+		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
+	return (pmd_val(pmd) & origin_mask) >> PAGE_SHIFT;
 }
 
 static inline int pmd_bad(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
 	if (pmd_large(pmd))
 		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
-#endif
 	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
 }
 
@@ -607,20 +617,22 @@ extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
 #define __HAVE_ARCH_PMD_WRITE
 static inline int pmd_write(pmd_t pmd)
 {
-	if (pmd_prot_none(pmd))
-		return 0;
-	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+	int dirty = 1;
+	if (pmd_large(pmd))
+		dirty = (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
+	return dirty;
 }
 
 static inline int pmd_young(pmd_t pmd)
 {
-	int young = 0;
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd))
-		young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
-	else
+	int young = 1;
+	if (pmd_large(pmd))
 		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
-#endif
 	return young;
 }
 
@@ -1391,7 +1403,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
 #define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
 #define pte_page(x) pfn_to_page(pte_pfn(x))
 
-#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
+#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
 
 /* Find an entry in the lowest level page table.. */
 #define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
@@ -1413,41 +1425,75 @@ static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
 	return pgprot_val(SEGMENT_WRITE);
 }
 
-static inline pmd_t pmd_mkyoung(pmd_t pmd)
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd)) {
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_WRITE;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkwrite(pmd_t pmd)
+{
+	pmd_val(pmd) |= _SEGMENT_ENTRY_WRITE;
+	if (pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+		return pmd;
+	pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	return pmd;
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) &= ~_SEGMENT_ENTRY_DIRTY;
 		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	} else {
+	}
+	return pmd;
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) |= _SEGMENT_ENTRY_DIRTY;
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
+			pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
+	}
+	return pmd;
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+	if (pmd_large(pmd)) {
 		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
+		if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
+			pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
 	}
-#endif
 	return pmd;
 }
 
 static inline pmd_t pmd_mkold(pmd_t pmd)
 {
-#ifdef CONFIG_64BIT
-	if (pmd_prot_none(pmd)) {
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	} else {
+	if (pmd_large(pmd)) {
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
 		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
 	}
-#endif
 	return pmd;
 }
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
-	int young;
-
-	young = pmd_young(pmd);
-	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
+	if (pmd_large(pmd)) {
+		pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN_LARGE |
+			_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_YOUNG |
+			_SEGMENT_ENTRY_LARGE | _SEGMENT_ENTRY_SPLIT;
+		pmd_val(pmd) |= massage_pgprot_pmd(newprot);
+		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
+			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+		if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
+			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
+		return pmd;
+	}
+	pmd_val(pmd) &= _SEGMENT_ENTRY_ORIGIN;
 	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
-	if (young)
-		pmd = pmd_mkyoung(pmd);
 	return pmd;
 }
 
@@ -1455,16 +1501,9 @@ static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
 {
 	pmd_t __pmd;
 	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
-	return pmd_mkyoung(__pmd);
+	return __pmd;
 }
 
-static inline pmd_t pmd_mkwrite(pmd_t pmd)
-{
-	/* Do not clobber PROT_NONE segments! */
-	if (!pmd_prot_none(pmd))
-		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
-	return pmd;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
 
 static inline void __pmdp_csp(pmd_t *pmdp)
@@ -1555,34 +1594,21 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 
 static inline int pmd_trans_splitting(pmd_t pmd)
 {
-	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
+	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) &&
+		(pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT);
 }
 
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 			      pmd_t *pmdp, pmd_t entry)
 {
-	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
-		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
 	*pmdp = entry;
 }
 
 static inline pmd_t pmd_mkhuge(pmd_t pmd)
 {
 	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
-	return pmd;
-}
-
-static inline pmd_t pmd_wrprotect(pmd_t pmd)
-{
-	/* Do not clobber PROT_NONE segments! */
-	if (!pmd_prot_none(pmd))
-		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
-	return pmd;
-}
-
-static inline pmd_t pmd_mkdirty(pmd_t pmd)
-{
-	/* No dirty bit in the segment table entry. */
+	pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
+	pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
 	return pmd;
 }
 
@@ -1647,11 +1673,6 @@ static inline int has_transparent_hugepage(void)
 {
 	return MACHINE_HAS_HPAGE ? 1 : 0;
 }
-
-static inline unsigned long pmd_pfn(pmd_t pmd)
-{
-	return pmd_val(pmd) >> PAGE_SHIFT;
-}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 /*
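
To see how the new software bits compose, here is a minimal stand-alone sketch (not part of the patch; the constants are copied from the hunks above) that checks two rows of the segment-table encoding table:

    #include <assert.h>
    #include <stdio.h>

    #define SE_DIRTY   0x2000UL   /* SW segment dirty bit */
    #define SE_YOUNG   0x1000UL   /* SW segment young bit */
    #define SE_LARGE   0x0400UL   /* STE-format control, large page */
    #define SE_PROTECT 0x0200UL   /* page protection bit */
    #define SE_INVALID 0x0020UL   /* invalid segment table entry */
    #define SE_WRITE   0x0002UL   /* SW segment write bit */
    #define SE_READ    0x0001UL   /* SW segment read bit */

    int main(void)
    {
        /* "read-write, dirty, young" = 11..0...0...11 */
        unsigned long e = SE_LARGE | SE_DIRTY | SE_YOUNG | SE_READ | SE_WRITE;
        assert(!(e & SE_PROTECT));  /* writable and dirty: no protection */
        assert(!(e & SE_INVALID));  /* young: hardware-valid */

        /* "read-write, clean, old" = 00..1...1...11: PROTECT stays set so
         * the first store faults and pmd_mkdirty() can clear it. */
        e = SE_LARGE | SE_READ | SE_WRITE | SE_PROTECT | SE_INVALID;
        assert(!(e & SE_DIRTY));
        puts("encoding rows check out");
        return 0;
    }
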
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index d786c634e052..06f3034605a1 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -415,6 +415,10 @@ struct qdio_brinfo_entry_l2 {
 #define QDIO_FLAG_SYNC_OUTPUT		0x02
 #define QDIO_FLAG_PCI_OUT		0x10
 
+int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count);
+void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count);
+
 extern int qdio_allocate(struct qdio_initialize *);
 extern int qdio_establish(struct qdio_initialize *);
 extern int qdio_activate(struct ccw_device *);
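
A hypothetical caller of the new helpers might look like this (sketch only; QDIO_MAX_BUFFERS_PER_Q is the existing per-queue buffer count from this header, and the error behavior is assumed):

    static struct qdio_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];

    static int demo_init(void)
    {
        /* allocate the SBAL array for one queue */
        return qdio_alloc_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
    }

    static void demo_requeue(void)
    {
        /* clear the SBALs before re-establishing the queue */
        qdio_reset_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
    }

    static void demo_exit(void)
    {
        qdio_free_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
    }
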
diff --git a/arch/s390/include/asm/syscall.h b/arch/s390/include/asm/syscall.h
index abad78d5b10c..5bc12598ae9e 100644
--- a/arch/s390/include/asm/syscall.h
+++ b/arch/s390/include/asm/syscall.h
@@ -54,7 +54,7 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->gprs[2] = error ? -error : val;
+	regs->gprs[2] = error ? error : val;
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
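
The sign fix matters because, per the cross-architecture syscall_set_return_value() contract, error already arrives as a negative errno (or zero), so negating it again stored a positive value. A worked example of the two expressions:

    int error = -22;                    /* -EINVAL, as passed by callers */
    long old_ret = error ? -error : 0;  /* stored 22: wrong sign */
    long new_ret = error ? error : 0;   /* stores -22, what user space expects */
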
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 99b0b09646ca..8eb82443cfbd 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -30,6 +30,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat);
 EXPORT_PER_CPU_SYMBOL_GPL(irq_stat);
 
 struct irq_class {
+	int irq;
 	char *name;
 	char *desc;
 };
@@ -45,9 +46,9 @@ struct irq_class {
  * up with having a sum which accounts each interrupt twice.
  */
 static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
-	[EXT_INTERRUPT]  = {.name = "EXT"},
-	[IO_INTERRUPT]   = {.name = "I/O"},
-	[THIN_INTERRUPT] = {.name = "AIO"},
+	{.irq = EXT_INTERRUPT,  .name = "EXT"},
+	{.irq = IO_INTERRUPT,   .name = "I/O"},
+	{.irq = THIN_INTERRUPT, .name = "AIO"},
 };
 
 /*
@@ -56,38 +57,38 @@ static const struct irq_class irqclass_main_desc[NR_IRQS_BASE] = {
  * In addition this list contains non external / I/O events like NMIs.
  */
 static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = {
-	[IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"},
-	[IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"},
-	[IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"},
-	[IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"},
-	[IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"},
-	[IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
-	[IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"},
-	[IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"},
-	[IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"},
-	[IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"},
-	[IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
-	[IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
-	[IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
-	[IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
-	[IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
-	[IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"},
-	[IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"},
-	[IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"},
-	[IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"},
-	[IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"},
-	[IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"},
-	[IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"},
-	[IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"},
-	[IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"},
-	[IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"},
-	[IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"},
-	[IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" },
-	[IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" },
-	[IRQIO_VIR] = {.name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
-	[IRQIO_VAI] = {.name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
-	[NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"},
-	[CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"},
+	{.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
+	{.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
+	{.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
+	{.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
+	{.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
+	{.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
+	{.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
+	{.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
+	{.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
+	{.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
+	{.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
+	{.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
+	{.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
+	{.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
+	{.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
+	{.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
+	{.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
+	{.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
+	{.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
+	{.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
+	{.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
+	{.irq = IRQIO_CLW, .name = "CLW", .desc = "[I/O] CLAW"},
+	{.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
+	{.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
+	{.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
+	{.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
+	{.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
+	{.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
+	{.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
+	{.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
+	{.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
+	{.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
 };
 
 void __init init_IRQ(void)
@@ -116,33 +117,37 @@ void do_IRQ(struct pt_regs *regs, int irq)
  */
 int show_interrupts(struct seq_file *p, void *v)
 {
-	int irq = *(loff_t *) v;
-	int cpu;
+	int index = *(loff_t *) v;
+	int cpu, irq;
 
 	get_online_cpus();
-	if (irq == 0) {
+	if (index == 0) {
 		seq_puts(p, "           ");
 		for_each_online_cpu(cpu)
 			seq_printf(p, "CPU%d       ", cpu);
 		seq_putc(p, '\n');
 		goto out;
 	}
-	if (irq < NR_IRQS) {
-		if (irq >= NR_IRQS_BASE)
+	if (index < NR_IRQS) {
+		if (index >= NR_IRQS_BASE)
 			goto out;
-		seq_printf(p, "%s: ", irqclass_main_desc[irq].name);
+		/* Adjust index to process irqclass_main_desc array entries */
+		index--;
+		seq_printf(p, "%s: ", irqclass_main_desc[index].name);
+		irq = irqclass_main_desc[index].irq;
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
 		seq_putc(p, '\n');
 		goto out;
 	}
-	for (irq = 0; irq < NR_ARCH_IRQS; irq++) {
-		seq_printf(p, "%s: ", irqclass_sub_desc[irq].name);
+	for (index = 0; index < NR_ARCH_IRQS; index++) {
+		seq_printf(p, "%s: ", irqclass_sub_desc[index].name);
+		irq = irqclass_sub_desc[index].irq;
 		for_each_online_cpu(cpu)
 			seq_printf(p, "%10u ",
 				   per_cpu(irq_stat, cpu).irqs[irq]);
-		if (irqclass_sub_desc[irq].desc)
-			seq_printf(p, "  %s", irqclass_sub_desc[irq].desc);
+		if (irqclass_sub_desc[index].desc)
+			seq_printf(p, "  %s", irqclass_sub_desc[index].desc);
 		seq_putc(p, '\n');
 	}
 out:
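
With the .irq member in place, the rendered /proc/interrupts keeps its familiar shape while row order now follows the arrays rather than the interrupt numbers. Abridged illustration (counts invented):

               CPU0       CPU1
    EXT:       9012       8997
    I/O:      12345      11890
    AIO:          2          0
    CLK:       4501       4498  [EXT] Clock Comparator
    EXC:         12          9  [EXT] External Call
    ...
    NMI:          0          0  [NMI] Machine Check
    RST:          0          0  [CPU] CPU Restart
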
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 1e2264b46e4c..ae1d5be7dd88 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -501,6 +501,8 @@ static int kdump_mem_notifier(struct notifier_block *nb,
 {
 	struct memory_notify *arg = data;
 
+	if (action != MEM_GOING_OFFLINE)
+		return NOTIFY_OK;
 	if (arg->start_pfn < PFN_DOWN(resource_size(&crashk_res)))
 		return NOTIFY_BAD;
 	if (arg->start_pfn > PFN_DOWN(crashk_res.end))
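
The added check narrows the veto: memory-hotplug notifiers fire for several actions, and only MEM_GOING_OFFLINE can actually take the crash kernel region away, so everything else may pass. The resulting decision flow, sketched with a hypothetical helper:

    if (action != MEM_GOING_OFFLINE)
        return NOTIFY_OK;               /* onlining etc. never conflicts */
    if (overlaps_crashkernel(arg))      /* hypothetical range check */
        return NOTIFY_BAD;              /* keep crash kernel memory online */
    return NOTIFY_OK;
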
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 0ff66a7e29bb..389bc17934b7 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -10,42 +10,33 @@
 
 static inline pmd_t __pte_to_pmd(pte_t pte)
 {
-	int none, young, prot;
 	pmd_t pmd;
 
 	/*
 	 * Convert encoding		pte bits	pmd bits
-	 *				.IR...wrdytp	..R...I...y.
-	 * empty			.10...000000 -> ..0...1...0.
-	 * prot-none, clean, old	.11...000001 -> ..0...1...1.
-	 * prot-none, clean, young	.11...000101 -> ..1...1...1.
-	 * prot-none, dirty, old	.10...001001 -> ..0...1...1.
-	 * prot-none, dirty, young	.10...001101 -> ..1...1...1.
-	 * read-only, clean, old	.11...010001 -> ..1...1...0.
-	 * read-only, clean, young	.01...010101 -> ..1...0...1.
-	 * read-only, dirty, old	.11...011001 -> ..1...1...0.
-	 * read-only, dirty, young	.01...011101 -> ..1...0...1.
-	 * read-write, clean, old	.11...110001 -> ..0...1...0.
-	 * read-write, clean, young	.01...110101 -> ..0...0...1.
-	 * read-write, dirty, old	.10...111001 -> ..0...1...0.
-	 * read-write, dirty, young	.00...111101 -> ..0...0...1.
-	 * Huge ptes are dirty by definition, a clean pte is made dirty
-	 * by the conversion.
+	 *				.IR...wrdytp	dy..R...I...wr
+	 * empty			.10...000000 -> 00..0...1...00
+	 * prot-none, clean, old	.11...000001 -> 00..1...1...00
+	 * prot-none, clean, young	.11...000101 -> 01..1...1...00
+	 * prot-none, dirty, old	.10...001001 -> 10..1...1...00
+	 * prot-none, dirty, young	.10...001101 -> 11..1...1...00
+	 * read-only, clean, old	.11...010001 -> 00..1...1...01
+	 * read-only, clean, young	.01...010101 -> 01..1...0...01
+	 * read-only, dirty, old	.11...011001 -> 10..1...1...01
+	 * read-only, dirty, young	.01...011101 -> 11..1...0...01
+	 * read-write, clean, old	.11...110001 -> 00..1...1...11
+	 * read-write, clean, young	.01...110101 -> 01..1...0...11
+	 * read-write, dirty, old	.10...111001 -> 10..0...1...11
+	 * read-write, dirty, young	.00...111101 -> 11..0...0...11
 	 */
 	if (pte_present(pte)) {
 		pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
-		if (pte_val(pte) & _PAGE_INVALID)
-			pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
-		none = (pte_val(pte) & _PAGE_PRESENT) &&
-			!(pte_val(pte) & _PAGE_READ) &&
-			!(pte_val(pte) & _PAGE_WRITE);
-		prot = (pte_val(pte) & _PAGE_PROTECT) &&
-			!(pte_val(pte) & _PAGE_WRITE);
-		young = pte_val(pte) & _PAGE_YOUNG;
-		if (none || young)
-			pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
-		if (prot || (none && young))
-			pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_READ) >> 4;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_WRITE) >> 4;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_INVALID) >> 5;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_PROTECT);
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
+		pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
 	} else
 		pmd_val(pmd) = _SEGMENT_ENTRY_INVALID;
 	return pmd;
@@ -56,34 +47,31 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 	pte_t pte;
 
 	/*
 	 * Convert encoding		pmd bits	pte bits
-	 *				..R...I...y.	.IR...wrdytp
-	 * empty			..0...1...0. -> .10...000000
-	 * prot-none, old		..0...1...1. -> .10...001001
-	 * prot-none, young		..1...1...1. -> .10...001101
-	 * read-only, old		..1...1...0. -> .11...011001
-	 * read-only, young		..1...0...1. -> .01...011101
-	 * read-write, old		..0...1...0. -> .10...111001
-	 * read-write, young		..0...0...1. -> .00...111101
-	 * Huge ptes are dirty by definition
+	 *				dy..R...I...wr	.IR...wrdytp
+	 * empty			00..0...1...00 -> .10...000000
+	 * prot-none, clean, old	00..0...1...00 -> .10...000001
+	 * prot-none, clean, young	01..0...1...00 -> .10...000101
+	 * prot-none, dirty, old	10..0...1...00 -> .10...001001
+	 * prot-none, dirty, young	11..0...1...00 -> .10...001101
+	 * read-only, clean, old	00..1...1...01 -> .11...010001
+	 * read-only, clean, young	01..1...1...01 -> .11...010101
+	 * read-only, dirty, old	10..1...1...01 -> .11...011001
+	 * read-only, dirty, young	11..1...1...01 -> .11...011101
+	 * read-write, clean, old	00..0...1...11 -> .10...110001
+	 * read-write, clean, young	01..0...1...11 -> .10...110101
+	 * read-write, dirty, old	10..0...1...11 -> .10...111001
+	 * read-write, dirty, young	11..0...1...11 -> .10...111101
 	 */
 	if (pmd_present(pmd)) {
-		pte_val(pte) = _PAGE_PRESENT | _PAGE_LARGE | _PAGE_DIRTY |
-			       (pmd_val(pmd) & PAGE_MASK);
-		if (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID)
-			pte_val(pte) |= _PAGE_INVALID;
-		if (pmd_prot_none(pmd)) {
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
-				pte_val(pte) |= _PAGE_YOUNG;
-		} else {
-			pte_val(pte) |= _PAGE_READ;
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT)
-				pte_val(pte) |= _PAGE_PROTECT;
-			else
-				pte_val(pte) |= _PAGE_WRITE;
-			if (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG)
-				pte_val(pte) |= _PAGE_YOUNG;
-		}
+		pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
+		pte_val(pte) |= _PAGE_LARGE | _PAGE_PRESENT;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_READ) << 4;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
+		pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
 	} else
 		pte_val(pte) = _PAGE_INVALID;
 	return pte;
@@ -96,6 +84,7 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	pmd = __pte_to_pmd(pte);
 	if (!MACHINE_HAS_HPAGE) {
+		/* Emulated huge ptes lose the dirty and young bit */
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) |= pte_page(pte)[1].index;
 	} else
@@ -113,6 +102,8 @@ pte_t huge_ptep_get(pte_t *ptep)
 		origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
 		pmd_val(pmd) |= *(unsigned long *) origin;
+		/* Emulated huge ptes are young and dirty by definition */
+		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
 	}
 	return __pmd_to_pte(pmd);
 }
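
The conversion helpers work because every software pte bit maps to its pmd counterpart with a fixed shift. A stand-alone check (the _PAGE_* values are quoted from the same kernel's pte definitions and are an assumption here, not part of this patch):

    #include <assert.h>

    #define PAGE_YOUNG   0x004UL
    #define PAGE_DIRTY   0x008UL
    #define PAGE_READ    0x010UL
    #define PAGE_WRITE   0x020UL
    #define PAGE_PROTECT 0x200UL
    #define PAGE_INVALID 0x400UL

    #define SEG_READ    0x0001UL
    #define SEG_WRITE   0x0002UL
    #define SEG_INVALID 0x0020UL
    #define SEG_PROTECT 0x0200UL
    #define SEG_YOUNG   0x1000UL
    #define SEG_DIRTY   0x2000UL

    int main(void)
    {
        assert(PAGE_READ    >> 4  == SEG_READ);     /* the ">> 4" pairs  */
        assert(PAGE_WRITE   >> 4  == SEG_WRITE);
        assert(PAGE_INVALID >> 5  == SEG_INVALID);  /* the ">> 5" line   */
        assert(PAGE_PROTECT       == SEG_PROTECT);  /* copied unshifted  */
        assert(PAGE_DIRTY   << 10 == SEG_DIRTY);    /* the "<< 10" pairs */
        assert(PAGE_YOUNG   << 10 == SEG_YOUNG);
        return 0;
    }
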
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 37b8241ec784..19daa53a3da4 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -1279,6 +1279,7 @@ static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
 {
 	unsigned long next, *table, *new;
 	struct page *page;
+	spinlock_t *ptl;
 	pmd_t *pmd;
 
 	pmd = pmd_offset(pud, addr);
@@ -1296,7 +1297,7 @@ again:
 	if (!new)
 		return -ENOMEM;
 
-	spin_lock(&mm->page_table_lock);
+	ptl = pmd_lock(mm, pmd);
 	if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
 		/* Nuke pmd entry pointing to the "short" page table */
 		pmdp_flush_lazy(mm, addr, pmd);
@@ -1310,7 +1311,7 @@ again:
 		page_table_free_rcu(tlb, table);
 		new = NULL;
 	}
-	spin_unlock(&mm->page_table_lock);
+	spin_unlock(ptl);
 	if (new) {
 		page_table_free_pgste(new);
 		goto again;
@@ -1432,6 +1433,9 @@ int pmdp_set_access_flags(struct vm_area_struct *vma,
 {
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
 
+	entry = pmd_mkyoung(entry);
+	if (dirty)
+		entry = pmd_mkdirty(entry);
 	if (pmd_same(*pmdp, entry))
 		return 0;
 	pmdp_invalidate(vma, address, pmdp);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index 30de42730b2f..2fa7b14b9c08 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -15,8 +15,8 @@
  *   Thomas Klein
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 96545d7659fd..6e22a247de9b 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/slab.h>
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index c5c66840ac00..eec598c5939f 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/seq_file.h>
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index f91c03119804..4cbb29a4d615 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -16,6 +16,13 @@
 
 static struct kmem_cache *dma_region_table_cache;
 static struct kmem_cache *dma_page_table_cache;
+static int s390_iommu_strict;
+
+static int zpci_refresh_global(struct zpci_dev *zdev)
+{
+	return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
+				  zdev->iommu_pages * PAGE_SIZE);
+}
 
 static unsigned long *dma_alloc_cpu_table(void)
 {
@@ -155,18 +162,15 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
 	}
 
 	/*
-	 * rpcit is not required to establish new translations when previously
-	 * invalid translation-table entries are validated, however it is
-	 * required when altering previously valid entries.
+	 * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+	 * translations when previously invalid translation-table entries are
+	 * validated. With lazy unmap, it also is skipped for previously valid
+	 * entries, but a global rpcit is then required before any address can
+	 * be re-used, i.e. after each iommu bitmap wrap-around.
 	 */
 	if (!zdev->tlb_refresh &&
-	    ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
-		/*
-		 * TODO: also need to check that the old entry is indeed INVALID
-		 * and not only for one page but for the whole range...
-		 * -> now we WARN_ON in that case but with lazy unmap that
-		 * needs to be redone!
-		 */
+	    (!s390_iommu_strict ||
+	     ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)))
 		goto no_refresh;
 
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
@@ -220,16 +224,21 @@ static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
 static unsigned long dma_alloc_iommu(struct zpci_dev *zdev, int size)
 {
 	unsigned long offset, flags;
+	int wrap = 0;
 
 	spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
 	offset = __dma_alloc_iommu(zdev, zdev->next_bit, size);
-	if (offset == -1)
+	if (offset == -1) {
+		/* wrap-around */
 		offset = __dma_alloc_iommu(zdev, 0, size);
+		wrap = 1;
+	}
 
 	if (offset != -1) {
 		zdev->next_bit = offset + size;
-		if (zdev->next_bit >= zdev->iommu_pages)
-			zdev->next_bit = 0;
+		if (!zdev->tlb_refresh && !s390_iommu_strict && wrap)
+			/* global flush after wrap-around with lazy unmap */
+			zpci_refresh_global(zdev);
 	}
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 	return offset;
@@ -243,7 +252,11 @@ static void dma_free_iommu(struct zpci_dev *zdev, unsigned long offset, int size
 	if (!zdev->iommu_bitmap)
 		goto out;
 	bitmap_clear(zdev->iommu_bitmap, offset, size);
-	if (offset >= zdev->next_bit)
+	/*
+	 * Lazy flush for unmap: need to move next_bit to avoid address re-use
+	 * until wrap-around.
+	 */
+	if (!s390_iommu_strict && offset >= zdev->next_bit)
 		zdev->next_bit = offset + size;
 out:
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
@@ -504,3 +517,12 @@ struct dma_map_ops s390_dma_ops = {
 	/* dma_supported is unconditionally true without a callback */
 };
 EXPORT_SYMBOL_GPL(s390_dma_ops);
+
+static int __init s390_iommu_setup(char *str)
+{
+	if (!strncmp(str, "strict", 6))
+		s390_iommu_strict = 1;
+	return 0;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
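
As a toy model of the lazy policy (illustrative only, not driver code): freed DMA addresses stay quarantined because next_bit only moves forward, and one global flush at the wrap-around replaces a flush per unmap:

    #include <stdio.h>

    #define PAGES 8
    static int map[PAGES], next_bit;

    static int alloc_page_lazy(void)
    {
        for (int i = next_bit; i < PAGES; i++)
            if (!map[i]) { map[i] = 1; next_bit = i + 1; return i; }
        printf("wrap-around: one global IOTLB flush\n");
        for (int i = 0; i < PAGES; i++)
            if (!map[i]) { map[i] = 1; next_bit = i + 1; return i; }
        return -1;
    }

    static void free_page_lazy(int i)
    {
        map[i] = 0;                 /* no IOTLB flush here */
        if (i >= next_bit)          /* keep the address quarantined */
            next_bit = i + 1;
    }

    int main(void)
    {
        free_page_lazy(alloc_page_lazy());  /* page 0, still quarantined */
        for (int i = 0; i < PAGES; i++)
            alloc_page_lazy();              /* wraps once, flushes once  */
        return 0;
    }

In strict mode (s390_iommu=strict) the flush would instead happen inside free_page_lazy() on every unmap.
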
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 6d7f5a3016ca..460fdb21cf61 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/pci.h>
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
index 9190214b8702..fa3ce891e597 100644
--- a/arch/s390/pci/pci_sysfs.c
+++ b/arch/s390/pci/pci_sysfs.c
@@ -5,8 +5,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/kernel.h>
 #include <linux/stat.h>
diff --git a/drivers/pci/hotplug/s390_pci_hpc.c b/drivers/pci/hotplug/s390_pci_hpc.c
index d1332d2f8730..d77e46bca54c 100644
--- a/drivers/pci/hotplug/s390_pci_hpc.c
+++ b/drivers/pci/hotplug/s390_pci_hpc.c
@@ -7,8 +7,8 @@
  *   Jan Glauber <jang@linux.vnet.ibm.com>
  */
 
-#define COMPONENT "zPCI hpc"
-#define pr_fmt(fmt) COMPONENT ": " fmt
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 1eef0f586950..5df05f26b7d9 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -42,8 +42,10 @@
  * SECTION: exported variables of dasd.c
  */
 debug_info_t *dasd_debug_area;
+EXPORT_SYMBOL(dasd_debug_area);
 static struct dentry *dasd_debugfs_root_entry;
 struct dasd_discipline *dasd_diag_discipline_pointer;
+EXPORT_SYMBOL(dasd_diag_discipline_pointer);
 void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
 
 MODULE_AUTHOR("Holger Smolinski <Holger.Smolinski@de.ibm.com>");
@@ -164,6 +166,7 @@ struct dasd_block *dasd_alloc_block(void)
 
 	return block;
 }
+EXPORT_SYMBOL_GPL(dasd_alloc_block);
 
 /*
  * Free memory of a device structure.
@@ -172,6 +175,7 @@ void dasd_free_block(struct dasd_block *block)
 {
 	kfree(block);
 }
+EXPORT_SYMBOL_GPL(dasd_free_block);
 
 /*
  * Make a new device known to the system.
@@ -281,10 +285,15 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
 {
 	int rc;
 
+	if (device->discipline->basic_to_known) {
+		rc = device->discipline->basic_to_known(device);
+		if (rc)
+			return rc;
+	}
+
 	if (device->block) {
 		dasd_profile_exit(&device->block->profile);
-		if (device->block->debugfs_dentry)
-			debugfs_remove(device->block->debugfs_dentry);
+		debugfs_remove(device->block->debugfs_dentry);
 		dasd_gendisk_free(device->block);
 		dasd_block_clear_timer(device->block);
 	}
@@ -293,9 +302,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
 		return rc;
 	dasd_device_clear_timer(device);
 	dasd_profile_exit(&device->profile);
-	if (device->debugfs_dentry)
-		debugfs_remove(device->debugfs_dentry);
-
+	debugfs_remove(device->debugfs_dentry);
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
 		debug_unregister(device->debug_area);
@@ -374,11 +381,6 @@ static int dasd_state_ready_to_basic(struct dasd_device *device)
 {
 	int rc;
 
-	if (device->discipline->ready_to_basic) {
-		rc = device->discipline->ready_to_basic(device);
-		if (rc)
-			return rc;
-	}
 	device->state = DASD_STATE_BASIC;
 	if (device->block) {
 		struct dasd_block *block = device->block;
@@ -579,6 +581,7 @@ void dasd_kick_device(struct dasd_device *device)
 	/* queue call to dasd_kick_device to the kernel event daemon. */
 	schedule_work(&device->kick_work);
 }
+EXPORT_SYMBOL(dasd_kick_device);
 
 /*
  * dasd_reload_device will schedule a call to do_reload_device to the kernel
@@ -639,6 +642,7 @@ void dasd_set_target_state(struct dasd_device *device, int target)
 	mutex_unlock(&device->state_mutex);
 	dasd_put_device(device);
 }
+EXPORT_SYMBOL(dasd_set_target_state);
 
 /*
  * Enable devices with device numbers in [from..to].
@@ -661,6 +665,7 @@ void dasd_enable_device(struct dasd_device *device)
 	if (device->discipline->kick_validate)
 		device->discipline->kick_validate(device);
 }
+EXPORT_SYMBOL(dasd_enable_device);
 
 /*
  * SECTION: device operation (interrupt handler, start i/o, term i/o ...)
@@ -972,37 +977,37 @@ static void dasd_stats_seq_print(struct seq_file *m,
 	seq_printf(m, "total_sectors %u\n", data->dasd_io_sects);
 	seq_printf(m, "total_pav %u\n", data->dasd_io_alias);
 	seq_printf(m, "total_hpf %u\n", data->dasd_io_tpm);
-	seq_printf(m, "histogram_sectors ");
+	seq_puts(m, "histogram_sectors ");
 	dasd_stats_array(m, data->dasd_io_secs);
-	seq_printf(m, "histogram_io_times ");
+	seq_puts(m, "histogram_io_times ");
 	dasd_stats_array(m, data->dasd_io_times);
-	seq_printf(m, "histogram_io_times_weighted ");
+	seq_puts(m, "histogram_io_times_weighted ");
 	dasd_stats_array(m, data->dasd_io_timps);
-	seq_printf(m, "histogram_time_build_to_ssch ");
+	seq_puts(m, "histogram_time_build_to_ssch ");
 	dasd_stats_array(m, data->dasd_io_time1);
-	seq_printf(m, "histogram_time_ssch_to_irq ");
+	seq_puts(m, "histogram_time_ssch_to_irq ");
 	dasd_stats_array(m, data->dasd_io_time2);
-	seq_printf(m, "histogram_time_ssch_to_irq_weighted ");
+	seq_puts(m, "histogram_time_ssch_to_irq_weighted ");
 	dasd_stats_array(m, data->dasd_io_time2ps);
-	seq_printf(m, "histogram_time_irq_to_end ");
+	seq_puts(m, "histogram_time_irq_to_end ");
 	dasd_stats_array(m, data->dasd_io_time3);
-	seq_printf(m, "histogram_ccw_queue_length ");
+	seq_puts(m, "histogram_ccw_queue_length ");
 	dasd_stats_array(m, data->dasd_io_nr_req);
 	seq_printf(m, "total_read_requests %u\n", data->dasd_read_reqs);
 	seq_printf(m, "total_read_sectors %u\n", data->dasd_read_sects);
 	seq_printf(m, "total_read_pav %u\n", data->dasd_read_alias);
 	seq_printf(m, "total_read_hpf %u\n", data->dasd_read_tpm);
-	seq_printf(m, "histogram_read_sectors ");
+	seq_puts(m, "histogram_read_sectors ");
 	dasd_stats_array(m, data->dasd_read_secs);
-	seq_printf(m, "histogram_read_times ");
+	seq_puts(m, "histogram_read_times ");
 	dasd_stats_array(m, data->dasd_read_times);
-	seq_printf(m, "histogram_read_time_build_to_ssch ");
+	seq_puts(m, "histogram_read_time_build_to_ssch ");
 	dasd_stats_array(m, data->dasd_read_time1);
-	seq_printf(m, "histogram_read_time_ssch_to_irq ");
+	seq_puts(m, "histogram_read_time_ssch_to_irq ");
 	dasd_stats_array(m, data->dasd_read_time2);
-	seq_printf(m, "histogram_read_time_irq_to_end ");
+	seq_puts(m, "histogram_read_time_irq_to_end ");
 	dasd_stats_array(m, data->dasd_read_time3);
-	seq_printf(m, "histogram_read_ccw_queue_length ");
+	seq_puts(m, "histogram_read_ccw_queue_length ");
 	dasd_stats_array(m, data->dasd_read_nr_req);
 }
 
@@ -1016,7 +1021,7 @@ static int dasd_stats_show(struct seq_file *m, void *v)
 	data = profile->data;
 	if (!data) {
 		spin_unlock_bh(&profile->lock);
-		seq_printf(m, "disabled\n");
+		seq_puts(m, "disabled\n");
 		return 0;
 	}
 	dasd_stats_seq_print(m, data);
@@ -1069,7 +1074,7 @@ static ssize_t dasd_stats_global_write(struct file *file,
 static int dasd_stats_global_show(struct seq_file *m, void *v)
 {
 	if (!dasd_global_profile_level) {
-		seq_printf(m, "disabled\n");
+		seq_puts(m, "disabled\n");
 		return 0;
 	}
 	dasd_stats_seq_print(m, &dasd_global_profile_data);
@@ -1111,23 +1116,17 @@ static void dasd_profile_init(struct dasd_profile *profile,
 static void dasd_profile_exit(struct dasd_profile *profile)
 {
 	dasd_profile_off(profile);
-	if (profile->dentry) {
-		debugfs_remove(profile->dentry);
-		profile->dentry = NULL;
-	}
+	debugfs_remove(profile->dentry);
+	profile->dentry = NULL;
 }
 
 static void dasd_statistics_removeroot(void)
 {
 	dasd_global_profile_level = DASD_PROFILE_OFF;
-	if (dasd_global_profile_dentry) {
-		debugfs_remove(dasd_global_profile_dentry);
-		dasd_global_profile_dentry = NULL;
-	}
-	if (dasd_debugfs_global_entry)
-		debugfs_remove(dasd_debugfs_global_entry);
-	if (dasd_debugfs_root_entry)
-		debugfs_remove(dasd_debugfs_root_entry);
+	debugfs_remove(dasd_global_profile_dentry);
+	dasd_global_profile_dentry = NULL;
+	debugfs_remove(dasd_debugfs_global_entry);
+	debugfs_remove(dasd_debugfs_root_entry);
 }
 
 static void dasd_statistics_createroot(void)
@@ -1178,7 +1177,7 @@ static void dasd_statistics_removeroot(void)
 
 int dasd_stats_generic_show(struct seq_file *m, void *v)
 {
-	seq_printf(m, "Statistics are not activated in this kernel\n");
+	seq_puts(m, "Statistics are not activated in this kernel\n");
 	return 0;
 }
 
@@ -1243,6 +1242,7 @@ struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
 	dasd_get_device(device);
 	return cqr;
 }
+EXPORT_SYMBOL(dasd_kmalloc_request);
 
 struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 					  int datasize,
@@ -1282,6 +1282,7 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
 	dasd_get_device(device);
 	return cqr;
 }
+EXPORT_SYMBOL(dasd_smalloc_request);
 
 /*
  * Free memory of a channel program. This function needs to free all the
@@ -1304,6 +1305,7 @@ void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 	kfree(cqr);
 	dasd_put_device(device);
 }
+EXPORT_SYMBOL(dasd_kfree_request);
 
 void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 {
@@ -1314,6 +1316,7 @@ void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
 	spin_unlock_irqrestore(&device->mem_lock, flags);
 	dasd_put_device(device);
 }
+EXPORT_SYMBOL(dasd_sfree_request);
 
 /*
  * Check discipline magic in cqr.
@@ -1391,6 +1394,7 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
 	dasd_schedule_device_bh(device);
 	return rc;
 }
+EXPORT_SYMBOL(dasd_term_IO);
 
 /*
  * Start the i/o. This start_IO can fail if the channel is really busy.
@@ -1509,6 +1513,7 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
 	cqr->intrc = rc;
 	return rc;
 }
+EXPORT_SYMBOL(dasd_start_IO);
 
 /*
  * Timeout function for dasd devices. This is used for different purposes
@@ -1541,6 +1546,7 @@ void dasd_device_set_timer(struct dasd_device *device, int expires)
 	else
 		mod_timer(&device->timer, jiffies + expires);
 }
+EXPORT_SYMBOL(dasd_device_set_timer);
 
 /*
  * Clear timeout for a device.
@@ -1549,6 +1555,7 @@ void dasd_device_clear_timer(struct dasd_device *device)
1549{ 1555{
1550 del_timer(&device->timer); 1556 del_timer(&device->timer);
1551} 1557}
1558EXPORT_SYMBOL(dasd_device_clear_timer);
1552 1559
1553static void dasd_handle_killed_request(struct ccw_device *cdev, 1560static void dasd_handle_killed_request(struct ccw_device *cdev,
1554 unsigned long intparm) 1561 unsigned long intparm)
@@ -1601,6 +1608,7 @@ void dasd_generic_handle_state_change(struct dasd_device *device)
1601 if (device->block) 1608 if (device->block)
1602 dasd_schedule_block_bh(device->block); 1609 dasd_schedule_block_bh(device->block);
1603} 1610}
1611EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
1604 1612
1605/* 1613/*
1606 * Interrupt handler for "normal" ssch-io based dasd devices. 1614 * Interrupt handler for "normal" ssch-io based dasd devices.
@@ -1667,8 +1675,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1667 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1675 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1668 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1676 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1669 cqr->status = DASD_CQR_CLEARED; 1677 cqr->status = DASD_CQR_CLEARED;
1678 if (cqr->callback_data == DASD_SLEEPON_START_TAG)
1679 cqr->callback_data = DASD_SLEEPON_END_TAG;
1670 dasd_device_clear_timer(device); 1680 dasd_device_clear_timer(device);
1671 wake_up(&dasd_flush_wq); 1681 wake_up(&dasd_flush_wq);
1682 wake_up(&generic_waitq);
1672 dasd_schedule_device_bh(device); 1683 dasd_schedule_device_bh(device);
1673 return; 1684 return;
1674 } 1685 }
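The two added lines matter because of the driver's sleep-on handshake: a synchronous waiter tags its request and sleeps on generic_waitq until the tag flips, so a request that is cleared behind the waiter's back must have its tag flipped before wake_up() or the waiter never returns. A condensed sketch of that handshake, simplified from the sleep-on code in this file (the real waiter, _wait_for_wakeup(), reads callback_data under the ccw device lock):

	/* waiter side: tag the request, queue it, sleep until it is done */
	cqr->callback_data = DASD_SLEEPON_START_TAG;
	dasd_add_request_tail(cqr);
	wait_event(generic_waitq, cqr->callback_data == DASD_SLEEPON_END_TAG);

	/* completion side: flip the tag first, then wake all sleepers */
	cqr->callback_data = DASD_SLEEPON_END_TAG;
	wake_up(&generic_waitq);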
@@ -1722,6 +1733,7 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1722 dasd_device_clear_timer(device); 1733 dasd_device_clear_timer(device);
1723 dasd_schedule_device_bh(device); 1734 dasd_schedule_device_bh(device);
1724} 1735}
1736EXPORT_SYMBOL(dasd_int_handler);
1725 1737
1726enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb) 1738enum uc_todo dasd_generic_uc_handler(struct ccw_device *cdev, struct irb *irb)
1727{ 1739{
@@ -1995,6 +2007,7 @@ finished:
1995 __dasd_device_process_final_queue(device, &flush_queue); 2007 __dasd_device_process_final_queue(device, &flush_queue);
1996 return rc; 2008 return rc;
1997} 2009}
2010EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
1998 2011
1999/* 2012/*
2000 * Acquire the device lock and process queues for the device. 2013 * Acquire the device lock and process queues for the device.
@@ -2034,6 +2047,7 @@ void dasd_schedule_device_bh(struct dasd_device *device)
2034 dasd_get_device(device); 2047 dasd_get_device(device);
2035 tasklet_hi_schedule(&device->tasklet); 2048 tasklet_hi_schedule(&device->tasklet);
2036} 2049}
2050EXPORT_SYMBOL(dasd_schedule_device_bh);
2037 2051
2038void dasd_device_set_stop_bits(struct dasd_device *device, int bits) 2052void dasd_device_set_stop_bits(struct dasd_device *device, int bits)
2039{ 2053{
@@ -2066,6 +2080,7 @@ void dasd_add_request_head(struct dasd_ccw_req *cqr)
2066 dasd_schedule_device_bh(device); 2080 dasd_schedule_device_bh(device);
2067 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2081 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2068} 2082}
2083EXPORT_SYMBOL(dasd_add_request_head);
2069 2084
2070/* 2085/*
2071 * Queue a request to the tail of the device ccw_queue. 2086 * Queue a request to the tail of the device ccw_queue.
@@ -2084,6 +2099,7 @@ void dasd_add_request_tail(struct dasd_ccw_req *cqr)
2084 dasd_schedule_device_bh(device); 2099 dasd_schedule_device_bh(device);
2085 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags); 2100 spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
2086} 2101}
2102EXPORT_SYMBOL(dasd_add_request_tail);
2087 2103
2088/* 2104/*
2089 * Wakeup helper for the 'sleep_on' functions. 2105 * Wakeup helper for the 'sleep_on' functions.
@@ -2291,13 +2307,27 @@ retry:
2291 2307
2292 rc = 0; 2308 rc = 0;
2293 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { 2309 list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) {
2294 if (__dasd_sleep_on_erp(cqr)) 2310 /*
2295 rc = 1; 2311 * for alias devices simplify error recovery and
2312 * return to upper layer
2313 */
2314 if (cqr->startdev != cqr->basedev &&
2315 (cqr->status == DASD_CQR_TERMINATED ||
2316 cqr->status == DASD_CQR_NEED_ERP))
2317 return -EAGAIN;
2318 else {
2319 /* normal recovery for basedev IO */
2320 if (__dasd_sleep_on_erp(cqr)) {
2321 if (cqr->status != DASD_CQR_TERMINATED &&
2322 cqr->status != DASD_CQR_NEED_ERP)
2323 break;
2324 rc = 1;
2325 }
2326 }
2296 } 2327 }
2297 if (rc) 2328 if (rc)
2298 goto retry; 2329 goto retry;
2299 2330
2300
2301 return 0; 2331 return 0;
2302} 2332}
2303 2333
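The startdev/basedev test added above changes the contract of the sleep-on queue loop: requests that were started on an alias (PAV) device and failed are no longer recovered here, the loop returns -EAGAIN so the caller can rebuild and restart everything on the base device. A caller-side sketch of that contract, where my_format_once() and the saved_start/saved_stop variables are hypothetical (the real retry loop is the dasd_format() rework in dasd_ioctl.c further down):

	/* first attempt may be routed to an alias device */
	rc = my_format_once(base, fdata, 1 /* enable_pav */);
	if (rc == -EAGAIN) {
		/* alias request failed: restore range, retry on base only */
		fdata->start_unit = saved_start;
		fdata->stop_unit = saved_stop;
		rc = my_format_once(base, fdata, 0 /* enable_pav */);
	}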
@@ -2309,6 +2339,7 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
2309{ 2339{
2310 return _dasd_sleep_on(cqr, 0); 2340 return _dasd_sleep_on(cqr, 0);
2311} 2341}
2342EXPORT_SYMBOL(dasd_sleep_on);
2312 2343
2313/* 2344/*
2314 * Start requests from a ccw_queue and wait for their completion. 2345 * Start requests from a ccw_queue and wait for their completion.
@@ -2327,6 +2358,7 @@ int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
2327{ 2358{
2328 return _dasd_sleep_on(cqr, 1); 2359 return _dasd_sleep_on(cqr, 1);
2329} 2360}
2361EXPORT_SYMBOL(dasd_sleep_on_interruptible);
2330 2362
2331/* 2363/*
2332 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock 2364 * Whoa nelly now it gets really hairy. For some functions (e.g. steal lock
@@ -2401,6 +2433,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
2401 2433
2402 return rc; 2434 return rc;
2403} 2435}
2436EXPORT_SYMBOL(dasd_sleep_on_immediatly);
2404 2437
2405/* 2438/*
2406 * Cancels a request that was started with dasd_sleep_on_req. 2439 * Cancels a request that was started with dasd_sleep_on_req.
@@ -2423,6 +2456,8 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
2423 case DASD_CQR_QUEUED: 2456 case DASD_CQR_QUEUED:
2424 /* request was not started - just set to cleared */ 2457 /* request was not started - just set to cleared */
2425 cqr->status = DASD_CQR_CLEARED; 2458 cqr->status = DASD_CQR_CLEARED;
2459 if (cqr->callback_data == DASD_SLEEPON_START_TAG)
2460 cqr->callback_data = DASD_SLEEPON_END_TAG;
2426 break; 2461 break;
2427 case DASD_CQR_IN_IO: 2462 case DASD_CQR_IN_IO:
2428 /* request in IO - terminate IO and release again */ 2463 /* request in IO - terminate IO and release again */
@@ -2442,6 +2477,7 @@ int dasd_cancel_req(struct dasd_ccw_req *cqr)
2442 dasd_schedule_device_bh(device); 2477 dasd_schedule_device_bh(device);
2443 return rc; 2478 return rc;
2444} 2479}
2480EXPORT_SYMBOL(dasd_cancel_req);
2445 2481
2446/* 2482/*
2447 * SECTION: Operations of the dasd_block layer. 2483 * SECTION: Operations of the dasd_block layer.
@@ -2475,6 +2511,7 @@ void dasd_block_set_timer(struct dasd_block *block, int expires)
2475 else 2511 else
2476 mod_timer(&block->timer, jiffies + expires); 2512 mod_timer(&block->timer, jiffies + expires);
2477} 2513}
2514EXPORT_SYMBOL(dasd_block_set_timer);
2478 2515
2479/* 2516/*
2480 * Clear timeout for a dasd_block. 2517 * Clear timeout for a dasd_block.
@@ -2483,6 +2520,7 @@ void dasd_block_clear_timer(struct dasd_block *block)
2483{ 2520{
2484 del_timer(&block->timer); 2521 del_timer(&block->timer);
2485} 2522}
2523EXPORT_SYMBOL(dasd_block_clear_timer);
2486 2524
2487/* 2525/*
2488 * Process finished error recovery ccw. 2526 * Process finished error recovery ccw.
@@ -2864,6 +2902,7 @@ void dasd_schedule_block_bh(struct dasd_block *block)
2864 dasd_get_device(block->base); 2902 dasd_get_device(block->base);
2865 tasklet_hi_schedule(&block->tasklet); 2903 tasklet_hi_schedule(&block->tasklet);
2866} 2904}
2905EXPORT_SYMBOL(dasd_schedule_block_bh);
2867 2906
2868 2907
2869/* 2908/*
@@ -3202,8 +3241,8 @@ static void dasd_generic_auto_online(void *data, async_cookie_t cookie)
3202 3241
3203 ret = ccw_device_set_online(cdev); 3242 ret = ccw_device_set_online(cdev);
3204 if (ret) 3243 if (ret)
3205 pr_warning("%s: Setting the DASD online failed with rc=%d\n", 3244 pr_warn("%s: Setting the DASD online failed with rc=%d\n",
3206 dev_name(&cdev->dev), ret); 3245 dev_name(&cdev->dev), ret);
3207} 3246}
3208 3247
3209/* 3248/*
@@ -3234,6 +3273,7 @@ int dasd_generic_probe(struct ccw_device *cdev,
3234 async_schedule(dasd_generic_auto_online, cdev); 3273 async_schedule(dasd_generic_auto_online, cdev);
3235 return 0; 3274 return 0;
3236} 3275}
3276EXPORT_SYMBOL_GPL(dasd_generic_probe);
3237 3277
3238/* 3278/*
3239 * This will one day be called from a global not_oper handler. 3279 * This will one day be called from a global not_oper handler.
@@ -3276,6 +3316,7 @@ void dasd_generic_remove(struct ccw_device *cdev)
3276 3316
3277 dasd_remove_sysfs_files(cdev); 3317 dasd_remove_sysfs_files(cdev);
3278} 3318}
3319EXPORT_SYMBOL_GPL(dasd_generic_remove);
3279 3320
3280/* 3321/*
3281 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either 3322 * Activate a device. This is called from dasd_{eckd,fba}_probe() when either
@@ -3298,9 +3339,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
3298 discipline = base_discipline; 3339 discipline = base_discipline;
3299 if (device->features & DASD_FEATURE_USEDIAG) { 3340 if (device->features & DASD_FEATURE_USEDIAG) {
3300 if (!dasd_diag_discipline_pointer) { 3341 if (!dasd_diag_discipline_pointer) {
3301 pr_warning("%s Setting the DASD online failed because " 3342 pr_warn("%s Setting the DASD online failed because of missing DIAG discipline\n",
3302 "of missing DIAG discipline\n", 3343 dev_name(&cdev->dev));
3303 dev_name(&cdev->dev));
3304 dasd_delete_device(device); 3344 dasd_delete_device(device);
3305 return -ENODEV; 3345 return -ENODEV;
3306 } 3346 }
@@ -3321,9 +3361,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
3321 /* check_device will allocate block device if necessary */ 3361 /* check_device will allocate block device if necessary */
3322 rc = discipline->check_device(device); 3362 rc = discipline->check_device(device);
3323 if (rc) { 3363 if (rc) {
3324 pr_warning("%s Setting the DASD online with discipline %s " 3364 pr_warn("%s Setting the DASD online with discipline %s failed with rc=%i\n",
3325 "failed with rc=%i\n", 3365 dev_name(&cdev->dev), discipline->name, rc);
3326 dev_name(&cdev->dev), discipline->name, rc);
3327 module_put(discipline->owner); 3366 module_put(discipline->owner);
3328 module_put(base_discipline->owner); 3367 module_put(base_discipline->owner);
3329 dasd_delete_device(device); 3368 dasd_delete_device(device);
@@ -3332,8 +3371,8 @@ int dasd_generic_set_online(struct ccw_device *cdev,
3332 3371
3333 dasd_set_target_state(device, DASD_STATE_ONLINE); 3372 dasd_set_target_state(device, DASD_STATE_ONLINE);
3334 if (device->state <= DASD_STATE_KNOWN) { 3373 if (device->state <= DASD_STATE_KNOWN) {
3335 pr_warning("%s Setting the DASD online failed because of a " 3374 pr_warn("%s Setting the DASD online failed because of a missing discipline\n",
3336 "missing discipline\n", dev_name(&cdev->dev)); 3375 dev_name(&cdev->dev));
3337 rc = -ENODEV; 3376 rc = -ENODEV;
3338 dasd_set_target_state(device, DASD_STATE_NEW); 3377 dasd_set_target_state(device, DASD_STATE_NEW);
3339 if (device->block) 3378 if (device->block)
@@ -3348,6 +3387,7 @@ int dasd_generic_set_online(struct ccw_device *cdev,
3348 dasd_put_device(device); 3387 dasd_put_device(device);
3349 return rc; 3388 return rc;
3350} 3389}
3390EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3351 3391
3352int dasd_generic_set_offline(struct ccw_device *cdev) 3392int dasd_generic_set_offline(struct ccw_device *cdev)
3353{ 3393{
@@ -3371,13 +3411,11 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
3371 open_count = atomic_read(&device->block->open_count); 3411 open_count = atomic_read(&device->block->open_count);
3372 if (open_count > max_count) { 3412 if (open_count > max_count) {
3373 if (open_count > 0) 3413 if (open_count > 0)
3374 pr_warning("%s: The DASD cannot be set offline " 3414 pr_warn("%s: The DASD cannot be set offline with open count %i\n",
3375 "with open count %i\n", 3415 dev_name(&cdev->dev), open_count);
3376 dev_name(&cdev->dev), open_count);
3377 else 3416 else
3378 pr_warning("%s: The DASD cannot be set offline " 3417 pr_warn("%s: The DASD cannot be set offline while it is in use\n",
3379 "while it is in use\n", 3418 dev_name(&cdev->dev));
3380 dev_name(&cdev->dev));
3381 clear_bit(DASD_FLAG_OFFLINE, &device->flags); 3419 clear_bit(DASD_FLAG_OFFLINE, &device->flags);
3382 dasd_put_device(device); 3420 dasd_put_device(device);
3383 return -EBUSY; 3421 return -EBUSY;
@@ -3451,6 +3489,7 @@ interrupted:
3451 dasd_put_device(device); 3489 dasd_put_device(device);
3452 return rc; 3490 return rc;
3453} 3491}
3492EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3454 3493
3455int dasd_generic_last_path_gone(struct dasd_device *device) 3494int dasd_generic_last_path_gone(struct dasd_device *device)
3456{ 3495{
@@ -3492,6 +3531,10 @@ int dasd_generic_path_operational(struct dasd_device *device)
3492 dasd_schedule_device_bh(device); 3531 dasd_schedule_device_bh(device);
3493 if (device->block) 3532 if (device->block)
3494 dasd_schedule_block_bh(device->block); 3533 dasd_schedule_block_bh(device->block);
3534
3535 if (!device->stopped)
3536 wake_up(&generic_waitq);
3537
3495 return 1; 3538 return 1;
3496} 3539}
3497EXPORT_SYMBOL_GPL(dasd_generic_path_operational); 3540EXPORT_SYMBOL_GPL(dasd_generic_path_operational);
@@ -3523,6 +3566,7 @@ int dasd_generic_notify(struct ccw_device *cdev, int event)
3523 dasd_put_device(device); 3566 dasd_put_device(device);
3524 return ret; 3567 return ret;
3525} 3568}
3569EXPORT_SYMBOL_GPL(dasd_generic_notify);
3526 3570
3527void dasd_generic_path_event(struct ccw_device *cdev, int *path_event) 3571void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
3528{ 3572{
@@ -3872,39 +3916,3 @@ failed:
3872 3916
3873module_init(dasd_init); 3917module_init(dasd_init);
3874module_exit(dasd_exit); 3918module_exit(dasd_exit);
3875
3876EXPORT_SYMBOL(dasd_debug_area);
3877EXPORT_SYMBOL(dasd_diag_discipline_pointer);
3878
3879EXPORT_SYMBOL(dasd_add_request_head);
3880EXPORT_SYMBOL(dasd_add_request_tail);
3881EXPORT_SYMBOL(dasd_cancel_req);
3882EXPORT_SYMBOL(dasd_device_clear_timer);
3883EXPORT_SYMBOL(dasd_block_clear_timer);
3884EXPORT_SYMBOL(dasd_enable_device);
3885EXPORT_SYMBOL(dasd_int_handler);
3886EXPORT_SYMBOL(dasd_kfree_request);
3887EXPORT_SYMBOL(dasd_kick_device);
3888EXPORT_SYMBOL(dasd_kmalloc_request);
3889EXPORT_SYMBOL(dasd_schedule_device_bh);
3890EXPORT_SYMBOL(dasd_schedule_block_bh);
3891EXPORT_SYMBOL(dasd_set_target_state);
3892EXPORT_SYMBOL(dasd_device_set_timer);
3893EXPORT_SYMBOL(dasd_block_set_timer);
3894EXPORT_SYMBOL(dasd_sfree_request);
3895EXPORT_SYMBOL(dasd_sleep_on);
3896EXPORT_SYMBOL(dasd_sleep_on_immediatly);
3897EXPORT_SYMBOL(dasd_sleep_on_interruptible);
3898EXPORT_SYMBOL(dasd_smalloc_request);
3899EXPORT_SYMBOL(dasd_start_IO);
3900EXPORT_SYMBOL(dasd_term_IO);
3901
3902EXPORT_SYMBOL_GPL(dasd_generic_probe);
3903EXPORT_SYMBOL_GPL(dasd_generic_remove);
3904EXPORT_SYMBOL_GPL(dasd_generic_notify);
3905EXPORT_SYMBOL_GPL(dasd_generic_set_online);
3906EXPORT_SYMBOL_GPL(dasd_generic_set_offline);
3907EXPORT_SYMBOL_GPL(dasd_generic_handle_state_change);
3908EXPORT_SYMBOL_GPL(dasd_flush_device_queue);
3909EXPORT_SYMBOL_GPL(dasd_alloc_block);
3910EXPORT_SYMBOL_GPL(dasd_free_block);
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2e8e0755070b..51dea7baf02c 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2039,7 +2039,7 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device)
2039 return 0; 2039 return 0;
2040}; 2040};
2041 2041
2042static int dasd_eckd_ready_to_basic(struct dasd_device *device) 2042static int dasd_eckd_basic_to_known(struct dasd_device *device)
2043{ 2043{
2044 return dasd_alias_remove_device(device); 2044 return dasd_alias_remove_device(device);
2045}; 2045};
@@ -2061,11 +2061,12 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo)
2061 2061
2062static struct dasd_ccw_req * 2062static struct dasd_ccw_req *
2063dasd_eckd_build_format(struct dasd_device *base, 2063dasd_eckd_build_format(struct dasd_device *base,
2064 struct format_data_t *fdata) 2064 struct format_data_t *fdata,
2065 int enable_pav)
2065{ 2066{
2066 struct dasd_eckd_private *base_priv; 2067 struct dasd_eckd_private *base_priv;
2067 struct dasd_eckd_private *start_priv; 2068 struct dasd_eckd_private *start_priv;
2068 struct dasd_device *startdev; 2069 struct dasd_device *startdev = NULL;
2069 struct dasd_ccw_req *fcp; 2070 struct dasd_ccw_req *fcp;
2070 struct eckd_count *ect; 2071 struct eckd_count *ect;
2071 struct ch_t address; 2072 struct ch_t address;
@@ -2079,7 +2080,9 @@ dasd_eckd_build_format(struct dasd_device *base,
2079 int nr_tracks; 2080 int nr_tracks;
2080 int use_prefix; 2081 int use_prefix;
2081 2082
2082 startdev = dasd_alias_get_start_dev(base); 2083 if (enable_pav)
2084 startdev = dasd_alias_get_start_dev(base);
2085
2083 if (!startdev) 2086 if (!startdev)
2084 startdev = base; 2087 startdev = base;
2085 2088
@@ -2309,6 +2312,7 @@ dasd_eckd_build_format(struct dasd_device *base,
2309 2312
2310 fcp->startdev = startdev; 2313 fcp->startdev = startdev;
2311 fcp->memdev = startdev; 2314 fcp->memdev = startdev;
2315 fcp->basedev = base;
2312 fcp->retries = 256; 2316 fcp->retries = 256;
2313 fcp->expires = startdev->default_expires * HZ; 2317 fcp->expires = startdev->default_expires * HZ;
2314 fcp->buildclk = get_tod_clock(); 2318 fcp->buildclk = get_tod_clock();
@@ -2319,7 +2323,8 @@ dasd_eckd_build_format(struct dasd_device *base,
2319 2323
2320static int 2324static int
2321dasd_eckd_format_device(struct dasd_device *base, 2325dasd_eckd_format_device(struct dasd_device *base,
2322 struct format_data_t *fdata) 2326 struct format_data_t *fdata,
2327 int enable_pav)
2323{ 2328{
2324 struct dasd_ccw_req *cqr, *n; 2329 struct dasd_ccw_req *cqr, *n;
2325 struct dasd_block *block; 2330 struct dasd_block *block;
@@ -2327,7 +2332,7 @@ dasd_eckd_format_device(struct dasd_device *base,
2327 struct list_head format_queue; 2332 struct list_head format_queue;
2328 struct dasd_device *device; 2333 struct dasd_device *device;
2329 int old_stop, format_step; 2334 int old_stop, format_step;
2330 int step, rc = 0; 2335 int step, rc = 0, sleep_rc;
2331 2336
2332 block = base->block; 2337 block = base->block;
2333 private = (struct dasd_eckd_private *) base->private; 2338 private = (struct dasd_eckd_private *) base->private;
@@ -2361,11 +2366,11 @@ dasd_eckd_format_device(struct dasd_device *base,
2361 } 2366 }
2362 2367
2363 INIT_LIST_HEAD(&format_queue); 2368 INIT_LIST_HEAD(&format_queue);
2364 old_stop = fdata->stop_unit;
2365 2369
2370 old_stop = fdata->stop_unit;
2366 while (fdata->start_unit <= 1) { 2371 while (fdata->start_unit <= 1) {
2367 fdata->stop_unit = fdata->start_unit; 2372 fdata->stop_unit = fdata->start_unit;
2368 cqr = dasd_eckd_build_format(base, fdata); 2373 cqr = dasd_eckd_build_format(base, fdata, enable_pav);
2369 list_add(&cqr->blocklist, &format_queue); 2374 list_add(&cqr->blocklist, &format_queue);
2370 2375
2371 fdata->stop_unit = old_stop; 2376 fdata->stop_unit = old_stop;
@@ -2383,7 +2388,7 @@ retry:
2383 if (step > format_step) 2388 if (step > format_step)
2384 fdata->stop_unit = fdata->start_unit + format_step - 1; 2389 fdata->stop_unit = fdata->start_unit + format_step - 1;
2385 2390
2386 cqr = dasd_eckd_build_format(base, fdata); 2391 cqr = dasd_eckd_build_format(base, fdata, enable_pav);
2387 if (IS_ERR(cqr)) { 2392 if (IS_ERR(cqr)) {
2388 if (PTR_ERR(cqr) == -ENOMEM) { 2393 if (PTR_ERR(cqr) == -ENOMEM) {
2389 /* 2394 /*
@@ -2403,7 +2408,7 @@ retry:
2403 } 2408 }
2404 2409
2405sleep: 2410sleep:
2406 dasd_sleep_on_queue(&format_queue); 2411 sleep_rc = dasd_sleep_on_queue(&format_queue);
2407 2412
2408 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { 2413 list_for_each_entry_safe(cqr, n, &format_queue, blocklist) {
2409 device = cqr->startdev; 2414 device = cqr->startdev;
@@ -2415,6 +2420,9 @@ sleep:
2415 private->count--; 2420 private->count--;
2416 } 2421 }
2417 2422
2423 if (sleep_rc)
2424 return sleep_rc;
2425
2418 /* 2426 /*
2419 * in case of ENOMEM we need to retry after 2427 * in case of ENOMEM we need to retry after
2420 * first requests are finished 2428 * first requests are finished
@@ -4511,7 +4519,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
4511 .verify_path = dasd_eckd_verify_path, 4519 .verify_path = dasd_eckd_verify_path,
4512 .basic_to_ready = dasd_eckd_basic_to_ready, 4520 .basic_to_ready = dasd_eckd_basic_to_ready,
4513 .online_to_ready = dasd_eckd_online_to_ready, 4521 .online_to_ready = dasd_eckd_online_to_ready,
4514 .ready_to_basic = dasd_eckd_ready_to_basic, 4522 .basic_to_known = dasd_eckd_basic_to_known,
4515 .fill_geometry = dasd_eckd_fill_geometry, 4523 .fill_geometry = dasd_eckd_fill_geometry,
4516 .start_IO = dasd_start_IO, 4524 .start_IO = dasd_start_IO,
4517 .term_IO = dasd_term_IO, 4525 .term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 690001af0d09..c20170166909 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -175,6 +175,7 @@ struct dasd_ccw_req {
175 struct dasd_block *block; /* the originating block device */ 175 struct dasd_block *block; /* the originating block device */
176 struct dasd_device *memdev; /* the device used to allocate this */ 176 struct dasd_device *memdev; /* the device used to allocate this */
177 struct dasd_device *startdev; /* device the request is started on */ 177 struct dasd_device *startdev; /* device the request is started on */
178 struct dasd_device *basedev; /* base device if no block->base */
178 void *cpaddr; /* address of ccw or tcw */ 179 void *cpaddr; /* address of ccw or tcw */
179 unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */ 180 unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
180 char status; /* status of this request */ 181 char status; /* status of this request */
@@ -304,7 +305,7 @@ struct dasd_discipline {
304 */ 305 */
305 int (*basic_to_ready) (struct dasd_device *); 306 int (*basic_to_ready) (struct dasd_device *);
306 int (*online_to_ready) (struct dasd_device *); 307 int (*online_to_ready) (struct dasd_device *);
307 int (*ready_to_basic) (struct dasd_device *); 308 int (*basic_to_known)(struct dasd_device *);
308 309
309 /* (struct dasd_device *); 310 /* (struct dasd_device *);
310 * Device operation functions. build_cp creates a ccw chain for 311 * Device operation functions. build_cp creates a ccw chain for
@@ -321,7 +322,7 @@ struct dasd_discipline {
321 int (*term_IO) (struct dasd_ccw_req *); 322 int (*term_IO) (struct dasd_ccw_req *);
322 void (*handle_terminated_request) (struct dasd_ccw_req *); 323 void (*handle_terminated_request) (struct dasd_ccw_req *);
323 int (*format_device) (struct dasd_device *, 324 int (*format_device) (struct dasd_device *,
324 struct format_data_t *); 325 struct format_data_t *, int enable_pav);
325 int (*free_cp) (struct dasd_ccw_req *, struct request *); 326 int (*free_cp) (struct dasd_ccw_req *, struct request *);
326 327
327 /* 328 /*
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 25a0f2f8b0b9..02837d0ad942 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -203,7 +203,9 @@ static int
203dasd_format(struct dasd_block *block, struct format_data_t *fdata) 203dasd_format(struct dasd_block *block, struct format_data_t *fdata)
204{ 204{
205 struct dasd_device *base; 205 struct dasd_device *base;
206 int rc; 206 int enable_pav = 1;
207 int rc, retries;
208 int start, stop;
207 209
208 base = block->base; 210 base = block->base;
209 if (base->discipline->format_device == NULL) 211 if (base->discipline->format_device == NULL)
@@ -231,11 +233,30 @@ dasd_format(struct dasd_block *block, struct format_data_t *fdata)
231 bdput(bdev); 233 bdput(bdev);
232 } 234 }
233 235
234 rc = base->discipline->format_device(base, fdata); 236 retries = 255;
235 if (rc) 237 /* back up start and stop track for retries */
236 return rc; 238 start = fdata->start_unit;
237 239 stop = fdata->stop_unit;
238 return 0; 240 do {
241 rc = base->discipline->format_device(base, fdata, enable_pav);
242 if (rc) {
243 if (rc == -EAGAIN) {
244 retries--;
245 /* disable PAV in case of errors */
246 enable_pav = 0;
247 fdata->start_unit = start;
248 fdata->stop_unit = stop;
249 } else
250 return rc;
251 } else
252 /* success */
253 break;
254 } while (retries);
255
256 if (!retries)
257 return -EIO;
258 else
259 return 0;
239} 260}
240 261
241/* 262/*
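The rewritten dasd_format() now restores the saved track range and retries with PAV disabled whenever the discipline reports -EAGAIN, and gives up with -EIO after 255 attempts. The user-space entry point is unchanged; a format request still arrives through the BIODASDFMT ioctl, roughly as in this sketch (device node, last_track and all error handling are illustrative):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <asm/dasd.h>

	/* format tracks 0..last_track with 4 KiB blocks; returns 0 or -1 */
	static int format_dasd(const char *node, unsigned int last_track)
	{
		struct format_data_t fdata = {
			.start_unit = 0,
			.stop_unit  = last_track,
			.blksize    = 4096,
			.intensity  = DASD_FMT_INT_INVAL, /* invalidate old tracks */
		};
		int fd = open(node, O_RDWR);

		if (fd < 0)
			return -1;
		if (ioctl(fd, BIODASDFMT, &fdata) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}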
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 5af7f0bd6125..a6d47e5eee9e 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -288,12 +288,16 @@ static void raw3215_timeout(unsigned long __data)
288 unsigned long flags; 288 unsigned long flags;
289 289
290 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); 290 spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags);
291 if (raw->flags & RAW3215_TIMER_RUNS) { 291 raw->flags &= ~RAW3215_TIMER_RUNS;
292 del_timer(&raw->timer); 292 if (!(raw->port.flags & ASYNC_SUSPENDED)) {
293 raw->flags &= ~RAW3215_TIMER_RUNS; 293 raw3215_mk_write_req(raw);
294 if (!(raw->port.flags & ASYNC_SUSPENDED)) { 294 raw3215_start_io(raw);
295 raw3215_mk_write_req(raw); 295 if ((raw->queued_read || raw->queued_write) &&
296 raw3215_start_io(raw); 296 !(raw->flags & RAW3215_WORKING) &&
297 !(raw->flags & RAW3215_TIMER_RUNS)) {
298 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
299 add_timer(&raw->timer);
300 raw->flags |= RAW3215_TIMER_RUNS;
297 } 301 }
298 } 302 }
299 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); 303 spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags);
@@ -317,17 +321,15 @@ static inline void raw3215_try_io(struct raw3215_info *raw)
317 (raw->flags & RAW3215_FLUSHING)) { 321 (raw->flags & RAW3215_FLUSHING)) {
318 /* execute write requests bigger than minimum size */ 322 /* execute write requests bigger than minimum size */
319 raw3215_start_io(raw); 323 raw3215_start_io(raw);
320 if (raw->flags & RAW3215_TIMER_RUNS) {
321 del_timer(&raw->timer);
322 raw->flags &= ~RAW3215_TIMER_RUNS;
323 }
324 } else if (!(raw->flags & RAW3215_TIMER_RUNS)) {
325 /* delay small writes */
326 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
327 add_timer(&raw->timer);
328 raw->flags |= RAW3215_TIMER_RUNS;
329 } 324 }
330 } 325 }
326 if ((raw->queued_read || raw->queued_write) &&
327 !(raw->flags & RAW3215_WORKING) &&
328 !(raw->flags & RAW3215_TIMER_RUNS)) {
329 raw->timer.expires = RAW3215_TIMEOUT + jiffies;
330 add_timer(&raw->timer);
331 raw->flags |= RAW3215_TIMER_RUNS;
332 }
331} 333}
332 334
333/* 335/*
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index f5f4a91fab44..f76bff68d1de 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -17,6 +17,8 @@
17#include "qdio.h" 17#include "qdio.h"
18#include "qdio_debug.h" 18#include "qdio_debug.h"
19 19
20#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
21
20static struct kmem_cache *qdio_q_cache; 22static struct kmem_cache *qdio_q_cache;
21static struct kmem_cache *qdio_aob_cache; 23static struct kmem_cache *qdio_aob_cache;
22 24
@@ -32,6 +34,57 @@ void qdio_release_aob(struct qaob *aob)
32} 34}
33EXPORT_SYMBOL_GPL(qdio_release_aob); 35EXPORT_SYMBOL_GPL(qdio_release_aob);
34 36
37/**
38 * qdio_free_buffers() - free qdio buffers
39 * @buf: array of pointers to qdio buffers
40 * @count: number of qdio buffers to free
41 */
42void qdio_free_buffers(struct qdio_buffer **buf, unsigned int count)
43{
44 int pos;
45
46 for (pos = 0; pos < count; pos += QBUFF_PER_PAGE)
47 free_page((unsigned long) buf[pos]);
48}
49EXPORT_SYMBOL_GPL(qdio_free_buffers);
50
51/**
52 * qdio_alloc_buffers() - allocate qdio buffers
53 * @buf: array of pointers to qdio buffers
54 * @count: number of qdio buffers to allocate
55 */
56int qdio_alloc_buffers(struct qdio_buffer **buf, unsigned int count)
57{
58 int pos;
59
60 for (pos = 0; pos < count; pos += QBUFF_PER_PAGE) {
61 buf[pos] = (void *) get_zeroed_page(GFP_KERNEL);
62 if (!buf[pos]) {
63 qdio_free_buffers(buf, count);
64 return -ENOMEM;
65 }
66 }
67 for (pos = 0; pos < count; pos++)
68 if (pos % QBUFF_PER_PAGE)
69 buf[pos] = buf[pos - 1] + 1;
70 return 0;
71}
72EXPORT_SYMBOL_GPL(qdio_alloc_buffers);
73
74/**
75 * qdio_reset_buffers() - reset qdio buffers
76 * @buf: array of pointers to qdio buffers
77 * @count: number of qdio buffers that will be zeroed
78 */
79void qdio_reset_buffers(struct qdio_buffer **buf, unsigned int count)
80{
81 int pos;
82
83 for (pos = 0; pos < count; pos++)
84 memset(buf[pos], 0, sizeof(struct qdio_buffer));
85}
86EXPORT_SYMBOL_GPL(qdio_reset_buffers);
87
35/* 88/*
36 * qebsm is only available under 64bit but the adapter sets the feature 89 * qebsm is only available under 64bit but the adapter sets the feature
37 * flag anyway, so we manually override it. 90 * flag anyway, so we manually override it.
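These three helpers give qdio clients one shared implementation of the page-wise buffer management that zfcp used to open-code (and that qeth duplicated with embedded arrays); the zfcp and qeth hunks below convert to them. A minimal driver-side sketch, assuming a queue that owns a full set of QDIO_MAX_BUFFERS_PER_Q buffers (bufs, my_queue_setup() and my_queue_teardown() are illustrative names):

	static struct qdio_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];

	static int my_queue_setup(void)
	{
		/* page-backed allocation; on failure the helper frees
		 * whatever it had already allocated itself */
		if (qdio_alloc_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q))
			return -ENOMEM;
		/* zero all buffers before (re)initializing the queue */
		qdio_reset_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
		return 0;
	}

	static void my_queue_teardown(void)
	{
		qdio_free_buffers(bufs, QDIO_MAX_BUFFERS_PER_Q);
	}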
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index bbafbd0e017a..97ef37b51068 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -439,10 +439,10 @@ struct qeth_qdio_buffer {
439}; 439};
440 440
441struct qeth_qdio_q { 441struct qeth_qdio_q {
442 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; 442 struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
443 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q]; 443 struct qeth_qdio_buffer bufs[QDIO_MAX_BUFFERS_PER_Q];
444 int next_buf_to_init; 444 int next_buf_to_init;
445} __attribute__ ((aligned(256))); 445};
446 446
447struct qeth_qdio_out_buffer { 447struct qeth_qdio_out_buffer {
448 struct qdio_buffer *buffer; 448 struct qdio_buffer *buffer;
@@ -465,7 +465,7 @@ enum qeth_out_q_states {
465}; 465};
466 466
467struct qeth_qdio_out_q { 467struct qeth_qdio_out_q {
468 struct qdio_buffer qdio_bufs[QDIO_MAX_BUFFERS_PER_Q]; 468 struct qdio_buffer *qdio_bufs[QDIO_MAX_BUFFERS_PER_Q];
469 struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q]; 469 struct qeth_qdio_out_buffer *bufs[QDIO_MAX_BUFFERS_PER_Q];
470 struct qdio_outbuf_state *bufstates; /* convenience pointer */ 470 struct qdio_outbuf_state *bufstates; /* convenience pointer */
471 int queue_no; 471 int queue_no;
@@ -483,7 +483,7 @@ struct qeth_qdio_out_q {
483 atomic_t used_buffers; 483 atomic_t used_buffers;
484 /* indicates whether PCI flag must be set (or if one is outstanding) */ 484 /* indicates whether PCI flag must be set (or if one is outstanding) */
485 atomic_t set_pci_flags_count; 485 atomic_t set_pci_flags_count;
486} __attribute__ ((aligned(256))); 486};
487 487
488struct qeth_qdio_info { 488struct qeth_qdio_info {
489 atomic_t state; 489 atomic_t state;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 71bfacfc097e..c0d6ba8655c7 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -292,14 +292,43 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
292} 292}
293EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool); 293EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
294 294
295static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
296{
297 if (!q)
298 return;
299
300 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
301 kfree(q);
302}
303
304static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
305{
306 struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
307 int i;
308
309 if (!q)
310 return NULL;
311
312 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
313 kfree(q);
314 return NULL;
315 }
316
317 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
318 q->bufs[i].buffer = q->qdio_bufs[i];
319
320 QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
321 return q;
322}
323
295static inline int qeth_cq_init(struct qeth_card *card) 324static inline int qeth_cq_init(struct qeth_card *card)
296{ 325{
297 int rc; 326 int rc;
298 327
299 if (card->options.cq == QETH_CQ_ENABLED) { 328 if (card->options.cq == QETH_CQ_ENABLED) {
300 QETH_DBF_TEXT(SETUP, 2, "cqinit"); 329 QETH_DBF_TEXT(SETUP, 2, "cqinit");
301 memset(card->qdio.c_q->qdio_bufs, 0, 330 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
302 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); 331 QDIO_MAX_BUFFERS_PER_Q);
303 card->qdio.c_q->next_buf_to_init = 127; 332 card->qdio.c_q->next_buf_to_init = 127;
304 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 333 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
305 card->qdio.no_in_queues - 1, 0, 334 card->qdio.no_in_queues - 1, 0,
@@ -323,21 +352,12 @@ static inline int qeth_alloc_cq(struct qeth_card *card)
323 struct qdio_outbuf_state *outbuf_states; 352 struct qdio_outbuf_state *outbuf_states;
324 353
325 QETH_DBF_TEXT(SETUP, 2, "cqon"); 354 QETH_DBF_TEXT(SETUP, 2, "cqon");
326 card->qdio.c_q = kzalloc(sizeof(struct qeth_qdio_q), 355 card->qdio.c_q = qeth_alloc_qdio_queue();
327 GFP_KERNEL);
328 if (!card->qdio.c_q) { 356 if (!card->qdio.c_q) {
329 rc = -1; 357 rc = -1;
330 goto kmsg_out; 358 goto kmsg_out;
331 } 359 }
332 QETH_DBF_HEX(SETUP, 2, &card->qdio.c_q, sizeof(void *));
333
334 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
335 card->qdio.c_q->bufs[i].buffer =
336 &card->qdio.c_q->qdio_bufs[i];
337 }
338
339 card->qdio.no_in_queues = 2; 360 card->qdio.no_in_queues = 2;
340
341 card->qdio.out_bufstates = 361 card->qdio.out_bufstates =
342 kzalloc(card->qdio.no_out_queues * 362 kzalloc(card->qdio.no_out_queues *
343 QDIO_MAX_BUFFERS_PER_Q * 363 QDIO_MAX_BUFFERS_PER_Q *
@@ -361,7 +381,7 @@ static inline int qeth_alloc_cq(struct qeth_card *card)
361out: 381out:
362 return rc; 382 return rc;
363free_cq_out: 383free_cq_out:
364 kfree(card->qdio.c_q); 384 qeth_free_qdio_queue(card->qdio.c_q);
365 card->qdio.c_q = NULL; 385 card->qdio.c_q = NULL;
366kmsg_out: 386kmsg_out:
367 dev_err(&card->gdev->dev, "Failed to create completion queue\n"); 387 dev_err(&card->gdev->dev, "Failed to create completion queue\n");
@@ -372,7 +392,7 @@ static inline void qeth_free_cq(struct qeth_card *card)
372{ 392{
373 if (card->qdio.c_q) { 393 if (card->qdio.c_q) {
374 --card->qdio.no_in_queues; 394 --card->qdio.no_in_queues;
375 kfree(card->qdio.c_q); 395 qeth_free_qdio_queue(card->qdio.c_q);
376 card->qdio.c_q = NULL; 396 card->qdio.c_q = NULL;
377 } 397 }
378 kfree(card->qdio.out_bufstates); 398 kfree(card->qdio.out_bufstates);
@@ -1282,35 +1302,6 @@ static void qeth_free_buffer_pool(struct qeth_card *card)
1282 } 1302 }
1283} 1303}
1284 1304
1285static void qeth_free_qdio_buffers(struct qeth_card *card)
1286{
1287 int i, j;
1288
1289 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
1290 QETH_QDIO_UNINITIALIZED)
1291 return;
1292
1293 qeth_free_cq(card);
1294 cancel_delayed_work_sync(&card->buffer_reclaim_work);
1295 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
1296 if (card->qdio.in_q->bufs[j].rx_skb)
1297 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
1298 }
1299 kfree(card->qdio.in_q);
1300 card->qdio.in_q = NULL;
1301 /* inbound buffer pool */
1302 qeth_free_buffer_pool(card);
1303 /* free outbound qdio_qs */
1304 if (card->qdio.out_qs) {
1305 for (i = 0; i < card->qdio.no_out_queues; ++i) {
1306 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
1307 kfree(card->qdio.out_qs[i]);
1308 }
1309 kfree(card->qdio.out_qs);
1310 card->qdio.out_qs = NULL;
1311 }
1312}
1313
1314static void qeth_clean_channel(struct qeth_channel *channel) 1305static void qeth_clean_channel(struct qeth_channel *channel)
1315{ 1306{
1316 int cnt; 1307 int cnt;
@@ -2392,7 +2383,7 @@ static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
2392 rc = -ENOMEM; 2383 rc = -ENOMEM;
2393 goto out; 2384 goto out;
2394 } 2385 }
2395 newbuf->buffer = &q->qdio_bufs[bidx]; 2386 newbuf->buffer = q->qdio_bufs[bidx];
2396 skb_queue_head_init(&newbuf->skb_list); 2387 skb_queue_head_init(&newbuf->skb_list);
2397 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key); 2388 lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
2398 newbuf->q = q; 2389 newbuf->q = q;
@@ -2411,6 +2402,28 @@ out:
2411 return rc; 2402 return rc;
2412} 2403}
2413 2404
2405static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
2406{
2407 if (!q)
2408 return;
2409
2410 qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2411 kfree(q);
2412}
2413
2414static struct qeth_qdio_out_q *qeth_alloc_qdio_out_buf(void)
2415{
2416 struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2417
2418 if (!q)
2419 return NULL;
2420
2421 if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2422 kfree(q);
2423 return NULL;
2424 }
2425 return q;
2426}
2414 2427
2415static int qeth_alloc_qdio_buffers(struct qeth_card *card) 2428static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2416{ 2429{
@@ -2422,19 +2435,11 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2422 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) 2435 QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
2423 return 0; 2436 return 0;
2424 2437
2425 card->qdio.in_q = kzalloc(sizeof(struct qeth_qdio_q), 2438 QETH_DBF_TEXT(SETUP, 2, "inq");
2426 GFP_KERNEL); 2439 card->qdio.in_q = qeth_alloc_qdio_queue();
2427 if (!card->qdio.in_q) 2440 if (!card->qdio.in_q)
2428 goto out_nomem; 2441 goto out_nomem;
2429 QETH_DBF_TEXT(SETUP, 2, "inq"); 2442
2430 QETH_DBF_HEX(SETUP, 2, &card->qdio.in_q, sizeof(void *));
2431 memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q));
2432 /* give inbound qeth_qdio_buffers their qdio_buffers */
2433 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
2434 card->qdio.in_q->bufs[i].buffer =
2435 &card->qdio.in_q->qdio_bufs[i];
2436 card->qdio.in_q->bufs[i].rx_skb = NULL;
2437 }
2438 /* inbound buffer pool */ 2443 /* inbound buffer pool */
2439 if (qeth_alloc_buffer_pool(card)) 2444 if (qeth_alloc_buffer_pool(card))
2440 goto out_freeinq; 2445 goto out_freeinq;
@@ -2446,8 +2451,7 @@ static int qeth_alloc_qdio_buffers(struct qeth_card *card)
2446 if (!card->qdio.out_qs) 2451 if (!card->qdio.out_qs)
2447 goto out_freepool; 2452 goto out_freepool;
2448 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2453 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2449 card->qdio.out_qs[i] = kzalloc(sizeof(struct qeth_qdio_out_q), 2454 card->qdio.out_qs[i] = qeth_alloc_qdio_out_buf();
2450 GFP_KERNEL);
2451 if (!card->qdio.out_qs[i]) 2455 if (!card->qdio.out_qs[i])
2452 goto out_freeoutq; 2456 goto out_freeoutq;
2453 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i); 2457 QETH_DBF_TEXT_(SETUP, 2, "outq %i", i);
@@ -2476,7 +2480,7 @@ out_freeoutqbufs:
2476 } 2480 }
2477out_freeoutq: 2481out_freeoutq:
2478 while (i > 0) { 2482 while (i > 0) {
2479 kfree(card->qdio.out_qs[--i]); 2483 qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
2480 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1); 2484 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2481 } 2485 }
2482 kfree(card->qdio.out_qs); 2486 kfree(card->qdio.out_qs);
@@ -2484,13 +2488,42 @@ out_freeoutq:
2484out_freepool: 2488out_freepool:
2485 qeth_free_buffer_pool(card); 2489 qeth_free_buffer_pool(card);
2486out_freeinq: 2490out_freeinq:
2487 kfree(card->qdio.in_q); 2491 qeth_free_qdio_queue(card->qdio.in_q);
2488 card->qdio.in_q = NULL; 2492 card->qdio.in_q = NULL;
2489out_nomem: 2493out_nomem:
2490 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); 2494 atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
2491 return -ENOMEM; 2495 return -ENOMEM;
2492} 2496}
2493 2497
2498static void qeth_free_qdio_buffers(struct qeth_card *card)
2499{
2500 int i, j;
2501
2502 if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
2503 QETH_QDIO_UNINITIALIZED)
2504 return;
2505
2506 qeth_free_cq(card);
2507 cancel_delayed_work_sync(&card->buffer_reclaim_work);
2508 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2509 if (card->qdio.in_q->bufs[j].rx_skb)
2510 dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
2511 }
2512 qeth_free_qdio_queue(card->qdio.in_q);
2513 card->qdio.in_q = NULL;
2514 /* inbound buffer pool */
2515 qeth_free_buffer_pool(card);
2516 /* free outbound qdio_qs */
2517 if (card->qdio.out_qs) {
2518 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2519 qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
2520 qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
2521 }
2522 kfree(card->qdio.out_qs);
2523 card->qdio.out_qs = NULL;
2524 }
2525}
2526
2494static void qeth_create_qib_param_field(struct qeth_card *card, 2527static void qeth_create_qib_param_field(struct qeth_card *card,
2495 char *param_field) 2528 char *param_field)
2496{ 2529{
@@ -2788,8 +2821,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
2788 QETH_DBF_TEXT(SETUP, 2, "initqdqs"); 2821 QETH_DBF_TEXT(SETUP, 2, "initqdqs");
2789 2822
2790 /* inbound queue */ 2823 /* inbound queue */
2791 memset(card->qdio.in_q->qdio_bufs, 0, 2824 qdio_reset_buffers(card->qdio.in_q->qdio_bufs,
2792 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); 2825 QDIO_MAX_BUFFERS_PER_Q);
2793 qeth_initialize_working_pool_list(card); 2826 qeth_initialize_working_pool_list(card);
2794 /*give only as many buffers to hardware as we have buffer pool entries*/ 2827 /*give only as many buffers to hardware as we have buffer pool entries*/
2795 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i) 2828 for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; ++i)
@@ -2811,8 +2844,8 @@ int qeth_init_qdio_queues(struct qeth_card *card)
2811 2844
2812 /* outbound queue */ 2845 /* outbound queue */
2813 for (i = 0; i < card->qdio.no_out_queues; ++i) { 2846 for (i = 0; i < card->qdio.no_out_queues; ++i) {
2814 memset(card->qdio.out_qs[i]->qdio_bufs, 0, 2847 qdio_reset_buffers(card->qdio.out_qs[i]->qdio_bufs,
2815 QDIO_MAX_BUFFERS_PER_Q * sizeof(struct qdio_buffer)); 2848 QDIO_MAX_BUFFERS_PER_Q);
2816 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) { 2849 for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
2817 qeth_clear_output_buffer(card->qdio.out_qs[i], 2850 qeth_clear_output_buffer(card->qdio.out_qs[i],
2818 card->qdio.out_qs[i]->bufs[j], 2851 card->qdio.out_qs[i]->bufs[j],
@@ -3569,7 +3602,7 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
3569 3602
3570 for (i = first_element; i < first_element + count; ++i) { 3603 for (i = first_element; i < first_element + count; ++i) {
3571 int bidx = i % QDIO_MAX_BUFFERS_PER_Q; 3604 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3572 struct qdio_buffer *buffer = &cq->qdio_bufs[bidx]; 3605 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3573 int e; 3606 int e;
3574 3607
3575 e = 0; 3608 e = 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 06025cdaa4ad..495e1cb3afa6 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -14,27 +14,10 @@
14#include "zfcp_ext.h" 14#include "zfcp_ext.h"
15#include "zfcp_qdio.h" 15#include "zfcp_qdio.h"
16 16
17#define QBUFF_PER_PAGE (PAGE_SIZE / sizeof(struct qdio_buffer))
18
19static bool enable_multibuffer = 1; 17static bool enable_multibuffer = 1;
20module_param_named(datarouter, enable_multibuffer, bool, 0400); 18module_param_named(datarouter, enable_multibuffer, bool, 0400);
21MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)"); 19MODULE_PARM_DESC(datarouter, "Enable hardware data router support (default on)");
22 20
23static int zfcp_qdio_buffers_enqueue(struct qdio_buffer **sbal)
24{
25 int pos;
26
27 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos += QBUFF_PER_PAGE) {
28 sbal[pos] = (struct qdio_buffer *) get_zeroed_page(GFP_KERNEL);
29 if (!sbal[pos])
30 return -ENOMEM;
31 }
32 for (pos = 0; pos < QDIO_MAX_BUFFERS_PER_Q; pos++)
33 if (pos % QBUFF_PER_PAGE)
34 sbal[pos] = sbal[pos - 1] + 1;
35 return 0;
36}
37
38static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id, 21static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id,
39 unsigned int qdio_err) 22 unsigned int qdio_err)
40{ 23{
@@ -326,15 +309,30 @@ static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
326static int zfcp_qdio_allocate(struct zfcp_qdio *qdio) 309static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
327{ 310{
328 struct qdio_initialize init_data; 311 struct qdio_initialize init_data;
312 int ret;
329 313
330 if (zfcp_qdio_buffers_enqueue(qdio->req_q) || 314 ret = qdio_alloc_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
331 zfcp_qdio_buffers_enqueue(qdio->res_q)) 315 if (ret)
332 return -ENOMEM; 316 return -ENOMEM;
333 317
318 ret = qdio_alloc_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
319 if (ret)
320 goto free_req_q;
321
334 zfcp_qdio_setup_init_data(&init_data, qdio); 322 zfcp_qdio_setup_init_data(&init_data, qdio);
335 init_waitqueue_head(&qdio->req_q_wq); 323 init_waitqueue_head(&qdio->req_q_wq);
336 324
337 return qdio_allocate(&init_data); 325 ret = qdio_allocate(&init_data);
326 if (ret)
327 goto free_res_q;
328
329 return 0;
330
331free_res_q:
332 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
333free_req_q:
334 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
335 return ret;
338} 336}
339 337
340/** 338/**
@@ -448,19 +446,14 @@ failed_establish:
448 446
449void zfcp_qdio_destroy(struct zfcp_qdio *qdio) 447void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
450{ 448{
451 int p;
452
453 if (!qdio) 449 if (!qdio)
454 return; 450 return;
455 451
456 if (qdio->adapter->ccw_device) 452 if (qdio->adapter->ccw_device)
457 qdio_free(qdio->adapter->ccw_device); 453 qdio_free(qdio->adapter->ccw_device);
458 454
459 for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) { 455 qdio_free_buffers(qdio->req_q, QDIO_MAX_BUFFERS_PER_Q);
460 free_page((unsigned long) qdio->req_q[p]); 456 qdio_free_buffers(qdio->res_q, QDIO_MAX_BUFFERS_PER_Q);
461 free_page((unsigned long) qdio->res_q[p]);
462 }
463
464 kfree(qdio); 457 kfree(qdio);
465} 458}
466 459
@@ -475,7 +468,7 @@ int zfcp_qdio_setup(struct zfcp_adapter *adapter)
475 qdio->adapter = adapter; 468 qdio->adapter = adapter;
476 469
477 if (zfcp_qdio_allocate(qdio)) { 470 if (zfcp_qdio_allocate(qdio)) {
478 zfcp_qdio_destroy(qdio); 471 kfree(qdio);
479 return -ENOMEM; 472 return -ENOMEM;
480 } 473 }
481 474
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 76dd54122f76..f57312fced80 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -1293,7 +1293,7 @@ config DIAG288_WATCHDOG
1293 both. 1293 both.
1294 1294
1295 To compile this driver as a module, choose M here. The module 1295 To compile this driver as a module, choose M here. The module
1296 will be called vmwatchdog. 1296 will be called diag288_wdt.
1297 1297
1298# SUPERH (sh + sh64) Architecture 1298# SUPERH (sh + sh64) Architecture
1299 1299