author		Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
committer	Glenn Elliott <gelliott@cs.unc.edu>	2012-03-04 19:47:13 -0500
commit		c71c03bda1e86c9d5198c5d83f712e695c4f2a1e
tree		ecb166cb3e2b7e2adb3b5e292245fefd23381ac8 /drivers/s390/cio
parent		ea53c912f8a86a8567697115b6a0d8152beee5c8
parent		6a00f206debf8a5c8899055726ad127dbeeed098
Merge branch 'mpi-master' into wip-k-fmlp

Conflicts:
	litmus/sched_cedf.c
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--	drivers/s390/cio/blacklist.c	10
-rw-r--r--	drivers/s390/cio/ccwgroup.c	84
-rw-r--r--	drivers/s390/cio/chp.c	41
-rw-r--r--	drivers/s390/cio/chp.h	12
-rw-r--r--	drivers/s390/cio/chsc.c	345
-rw-r--r--	drivers/s390/cio/chsc.h	46
-rw-r--r--	drivers/s390/cio/chsc_sch.c	30
-rw-r--r--	drivers/s390/cio/cio.c	45
-rw-r--r--	drivers/s390/cio/cio.h	11
-rw-r--r--	drivers/s390/cio/css.c	67
-rw-r--r--	drivers/s390/cio/css.h	10
-rw-r--r--	drivers/s390/cio/device.c	88
-rw-r--r--	drivers/s390/cio/device.h	1
-rw-r--r--	drivers/s390/cio/device_fsm.c	53
-rw-r--r--	drivers/s390/cio/device_ops.c	43
-rw-r--r--	drivers/s390/cio/device_pgid.c	23
-rw-r--r--	drivers/s390/cio/io_sch.h	121
-rw-r--r--	drivers/s390/cio/ioasm.h	34
-rw-r--r--	drivers/s390/cio/itcw.c	62
-rw-r--r--	drivers/s390/cio/orb.h	67
-rw-r--r--	drivers/s390/cio/qdio.h	56
-rw-r--r--	drivers/s390/cio/qdio_debug.c	34
-rw-r--r--	drivers/s390/cio/qdio_main.c	351
-rw-r--r--	drivers/s390/cio/qdio_setup.c	21
-rw-r--r--	drivers/s390/cio/qdio_thinint.c	104
25 files changed, 1071 insertions(+), 688 deletions(-)
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 13cb60162e42..76058a5166ed 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -79,17 +79,15 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
 		    int max_digit, int max_val)
 {
 	int diff;
-	unsigned int value;
 
 	diff = 0;
 	*val = 0;
 
-	while (isxdigit(**cp) && (diff <= max_digit)) {
+	while (diff <= max_digit) {
+		int value = hex_to_bin(**cp);
 
-		if (isdigit(**cp))
-			value = **cp - '0';
-		else
-			value = tolower(**cp) - 'a' + 10;
+		if (value < 0)
+			break;
 		*val = *val * 16 + value;
 		(*cp)++;
 		diff++;
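
The new loop leans on the kernel's hex_to_bin() helper, which returns a hex digit's value or a negative number for anything else, so a single call replaces the isxdigit()/isdigit()/tolower() arithmetic. A minimal user-space sketch of the same loop shape (hex_to_bin_sketch and pure_hex_sketch are illustrative stand-ins, not the kernel functions):

#include <ctype.h>
#include <stdio.h>

/* Stand-in for the kernel's hex_to_bin(): digit value, or -1 if invalid. */
static int hex_to_bin_sketch(char ch)
{
	if (ch >= '0' && ch <= '9')
		return ch - '0';
	ch = (char)tolower(ch);
	if (ch >= 'a' && ch <= 'f')
		return ch - 'a' + 10;
	return -1;
}

/* Accumulate hex digits from *cp until a non-digit, as pure_hex() now does. */
static int pure_hex_sketch(const char **cp, unsigned int *val, int max_digit)
{
	int diff = 0;

	*val = 0;
	while (diff <= max_digit) {
		int value = hex_to_bin_sketch(**cp);

		if (value < 0)
			break;
		*val = *val * 16 + value;
		(*cp)++;
		diff++;
	}
	return diff;
}

int main(void)
{
	const char *s = "1a2f,rest";
	unsigned int val;

	pure_hex_sketch(&s, &val, 4);
	printf("0x%x stopped at \"%s\"\n", val, s);	/* 0x1a2f stopped at ",rest" */
	return 0;
}
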
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index 97b25d68e3e7..5c567414c4bb 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -67,6 +67,27 @@ __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
 }
 
 /*
+ * Remove references from ccw devices to ccw group device and from
+ * ccw group device to ccw devices.
+ */
+static void __ccwgroup_remove_cdev_refs(struct ccwgroup_device *gdev)
+{
+	struct ccw_device *cdev;
+	int i;
+
+	for (i = 0; i < gdev->count; i++) {
+		cdev = gdev->cdev[i];
+		if (!cdev)
+			continue;
+		spin_lock_irq(cdev->ccwlock);
+		dev_set_drvdata(&cdev->dev, NULL);
+		spin_unlock_irq(cdev->ccwlock);
+		gdev->cdev[i] = NULL;
+		put_device(&cdev->dev);
+	}
+}
+
+/*
  * Provide an 'ungroup' attribute so the user can remove group devices no
  * longer needed or accidentially created. Saves memory :)
  */
@@ -78,6 +99,7 @@ static void ccwgroup_ungroup_callback(struct device *dev)
 	if (device_is_registered(&gdev->dev)) {
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 	}
 	mutex_unlock(&gdev->reg_mutex);
 }
@@ -116,21 +138,7 @@ static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store);
 static void
 ccwgroup_release (struct device *dev)
 {
-	struct ccwgroup_device *gdev;
-	int i;
-
-	gdev = to_ccwgroupdev(dev);
-
-	for (i = 0; i < gdev->count; i++) {
-		if (gdev->cdev[i]) {
-			spin_lock_irq(gdev->cdev[i]->ccwlock);
-			if (dev_get_drvdata(&gdev->cdev[i]->dev) == gdev)
-				dev_set_drvdata(&gdev->cdev[i]->dev, NULL);
-			spin_unlock_irq(gdev->cdev[i]->ccwlock);
-			put_device(&gdev->cdev[i]->dev);
-		}
-	}
-	kfree(gdev);
+	kfree(to_ccwgroupdev(dev));
 }
 
 static int
@@ -420,7 +428,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
 	gdev = to_ccwgroupdev(dev);
 	gdrv = to_ccwgroupdrv(dev->driver);
 
-	if (!try_module_get(gdrv->owner))
+	if (!try_module_get(gdrv->driver.owner))
 		return -EINVAL;
 
 	ret = strict_strtoul(buf, 0, &value);
@@ -434,7 +442,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const
 	else
 		ret = -EINVAL;
 out:
-	module_put(gdrv->owner);
+	module_put(gdrv->driver.owner);
 	return (ret == 0) ? count : ret;
 }
 
@@ -608,8 +616,6 @@ int ccwgroup_driver_register(struct ccwgroup_driver *cdriver)
 {
 	/* register our new driver with the core */
 	cdriver->driver.bus = &ccwgroup_bus_type;
-	cdriver->driver.name = cdriver->name;
-	cdriver->driver.owner = cdriver->owner;
 
 	return driver_register(&cdriver->driver);
 }
@@ -639,6 +645,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
 		mutex_lock(&gdev->reg_mutex);
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 		mutex_unlock(&gdev->reg_mutex);
 		put_device(dev);
 	}
@@ -660,25 +667,6 @@ int ccwgroup_probe_ccwdev(struct ccw_device *cdev)
 	return 0;
 }
 
-static struct ccwgroup_device *
-__ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev)
-{
-	struct ccwgroup_device *gdev;
-
-	gdev = dev_get_drvdata(&cdev->dev);
-	if (gdev) {
-		if (get_device(&gdev->dev)) {
-			mutex_lock(&gdev->reg_mutex);
-			if (device_is_registered(&gdev->dev))
-				return gdev;
-			mutex_unlock(&gdev->reg_mutex);
-			put_device(&gdev->dev);
-		}
-		return NULL;
-	}
-	return NULL;
-}
-
 /**
  * ccwgroup_remove_ccwdev() - remove function for slave devices
  * @cdev: ccw device to be removed
@@ -694,13 +682,25 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
 	/* Ignore offlining errors, device is gone anyway. */
 	ccw_device_set_offline(cdev);
 	/* If one of its devices is gone, the whole group is done for. */
-	gdev = __ccwgroup_get_gdev_by_cdev(cdev);
-	if (gdev) {
+	spin_lock_irq(cdev->ccwlock);
+	gdev = dev_get_drvdata(&cdev->dev);
+	if (!gdev) {
+		spin_unlock_irq(cdev->ccwlock);
+		return;
+	}
+	/* Get ccwgroup device reference for local processing. */
+	get_device(&gdev->dev);
+	spin_unlock_irq(cdev->ccwlock);
+	/* Unregister group device. */
+	mutex_lock(&gdev->reg_mutex);
+	if (device_is_registered(&gdev->dev)) {
 		__ccwgroup_remove_symlinks(gdev);
 		device_unregister(&gdev->dev);
-		mutex_unlock(&gdev->reg_mutex);
-		put_device(&gdev->dev);
+		__ccwgroup_remove_cdev_refs(gdev);
 	}
+	mutex_unlock(&gdev->reg_mutex);
+	/* Release ccwgroup device reference for local processing. */
+	put_device(&gdev->dev);
 }
 
 MODULE_LICENSE("GPL");
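
Two patterns are worth calling out in this file: the driver's name and owner now live in the embedded struct device_driver rather than in duplicate ccwgroup_driver fields, and ccwgroup_remove_ccwdev() now peeks at the back-pointer under the ccw lock, takes its own reference, drops the lock, and only then does the slow unregister work under reg_mutex. A compressed user-space sketch of that second locking/refcount order (pthreads and a manual refcount stand in for ccwlock, reg_mutex and get/put_device(); all names are illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct group {
	pthread_mutex_t reg_mutex;	/* serializes (un)registration */
	int refcount;			/* stands in for get/put_device() */
	bool registered;
};

struct slave {
	pthread_mutex_t lock;		/* stands in for cdev->ccwlock */
	struct group *drvdata;		/* back-pointer, like dev_get_drvdata() */
};

static void group_get(struct group *g) { g->refcount++; }
static void group_put(struct group *g)
{
	if (--g->refcount == 0)
		free(g);
}

static void remove_slave(struct slave *s)
{
	struct group *g;

	pthread_mutex_lock(&s->lock);		/* 1: short lock: peek + ref */
	g = s->drvdata;
	if (!g) {
		pthread_mutex_unlock(&s->lock);
		return;
	}
	group_get(g);				/* local reference */
	pthread_mutex_unlock(&s->lock);

	pthread_mutex_lock(&g->reg_mutex);	/* 2: slow work under mutex */
	if (g->registered) {
		g->registered = false;		/* device_unregister() analogue */
		pthread_mutex_lock(&s->lock);	/* __ccwgroup_remove_cdev_refs() */
		s->drvdata = NULL;
		pthread_mutex_unlock(&s->lock);
		group_put(g);			/* drop the slave's reference */
	}
	pthread_mutex_unlock(&g->reg_mutex);
	group_put(g);				/* 3: release local reference */
}

int main(void)
{
	struct group *g = calloc(1, sizeof(*g));
	struct slave s;

	pthread_mutex_init(&g->reg_mutex, NULL);
	g->refcount = 1;	/* the slave's reference */
	g->registered = true;
	pthread_mutex_init(&s.lock, NULL);
	s.drvdata = g;

	remove_slave(&s);
	printf("drvdata=%p\n", (void *)s.drvdata);	/* (nil) */
	return 0;
}
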
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 6c9fa15aac7b..2d32233943a9 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/chp.c
  *
- * Copyright IBM Corp. 1999,2007
+ * Copyright IBM Corp. 1999,2010
  * Author(s): Cornelia Huck (cornelia.huck@de.ibm.com)
  *	      Arnd Bergmann (arndb@de.ibm.com)
  *	      Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
@@ -54,12 +54,6 @@ static struct work_struct cfg_work;
 /* Wait queue for configure completion events. */
 static wait_queue_head_t cfg_wait_queue;
 
-/* Return channel_path struct for given chpid. */
-static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
-{
-	return channel_subsystems[chpid.cssid]->chps[chpid.id];
-}
-
 /* Set vary state for given chpid. */
 static void set_chp_logically_online(struct chp_id chpid, int onoff)
 {
@@ -241,11 +235,13 @@ static ssize_t chp_status_show(struct device *dev,
 				   struct device_attribute *attr, char *buf)
 {
 	struct channel_path *chp = to_channelpath(dev);
+	int status;
 
-	if (!chp)
-		return 0;
-	return (chp_get_status(chp->chpid) ? sprintf(buf, "online\n") :
-		sprintf(buf, "offline\n"));
+	mutex_lock(&chp->lock);
+	status = chp->state;
+	mutex_unlock(&chp->lock);
+
+	return status ? sprintf(buf, "online\n") : sprintf(buf, "offline\n");
 }
 
 static ssize_t chp_status_write(struct device *dev,
@@ -261,15 +257,18 @@ static ssize_t chp_status_write(struct device *dev,
 	if (!num_args)
 		return count;
 
-	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1"))
+	if (!strnicmp(cmd, "on", 2) || !strcmp(cmd, "1")) {
+		mutex_lock(&cp->lock);
 		error = s390_vary_chpid(cp->chpid, 1);
-	else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0"))
+		mutex_unlock(&cp->lock);
+	} else if (!strnicmp(cmd, "off", 3) || !strcmp(cmd, "0")) {
+		mutex_lock(&cp->lock);
 		error = s390_vary_chpid(cp->chpid, 0);
-	else
+		mutex_unlock(&cp->lock);
+	} else
 		error = -EINVAL;
 
 	return error < 0 ? error : count;
-
 }
 
 static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
@@ -315,10 +314,12 @@ static ssize_t chp_type_show(struct device *dev, struct device_attribute *attr,
 				 char *buf)
 {
 	struct channel_path *chp = to_channelpath(dev);
+	u8 type;
 
-	if (!chp)
-		return 0;
-	return sprintf(buf, "%x\n", chp->desc.desc);
+	mutex_lock(&chp->lock);
+	type = chp->desc.desc;
+	mutex_unlock(&chp->lock);
+	return sprintf(buf, "%x\n", type);
 }
 
 static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
@@ -395,6 +396,7 @@ int chp_new(struct chp_id chpid)
 	chp->state = 1;
 	chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
 	chp->dev.release = chp_release;
+	mutex_init(&chp->lock);
 
 	/* Obtain channel path description and fill it in. */
 	ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc);
@@ -464,7 +466,10 @@ void *chp_get_chp_desc(struct chp_id chpid)
 	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
 	if (!desc)
 		return NULL;
+
+	mutex_lock(&chp->lock);
 	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
+	mutex_unlock(&chp->lock);
 	return desc;
 }
 
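
The new chp->lock usage in chp_status_show() and chp_type_show() follows the usual sysfs pattern: snapshot the field into a local while holding the lock, release the lock, then format the output, so the mutex is never held across sprintf(). A minimal pthread sketch of that shape (channel_path_sketch and status_show are illustrative):

#include <pthread.h>
#include <stdio.h>

struct channel_path_sketch {
	pthread_mutex_t lock;	/* serializes state/desc, like chp->lock */
	int state;		/* 1 = online, 0 = offline */
};

/* Snapshot under the lock, format outside it (chp_status_show() shape). */
static int status_show(struct channel_path_sketch *chp, char *buf, size_t len)
{
	int status;

	pthread_mutex_lock(&chp->lock);
	status = chp->state;
	pthread_mutex_unlock(&chp->lock);

	return snprintf(buf, len, "%s", status ? "online\n" : "offline\n");
}

int main(void)
{
	struct channel_path_sketch chp = {
		.lock = PTHREAD_MUTEX_INITIALIZER, .state = 1 };
	char buf[16];

	status_show(&chp, buf, sizeof(buf));
	fputs(buf, stdout);	/* online */
	return 0;
}
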
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h
index 26c3d2246176..12b4903d6fe3 100644
--- a/drivers/s390/cio/chp.h
+++ b/drivers/s390/cio/chp.h
@@ -1,7 +1,7 @@
 /*
  * drivers/s390/cio/chp.h
  *
- * Copyright IBM Corp. 2007
+ * Copyright IBM Corp. 2007,2010
  * Author(s): Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
  */
 
@@ -10,6 +10,7 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/mutex.h>
 #include <asm/chpid.h>
 #include "chsc.h"
 #include "css.h"
@@ -40,16 +41,23 @@ static inline int chp_test_bit(u8 *bitmap, int num)
 
 
 struct channel_path {
+	struct device dev;
 	struct chp_id chpid;
+	struct mutex lock; /* Serialize access to below members. */
 	int state;
 	struct channel_path_desc desc;
 	/* Channel-measurement related stuff: */
 	int cmg;
 	int shared;
 	void *cmg_chars;
-	struct device dev;
 };
 
+/* Return channel_path struct for given chpid. */
+static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
+{
+	return channel_subsystems[chpid.cssid]->chps[chpid.id];
+}
+
 int chp_get_status(struct chp_id chpid);
 u8 chp_get_sch_opm(struct subchannel *sch);
 int chp_is_registered(struct chp_id chpid);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 4cbb1a6ca33c..75c3f1f8fd43 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -2,7 +2,7 @@
  * drivers/s390/cio/chsc.c
  *   S/390 common I/O routines -- channel subsystem call
  *
- *    Copyright IBM Corp. 1999,2008
+ *    Copyright IBM Corp. 1999,2010
  *    Author(s): Ingo Adlung (adlung@de.ibm.com)
  *		 Cornelia Huck (cornelia.huck@de.ibm.com)
  *		 Arnd Bergmann (arndb@de.ibm.com)
@@ -29,8 +29,8 @@
 #include "chsc.h"
 
 static void *sei_page;
-static DEFINE_SPINLOCK(siosl_lock);
-static DEFINE_SPINLOCK(sda_lock);
+static void *chsc_page;
+static DEFINE_SPINLOCK(chsc_page_lock);
 
 /**
  * chsc_error_from_response() - convert a chsc response to an error
@@ -85,17 +85,15 @@ struct chsc_ssd_area {
 
 int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 {
-	unsigned long page;
 	struct chsc_ssd_area *ssd_area;
 	int ccode;
 	int ret;
 	int i;
 	int mask;
 
-	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!page)
-		return -ENOMEM;
-	ssd_area = (struct chsc_ssd_area *) page;
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	ssd_area = chsc_page;
 	ssd_area->request.length = 0x0010;
 	ssd_area->request.code = 0x0004;
 	ssd_area->ssid = schid.ssid;
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 	/* Check response. */
 	if (ccode > 0) {
 		ret = (ccode == 3) ? -ENODEV : -EBUSY;
-		goto out_free;
+		goto out;
 	}
 	ret = chsc_error_from_response(ssd_area->response.code);
 	if (ret != 0) {
 		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
 			      schid.ssid, schid.sch_no,
 			      ssd_area->response.code);
-		goto out_free;
+		goto out;
 	}
 	if (!ssd_area->sch_valid) {
 		ret = -ENODEV;
-		goto out_free;
+		goto out;
 	}
 	/* Copy data */
 	ret = 0;
 	memset(ssd, 0, sizeof(struct chsc_ssd_info));
 	if ((ssd_area->st != SUBCHANNEL_TYPE_IO) &&
 	    (ssd_area->st != SUBCHANNEL_TYPE_MSG))
-		goto out_free;
+		goto out;
 	ssd->path_mask = ssd_area->path_mask;
 	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
 	for (i = 0; i < 8; i++) {
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 		if (ssd_area->fla_valid_mask & mask)
 			ssd->fla[i] = ssd_area->fla[i];
 	}
-out_free:
-	free_page(page);
+out:
+	spin_unlock_irq(&chsc_page_lock);
 	return ret;
 }
 
@@ -328,6 +326,36 @@ static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 	s390_process_res_acc(&link);
 }
 
+static void chsc_process_sei_chp_avail(struct chsc_sei_area *sei_area)
+{
+	struct channel_path *chp;
+	struct chp_id chpid;
+	u8 *data;
+	int num;
+
+	CIO_CRW_EVENT(4, "chsc: channel path availability information\n");
+	if (sei_area->rs != 0)
+		return;
+	data = sei_area->ccdf;
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data, num))
+			continue;
+		chpid.id = num;
+
+		CIO_CRW_EVENT(4, "Update information for channel path "
+			      "%x.%02x\n", chpid.cssid, chpid.id);
+		chp = chpid_to_chp(chpid);
+		if (!chp) {
+			chp_new(chpid);
+			continue;
+		}
+		mutex_lock(&chp->lock);
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
+		mutex_unlock(&chp->lock);
+	}
+}
+
 struct chp_config_data {
 	u8 map[32];
 	u8 op;
@@ -378,9 +406,12 @@ static void chsc_process_sei(struct chsc_sei_area *sei_area)
 	case 1: /* link incident*/
 		chsc_process_sei_link_incident(sei_area);
 		break;
-	case 2: /* i/o resource accessibiliy */
+	case 2: /* i/o resource accessibility */
 		chsc_process_sei_res_acc(sei_area);
 		break;
+	case 7: /* channel-path-availability information */
+		chsc_process_sei_chp_avail(sei_area);
+		break;
 	case 8: /* channel-path-configuration notification */
 		chsc_process_sei_chp_config(sei_area);
 		break;
@@ -497,6 +528,7 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
  */
 int chsc_chp_vary(struct chp_id chpid, int on)
 {
+	struct channel_path *chp = chpid_to_chp(chpid);
 	struct chp_link link;
 
 	memset(&link, 0, sizeof(struct chp_link));
@@ -506,11 +538,12 @@ int chsc_chp_vary(struct chp_id chpid, int on)
 	/*
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
-
-	if (on)
+	if (on) {
+		/* Try to update the channel path descritor. */
+		chsc_determine_base_channel_path_desc(chpid, &chp->desc);
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_on,
 					   __s390_vary_chpid_on, &link);
-	else
+	} else
 		for_each_subchannel_staged(s390_subchannel_vary_chpid_off,
 					   NULL, &link);
 
@@ -552,7 +585,7 @@ cleanup:
 	return ret;
 }
 
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
+int __chsc_do_secm(struct channel_subsystem *css, int enable)
 {
 	struct {
 		struct chsc_header request;
@@ -573,7 +606,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	} __attribute__ ((packed)) *secm_area;
 	int ret, ccode;
 
-	secm_area = page;
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	secm_area = chsc_page;
 	secm_area->request.length = 0x0050;
 	secm_area->request.code = 0x0016;
 
@@ -584,8 +619,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	secm_area->operation_code = enable ? 0 : 1;
 
 	ccode = chsc(secm_area);
-	if (ccode > 0)
-		return (ccode == 3) ? -ENODEV : -EBUSY;
+	if (ccode > 0) {
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out;
+	}
 
 	switch (secm_area->response.code) {
 	case 0x0102:
@@ -598,37 +635,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n",
 			      secm_area->response.code);
+out:
+	spin_unlock_irq(&chsc_page_lock);
 	return ret;
 }
 
 int
 chsc_secm(struct channel_subsystem *css, int enable)
 {
-	void *secm_area;
 	int ret;
 
-	secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!secm_area)
-		return -ENOMEM;
-
 	if (enable && !css->cm_enabled) {
 		css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
 		if (!css->cub_addr1 || !css->cub_addr2) {
 			free_page((unsigned long)css->cub_addr1);
 			free_page((unsigned long)css->cub_addr2);
-			free_page((unsigned long)secm_area);
 			return -ENOMEM;
 		}
 	}
-	ret = __chsc_do_secm(css, enable, secm_area);
+	ret = __chsc_do_secm(css, enable);
 	if (!ret) {
 		css->cm_enabled = enable;
 		if (css->cm_enabled) {
 			ret = chsc_add_cmg_attr(css);
 			if (ret) {
-				memset(secm_area, 0, PAGE_SIZE);
-				__chsc_do_secm(css, 0, secm_area);
+				__chsc_do_secm(css, 0);
 				css->cm_enabled = 0;
 			}
 		} else
@@ -638,44 +670,24 @@ chsc_secm(struct channel_subsystem *css, int enable)
 		free_page((unsigned long)css->cub_addr1);
 		free_page((unsigned long)css->cub_addr2);
 	}
-	free_page((unsigned long)secm_area);
 	return ret;
 }
 
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
-				     int c, int m,
-				     struct chsc_response_struct *resp)
+				     int c, int m, void *page)
 {
+	struct chsc_scpd *scpd_area;
 	int ccode, ret;
 
-	struct {
-		struct chsc_header request;
-		u32 : 2;
-		u32 m : 1;
-		u32 c : 1;
-		u32 fmt : 4;
-		u32 cssid : 8;
-		u32 : 4;
-		u32 rfmt : 4;
-		u32 first_chpid : 8;
-		u32 : 24;
-		u32 last_chpid : 8;
-		u32 zeroes1;
-		struct chsc_header response;
-		u8 data[PAGE_SIZE - 20];
-	} __attribute__ ((packed)) *scpd_area;
-
 	if ((rfmt == 1) && !css_general_characteristics.fcs)
 		return -EINVAL;
 	if ((rfmt == 2) && !css_general_characteristics.cib)
 		return -EINVAL;
-	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scpd_area)
-		return -ENOMEM;
 
+	memset(page, 0, PAGE_SIZE);
+	scpd_area = page;
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
-
 	scpd_area->cssid = chpid.cssid;
 	scpd_area->first_chpid = chpid.id;
 	scpd_area->last_chpid = chpid.id;
@@ -685,20 +697,13 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
 	scpd_area->rfmt = rfmt;
 
 	ccode = chsc(scpd_area);
-	if (ccode > 0) {
-		ret = (ccode == 3) ? -ENODEV : -EBUSY;
-		goto out;
-	}
+	if (ccode > 0)
+		return (ccode == 3) ? -ENODEV : -EBUSY;
 
 	ret = chsc_error_from_response(scpd_area->response.code);
-	if (ret == 0)
-		/* Success. */
-		memcpy(resp, &scpd_area->response, scpd_area->response.length);
-	else
+	if (ret)
 		CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n",
 			      scpd_area->response.code);
-out:
-	free_page((unsigned long)scpd_area);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc);
@@ -707,17 +712,38 @@ int chsc_determine_base_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc *desc)
 {
 	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	unsigned long flags;
 	int ret;
 
-	chsc_resp = kzalloc(sizeof(*chsc_resp), GFP_KERNEL);
-	if (!chsc_resp)
-		return -ENOMEM;
-	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, chsc_resp);
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 0, 0, scpd_area);
+	if (ret)
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
+	memcpy(desc, &chsc_resp->data, sizeof(*desc));
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
+	return ret;
+}
+
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc)
+{
+	struct chsc_response_struct *chsc_resp;
+	struct chsc_scpd *scpd_area;
+	int ret;
+
+	spin_lock_irq(&chsc_page_lock);
+	scpd_area = chsc_page;
+	ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area);
 	if (ret)
-		goto out_free;
+		goto out;
+	chsc_resp = (void *)&scpd_area->response;
 	memcpy(desc, &chsc_resp->data, sizeof(*desc));
-out_free:
-	kfree(chsc_resp);
+out:
+	spin_unlock_irq(&chsc_page_lock);
 	return ret;
 }
 
@@ -725,33 +751,22 @@ static void
 chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 			  struct cmg_chars *chars)
 {
-	switch (chp->cmg) {
-	case 2:
-	case 3:
-		chp->cmg_chars = kmalloc(sizeof(struct cmg_chars),
-					 GFP_KERNEL);
-		if (chp->cmg_chars) {
-			int i, mask;
-			struct cmg_chars *cmg_chars;
-
-			cmg_chars = chp->cmg_chars;
-			for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
-				mask = 0x80 >> (i + 3);
-				if (cmcv & mask)
-					cmg_chars->values[i] = chars->values[i];
-				else
-					cmg_chars->values[i] = 0;
-			}
-		}
-		break;
-	default:
-		/* No cmg-dependent data. */
-		break;
+	struct cmg_chars *cmg_chars;
+	int i, mask;
+
+	cmg_chars = chp->cmg_chars;
+	for (i = 0; i < NR_MEASUREMENT_CHARS; i++) {
+		mask = 0x80 >> (i + 3);
+		if (cmcv & mask)
+			cmg_chars->values[i] = chars->values[i];
+		else
+			cmg_chars->values[i] = 0;
 	}
 }
 
 int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
+	struct cmg_chars *cmg_chars;
 	int ccode, ret;
 
 	struct {
@@ -775,13 +790,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 		u32 data[NR_MEASUREMENT_CHARS];
 	} __attribute__ ((packed)) *scmc_area;
 
-	scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scmc_area)
+	chp->cmg_chars = NULL;
+	cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL);
+	if (!cmg_chars)
 		return -ENOMEM;
 
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scmc_area = chsc_page;
 	scmc_area->request.length = 0x0010;
 	scmc_area->request.code = 0x0022;
-
 	scmc_area->first_chpid = chp->chpid.id;
 	scmc_area->last_chpid = chp->chpid.id;
 
@@ -792,53 +810,65 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp)
 	}
 
 	ret = chsc_error_from_response(scmc_area->response.code);
-	if (ret == 0) {
-		/* Success. */
-		if (!scmc_area->not_valid) {
-			chp->cmg = scmc_area->cmg;
-			chp->shared = scmc_area->shared;
-			chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
-						  (struct cmg_chars *)
-						  &scmc_area->data);
-		} else {
-			chp->cmg = -1;
-			chp->shared = -1;
-		}
-	} else {
+	if (ret) {
 		CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n",
 			      scmc_area->response.code);
+		goto out;
+	}
+	if (scmc_area->not_valid) {
+		chp->cmg = -1;
+		chp->shared = -1;
+		goto out;
 	}
+	chp->cmg = scmc_area->cmg;
+	chp->shared = scmc_area->shared;
+	if (chp->cmg != 2 && chp->cmg != 3) {
+		/* No cmg-dependent data. */
+		goto out;
+	}
+	chp->cmg_chars = cmg_chars;
+	chsc_initialize_cmg_chars(chp, scmc_area->cmcv,
+				  (struct cmg_chars *) &scmc_area->data);
 out:
-	free_page((unsigned long)scmc_area);
+	spin_unlock_irq(&chsc_page_lock);
+	if (!chp->cmg_chars)
+		kfree(cmg_chars);
+
 	return ret;
 }
 
-int __init chsc_alloc_sei_area(void)
+int __init chsc_init(void)
 {
 	int ret;
 
 	sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!sei_page) {
-		CIO_MSG_EVENT(0, "Can't allocate page for processing of "
-			      "chsc machine checks!\n");
-		return -ENOMEM;
+	chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sei_page || !chsc_page) {
+		ret = -ENOMEM;
+		goto out_err;
 	}
 	ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw);
 	if (ret)
-		kfree(sei_page);
+		goto out_err;
+	return ret;
+out_err:
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
 	return ret;
 }
 
-void __init chsc_free_sei_area(void)
+void __init chsc_init_cleanup(void)
 {
 	crw_unregister_handler(CRW_RSC_CSS);
-	kfree(sei_page);
+	free_page((unsigned long)chsc_page);
+	free_page((unsigned long)sei_page);
 }
 
 int chsc_enable_facility(int operation_code)
 {
+	unsigned long flags;
 	int ret;
-	static struct {
+	struct {
 		struct chsc_header request;
 		u8 reserved1:4;
 		u8 format:4;
@@ -851,32 +881,33 @@ int chsc_enable_facility(int operation_code)
 		u32 reserved5:4;
 		u32 format2:4;
 		u32 reserved6:24;
-	} __attribute__ ((packed, aligned(4096))) sda_area;
+	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock(&sda_lock);
-	memset(&sda_area, 0, sizeof(sda_area));
-	sda_area.request.length = 0x0400;
-	sda_area.request.code = 0x0031;
-	sda_area.operation_code = operation_code;
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+	sda_area->request.length = 0x0400;
+	sda_area->request.code = 0x0031;
+	sda_area->operation_code = operation_code;
 
-	ret = chsc(&sda_area);
+	ret = chsc(sda_area);
 	if (ret > 0) {
 		ret = (ret == 3) ? -ENODEV : -EBUSY;
 		goto out;
 	}
 
-	switch (sda_area.response.code) {
+	switch (sda_area->response.code) {
 	case 0x0101:
 		ret = -EOPNOTSUPP;
 		break;
 	default:
-		ret = chsc_error_from_response(sda_area.response.code);
+		ret = chsc_error_from_response(sda_area->response.code);
 	}
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
-			      operation_code, sda_area.response.code);
- out:
-	spin_unlock(&sda_lock);
+			      operation_code, sda_area->response.code);
+out:
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
 
@@ -895,13 +926,12 @@ chsc_determine_css_characteristics(void)
 		struct chsc_header response;
 		u32 reserved4;
 		u32 general_char[510];
-		u32 chsc_char[518];
+		u32 chsc_char[508];
 	} __attribute__ ((packed)) *scsc_area;
 
-	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!scsc_area)
-		return -ENOMEM;
-
+	spin_lock_irq(&chsc_page_lock);
+	memset(chsc_page, 0, PAGE_SIZE);
+	scsc_area = chsc_page;
 	scsc_area->request.length = 0x0010;
 	scsc_area->request.code = 0x0010;
 
@@ -921,7 +951,7 @@ chsc_determine_css_characteristics(void)
 		CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n",
 			      scsc_area->response.code);
 exit:
-	free_page ((unsigned long) scsc_area);
+	spin_unlock_irq(&chsc_page_lock);
 	return result;
 }
 
@@ -976,29 +1006,29 @@ int chsc_sstpi(void *page, void *result, size_t size)
 	return (rr->response.code == 0x0001) ? 0 : -EIO;
 }
 
-static struct {
-	struct chsc_header request;
-	u32 word1;
-	struct subchannel_id sid;
-	u32 word3;
-	struct chsc_header response;
-	u32 word[11];
-} __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE)));
-
 int chsc_siosl(struct subchannel_id schid)
 {
+	struct {
+		struct chsc_header request;
+		u32 word1;
+		struct subchannel_id sid;
+		u32 word3;
+		struct chsc_header response;
+		u32 word[11];
+	} __attribute__ ((packed)) *siosl_area;
 	unsigned long flags;
 	int ccode;
 	int rc;
 
-	spin_lock_irqsave(&siosl_lock, flags);
-	memset(&siosl_area, 0, sizeof(siosl_area));
-	siosl_area.request.length = 0x0010;
-	siosl_area.request.code = 0x0046;
-	siosl_area.word1 = 0x80000000;
-	siosl_area.sid = schid;
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	siosl_area = chsc_page;
+	siosl_area->request.length = 0x0010;
+	siosl_area->request.code = 0x0046;
+	siosl_area->word1 = 0x80000000;
+	siosl_area->sid = schid;
 
-	ccode = chsc(&siosl_area);
+	ccode = chsc(siosl_area);
 	if (ccode > 0) {
 		if (ccode == 3)
 			rc = -ENODEV;
@@ -1008,17 +1038,16 @@ int chsc_siosl(struct subchannel_id schid)
 			      schid.ssid, schid.sch_no, ccode);
 		goto out;
 	}
-	rc = chsc_error_from_response(siosl_area.response.code);
+	rc = chsc_error_from_response(siosl_area->response.code);
 	if (rc)
 		CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n",
 			      schid.ssid, schid.sch_no,
-			      siosl_area.response.code);
+			      siosl_area->response.code);
 	else
 		CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n",
 			      schid.ssid, schid.sch_no);
 out:
-	spin_unlock_irqrestore(&siosl_lock, flags);
-
+	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(chsc_siosl);
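
The recurring edit in chsc.c replaces per-call get_zeroed_page(GFP_KERNEL | GFP_DMA) buffers and the separate siosl_lock/sda_lock with one preallocated DMA-capable page, chsc_page, guarded by chsc_page_lock: each caller locks, zeroes the page, overlays its request structure on it, issues the call, and unlocks. A user-space sketch of that shape (a static buffer and a pthread mutex stand in for the page and the spinlock; the request layout is illustrative):

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SKETCH 4096

/* One preallocated request/response buffer, like chsc_page. */
static unsigned char chsc_buf[PAGE_SIZE_SKETCH];
static pthread_mutex_t chsc_buf_lock = PTHREAD_MUTEX_INITIALIZER;

struct req_sketch {			/* overlay, like the *sda_area structs */
	uint16_t length;
	uint16_t code;
	uint32_t operation_code;
};

static int issue_request(uint32_t operation_code)
{
	struct req_sketch *req;
	int rc;

	pthread_mutex_lock(&chsc_buf_lock);	/* serialize buffer users */
	memset(chsc_buf, 0, sizeof(chsc_buf));	/* fresh page per request */
	req = (struct req_sketch *)chsc_buf;
	req->length = 0x0010;
	req->code = 0x0031;
	req->operation_code = operation_code;
	rc = 0;					/* here: issue the call, e.g. chsc(req) */
	pthread_mutex_unlock(&chsc_buf_lock);
	return rc;
}

int main(void)
{
	printf("rc=%d\n", issue_request(2));
	return 0;
}
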
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 5453013f094b..3f15b2aaeaea 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -35,6 +35,22 @@ struct channel_path_desc {
 	u8 chpp;
 } __attribute__ ((packed));
 
+struct channel_path_desc_fmt1 {
+	u8 flags;
+	u8 lsn;
+	u8 desc;
+	u8 chpid;
+	u32:24;
+	u8 chpp;
+	u32 unused[3];
+	u16 mdc;
+	u16:13;
+	u8 r:1;
+	u8 s:1;
+	u8 f:1;
+	u32 zeros[2];
+} __attribute__ ((packed));
+
 struct channel_path;
 
 struct css_chsc_char {
@@ -57,23 +73,43 @@ struct chsc_ssd_info {
 	struct chp_id chpid[8];
 	u16 fla[8];
 };
+
+struct chsc_scpd {
+	struct chsc_header request;
+	u32:2;
+	u32 m:1;
+	u32 c:1;
+	u32 fmt:4;
+	u32 cssid:8;
+	u32:4;
+	u32 rfmt:4;
+	u32 first_chpid:8;
+	u32:24;
+	u32 last_chpid:8;
+	u32 zeroes1;
+	struct chsc_header response;
+	u8 data[PAGE_SIZE - 20];
+} __attribute__ ((packed));
+
+
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
 extern int chsc_determine_css_characteristics(void);
-extern int chsc_alloc_sei_area(void);
-extern void chsc_free_sei_area(void);
+extern int chsc_init(void);
+extern void chsc_init_cleanup(void);
 
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
-int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page);
+int __chsc_do_secm(struct channel_subsystem *css, int enable);
 
 int chsc_chp_vary(struct chp_id chpid, int on);
 int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt,
-				     int c, int m,
-				     struct chsc_response_struct *resp);
+				     int c, int m, void *page);
 int chsc_determine_base_channel_path_desc(struct chp_id chpid,
 					  struct channel_path_desc *desc);
+int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid,
+					  struct channel_path_desc_fmt1 *desc);
 void chsc_chp_online(struct chp_id chpid);
 void chsc_chp_offline(struct chp_id chpid);
 int chsc_get_channel_measurement_chars(struct channel_path *chp);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index a83877c664a6..e950f1ad4dd1 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -50,7 +50,7 @@ MODULE_LICENSE("GPL");
 
 static void chsc_subchannel_irq(struct subchannel *sch)
 {
-	struct chsc_private *private = sch->private;
+	struct chsc_private *private = dev_get_drvdata(&sch->dev);
 	struct chsc_request *request = private->request;
 	struct irb *irb = (struct irb *)&S390_lowcore.irb;
 
@@ -80,13 +80,14 @@ static int chsc_subchannel_probe(struct subchannel *sch)
 	private = kzalloc(sizeof(*private), GFP_KERNEL);
 	if (!private)
 		return -ENOMEM;
+	dev_set_drvdata(&sch->dev, private);
 	ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
 	if (ret) {
 		CHSC_MSG(0, "Failed to enable 0.%x.%04x: %d\n",
 			 sch->schid.ssid, sch->schid.sch_no, ret);
+		dev_set_drvdata(&sch->dev, NULL);
 		kfree(private);
 	} else {
-		sch->private = private;
 		if (dev_get_uevent_suppress(&sch->dev)) {
 			dev_set_uevent_suppress(&sch->dev, 0);
 			kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
@@ -100,8 +101,8 @@ static int chsc_subchannel_remove(struct subchannel *sch)
 	struct chsc_private *private;
 
 	cio_disable_subchannel(sch);
-	private = sch->private;
-	sch->private = NULL;
+	private = dev_get_drvdata(&sch->dev);
+	dev_set_drvdata(&sch->dev, NULL);
 	if (private->request) {
 		complete(&private->request->completion);
 		put_device(&sch->dev);
@@ -147,7 +148,10 @@ static struct css_device_id chsc_subchannel_ids[] = {
 MODULE_DEVICE_TABLE(css, chsc_subchannel_ids);
 
 static struct css_driver chsc_subchannel_driver = {
-	.owner = THIS_MODULE,
+	.drv = {
+		.owner = THIS_MODULE,
+		.name = "chsc_subchannel",
+	},
 	.subchannel_type = chsc_subchannel_ids,
 	.irq = chsc_subchannel_irq,
 	.probe = chsc_subchannel_probe,
@@ -157,7 +161,6 @@ static struct css_driver chsc_subchannel_driver = {
 	.freeze = chsc_subchannel_freeze,
 	.thaw = chsc_subchannel_restore,
 	.restore = chsc_subchannel_restore,
-	.name = "chsc_subchannel",
 };
 
 static int __init chsc_init_dbfs(void)
@@ -241,7 +244,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
 	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
 	while ((sch = chsc_get_next_subchannel(sch))) {
 		spin_lock(sch->lock);
-		private = sch->private;
+		private = dev_get_drvdata(&sch->dev);
 		if (private->request) {
 			spin_unlock(sch->lock);
 			ret = -EBUSY;
@@ -688,25 +691,31 @@ out_free:
 
 static int chsc_ioctl_chpd(void __user *user_chpd)
 {
+	struct chsc_scpd *scpd_area;
 	struct chsc_cpd_info *chpd;
 	int ret;
 
 	chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
-	if (!chpd)
-		return -ENOMEM;
+	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!scpd_area || !chpd) {
+		ret = -ENOMEM;
+		goto out_free;
+	}
 	if (copy_from_user(chpd, user_chpd, sizeof(*chpd))) {
 		ret = -EFAULT;
 		goto out_free;
 	}
 	ret = chsc_determine_channel_path_desc(chpd->chpid, chpd->fmt,
 					       chpd->rfmt, chpd->c, chpd->m,
-					       &chpd->chpdb);
+					       scpd_area);
 	if (ret)
 		goto out_free;
+	memcpy(&chpd->chpdb, &scpd_area->response, scpd_area->response.length);
 	if (copy_to_user(user_chpd, chpd, sizeof(*chpd)))
 		ret = -EFAULT;
 out_free:
 	kfree(chpd);
+	free_page((unsigned long)scpd_area);
 	return ret;
 }
 
@@ -806,6 +815,7 @@ static const struct file_operations chsc_fops = {
 	.open = nonseekable_open,
 	.unlocked_ioctl = chsc_ioctl,
 	.compat_ioctl = chsc_ioctl,
+	.llseek = no_llseek,
 };
 
 static struct miscdevice chsc_misc_device = {
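
chsc_sch.c drops the bespoke sch->private pointer in favor of the driver core's per-device pointer (dev_set_drvdata()/dev_get_drvdata()). Note the ordering in the probe path: the pointer is published before cio_enable_subchannel(), because the IRQ handler may run as soon as the subchannel is enabled, and it is cleared again on the error path before the allocation is freed. A condensed sketch of that ordering (all names are illustrative, not the driver-core API):

#include <stdio.h>
#include <stdlib.h>

struct dev_sketch {
	void *drvdata;			/* what dev_set_drvdata() manages */
};

struct private_sketch {
	int request_pending;
};

static void set_drvdata(struct dev_sketch *d, void *p) { d->drvdata = p; }
static void *get_drvdata(struct dev_sketch *d) { return d->drvdata; }

static int enable_fails;		/* toggle to exercise the error path */

static int probe_sketch(struct dev_sketch *dev)
{
	struct private_sketch *private = calloc(1, sizeof(*private));

	if (!private)
		return -1;
	/* Publish before enabling: the IRQ handler may run immediately. */
	set_drvdata(dev, private);
	if (enable_fails) {		/* cio_enable_subchannel() failed */
		set_drvdata(dev, NULL);
		free(private);
		return -1;
	}
	return 0;
}

static void remove_sketch(struct dev_sketch *dev)
{
	struct private_sketch *private = get_drvdata(dev);

	set_drvdata(dev, NULL);
	free(private);
}

int main(void)
{
	struct dev_sketch dev = { 0 };

	if (probe_sketch(&dev) == 0)
		remove_sketch(&dev);
	printf("drvdata=%p\n", dev.drvdata);	/* (nil) */
	return 0;
}
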
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index f4e6cf3aceb8..cbde448f9947 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -84,29 +84,14 @@ out_unregister:
 
 arch_initcall (cio_debug_init);
 
-int
-cio_set_options (struct subchannel *sch, int flags)
+int cio_set_options(struct subchannel *sch, int flags)
 {
-	sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
-	sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
-	sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
-	return 0;
-}
+	struct io_subchannel_private *priv = to_io_private(sch);
 
-/* FIXME: who wants to use this? */
-int
-cio_get_options (struct subchannel *sch)
-{
-	int flags;
-
-	flags = 0;
-	if (sch->options.suspend)
-		flags |= DOIO_ALLOW_SUSPEND;
-	if (sch->options.prefetch)
-		flags |= DOIO_DENY_PREFETCH;
-	if (sch->options.inter)
-		flags |= DOIO_SUPPRESS_INTER;
-	return flags;
+	priv->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0;
+	priv->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0;
+	priv->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0;
+	return 0;
 }
 
 static int
@@ -139,21 +124,21 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
 	       __u8 lpm,		/* logical path mask */
 	       __u8 key)		/* storage key */
 {
+	struct io_subchannel_private *priv = to_io_private(sch);
+	union orb *orb = &priv->orb;
 	int ccode;
-	union orb *orb;
 
 	CIO_TRACE_EVENT(5, "stIO");
 	CIO_TRACE_EVENT(5, dev_name(&sch->dev));
 
-	orb = &to_io_private(sch)->orb;
 	memset(orb, 0, sizeof(union orb));
 	/* sch is always under 2G. */
 	orb->cmd.intparm = (u32)(addr_t)sch;
 	orb->cmd.fmt = 1;
 
-	orb->cmd.pfch = sch->options.prefetch == 0;
-	orb->cmd.spnd = sch->options.suspend;
-	orb->cmd.ssic = sch->options.suspend && sch->options.inter;
+	orb->cmd.pfch = priv->options.prefetch == 0;
+	orb->cmd.spnd = priv->options.suspend;
+	orb->cmd.ssic = priv->options.suspend && priv->options.inter;
 	orb->cmd.lpm = (lpm != 0) ? lpm : sch->lpm;
 #ifdef CONFIG_64BIT
 	/*
@@ -619,7 +604,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	s390_idle_check(regs, S390_lowcore.int_clock,
 			S390_lowcore.async_enter_timer);
 	irq_enter();
-	__get_cpu_var(s390_idle).nohz_delay = 1;
+	__this_cpu_write(s390_idle.nohz_delay, 1);
 	if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator)
 		/* Serve timer interrupts first. */
 		clock_comparator_work();
@@ -630,11 +615,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 		irb = (struct irb *)&S390_lowcore.irb;
 		do {
 			kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
-			/*
-			 * Non I/O-subchannel thin interrupts are processed differently
-			 */
-			if (tpi_info->adapter_IO == 1 &&
-			    tpi_info->int_type == IO_INTERRUPT_TYPE) {
+			if (tpi_info->adapter_IO) {
 				do_adapter_IO(tpi_info->isc);
 				continue;
 			}
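
With the options bits gone from struct subchannel (see the cio.h hunks below), cio_set_options() and cio_start_key() reach them through to_io_private(sch), a container_of()-style accessor into the I/O-subchannel private structure. A freestanding sketch of the container_of pattern itself (the macro and structures here are illustrative, not the kernel's):

#include <stddef.h>
#include <stdio.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct options_sketch {
	unsigned int suspend:1;
	unsigned int prefetch:1;
	unsigned int inter:1;
};

struct io_private_sketch {
	struct options_sketch options;
	int dev_id;			/* the embedded member we point at */
};

int main(void)
{
	struct io_private_sketch priv = { .options = { .prefetch = 1 },
					  .dev_id = 7 };
	int *idp = &priv.dev_id;

	/* Recover the containing structure from a member pointer. */
	struct io_private_sketch *p =
		container_of_sketch(idp, struct io_private_sketch, dev_id);

	printf("prefetch=%u dev_id=%d\n", p->options.prefetch, p->dev_id);
	return 0;
}
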
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h
index bf7f80f5a330..155a82bcb9e5 100644
--- a/drivers/s390/cio/cio.h
+++ b/drivers/s390/cio/cio.h
@@ -84,13 +84,6 @@ struct subchannel {
 	SUBCHANNEL_TYPE_MSG = 2,
 	SUBCHANNEL_TYPE_ADM = 3,
 	} st;			/* subchannel type */
-
-	struct {
-		unsigned int suspend:1; /* allow suspend */
-		unsigned int prefetch:1;/* deny prefetch */
-		unsigned int inter:1;	/* suppress intermediate interrupts */
-	} __attribute__ ((packed)) options;
-
 	__u8 vpm;		/* verified path mask */
 	__u8 lpm;		/* logical path mask */
 	__u8 opm;		/* operational path mask */
@@ -99,14 +92,11 @@ struct subchannel {
 	struct chsc_ssd_info ssd_info;	/* subchannel description */
 	struct device dev;	/* entry in device tree */
 	struct css_driver *driver;
-	void *private; /* private per subchannel type data */
 	enum sch_todo todo;
 	struct work_struct todo_work;
 	struct schib_config config;
 } __attribute__ ((aligned(8)));
 
-#define IO_INTERRUPT_TYPE	  0 /* I/O interrupt type */
-
 #define to_subchannel(n) container_of(n, struct subchannel, dev)
 
 extern int cio_validate_subchannel (struct subchannel *, struct subchannel_id);
@@ -120,7 +110,6 @@ extern int cio_start (struct subchannel *, struct ccw1 *, __u8);
 extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8);
 extern int cio_cancel (struct subchannel *);
 extern int cio_set_options (struct subchannel *, int);
-extern int cio_get_options (struct subchannel *);
 extern int cio_update_schib(struct subchannel *sch);
 extern int cio_commit_config(struct subchannel *sch);
 
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index ac94ac751459..c47b25fd3f43 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
 /*
  * driver for channel subsystem
  *
- * Copyright IBM Corp. 2002, 2009
+ * Copyright IBM Corp. 2002, 2010
  *
  * Author(s): Arnd Bergmann (arndb@de.ibm.com)
  *	      Cornelia Huck (cornelia.huck@de.ibm.com)
@@ -35,6 +35,7 @@ int css_init_done = 0;
 int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
+static struct bus_type css_bus_type;
 
 int
 for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -577,7 +578,7 @@ static int __unset_registered(struct device *dev, void *data)
 	return 0;
 }
 
-void css_schedule_eval_all_unreg(void)
+static void css_schedule_eval_all_unreg(void)
 {
 	unsigned long flags;
 	struct idset *unreg_set;
@@ -618,6 +619,7 @@ EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
 {
 	struct subchannel_id mchk_schid;
+	struct subchannel *sch;
 
 	if (overflow) {
 		css_schedule_eval_all();
@@ -635,8 +637,15 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
635 init_subchannel_id(&mchk_schid); 637 init_subchannel_id(&mchk_schid);
636 mchk_schid.sch_no = crw0->rsid; 638 mchk_schid.sch_no = crw0->rsid;
637 if (crw1) 639 if (crw1)
638 mchk_schid.ssid = (crw1->rsid >> 8) & 3; 640 mchk_schid.ssid = (crw1->rsid >> 4) & 3;
639 641
642 if (crw0->erc == CRW_ERC_PMOD) {
643 sch = get_subchannel_by_schid(mchk_schid);
644 if (sch) {
645 css_update_ssd_info(sch);
646 put_device(&sch->dev);
647 }
648 }
640 /* 649 /*
641 * Since we are always presented with IPI in the CRW, we have to 650 * Since we are always presented with IPI in the CRW, we have to
642 * use stsch() to find out if the subchannel in question has come 651 * use stsch() to find out if the subchannel in question has come
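
The ssid fix in the hunk above moves the shift from 8 to 4: in the second CRW the subchannel-set id occupies the two bits just above the low nibble of rsid. A minimal standalone sketch of the corrected extraction; the sample rsid value is invented for illustration:

#include <stdio.h>
#include <stdint.h>

/* Extract the ssid the way css_process_crw() now does: shift the
 * second CRW's rsid right by 4 and keep the low two bits. */
static unsigned int ssid_from_rsid(uint16_t rsid)
{
	return (rsid >> 4) & 3;
}

int main(void)
{
	uint16_t rsid = 0x0021;	/* hypothetical crw1->rsid */
	printf("ssid=%u\n", ssid_from_rsid(rsid));	/* prints ssid=2 */
	return 0;
}
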
@@ -790,7 +799,6 @@ static struct notifier_block css_reboot_notifier = {
790static int css_power_event(struct notifier_block *this, unsigned long event, 799static int css_power_event(struct notifier_block *this, unsigned long event,
791 void *ptr) 800 void *ptr)
792{ 801{
793 void *secm_area;
794 int ret, i; 802 int ret, i;
795 803
796 switch (event) { 804 switch (event) {
@@ -806,15 +814,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
806 mutex_unlock(&css->mutex); 814 mutex_unlock(&css->mutex);
807 continue; 815 continue;
808 } 816 }
809 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 817 if (__chsc_do_secm(css, 0))
810 GFP_DMA);
811 if (secm_area) {
812 if (__chsc_do_secm(css, 0, secm_area))
813 ret = NOTIFY_BAD;
814 free_page((unsigned long)secm_area);
815 } else
816 ret = NOTIFY_BAD; 818 ret = NOTIFY_BAD;
817
818 mutex_unlock(&css->mutex); 819 mutex_unlock(&css->mutex);
819 } 820 }
820 break; 821 break;
@@ -830,15 +831,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
830 mutex_unlock(&css->mutex); 831 mutex_unlock(&css->mutex);
831 continue; 832 continue;
832 } 833 }
833 secm_area = (void *)get_zeroed_page(GFP_KERNEL | 834 if (__chsc_do_secm(css, 1))
834 GFP_DMA);
835 if (secm_area) {
836 if (__chsc_do_secm(css, 1, secm_area))
837 ret = NOTIFY_BAD;
838 free_page((unsigned long)secm_area);
839 } else
840 ret = NOTIFY_BAD; 835 ret = NOTIFY_BAD;
841
842 mutex_unlock(&css->mutex); 836 mutex_unlock(&css->mutex);
843 } 837 }
844 /* search for subchannels, which appeared during hibernation */ 838 /* search for subchannels, which appeared during hibernation */
@@ -863,14 +857,11 @@ static int __init css_bus_init(void)
863{ 857{
864 int ret, i; 858 int ret, i;
865 859
866 ret = chsc_determine_css_characteristics(); 860 ret = chsc_init();
867 if (ret == -ENOMEM)
868 goto out;
869
870 ret = chsc_alloc_sei_area();
871 if (ret) 861 if (ret)
872 goto out; 862 return ret;
873 863
864 chsc_determine_css_characteristics();
874 /* Try to enable MSS. */ 865 /* Try to enable MSS. */
875 ret = chsc_enable_facility(CHSC_SDA_OC_MSS); 866 ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
876 if (ret) 867 if (ret)
@@ -956,9 +947,9 @@ out_unregister:
956 } 947 }
957 bus_unregister(&css_bus_type); 948 bus_unregister(&css_bus_type);
958out: 949out:
959 crw_unregister_handler(CRW_RSC_CSS); 950 crw_unregister_handler(CRW_RSC_SCH);
960 chsc_free_sei_area();
961 idset_free(slow_subchannel_set); 951 idset_free(slow_subchannel_set);
952 chsc_init_cleanup();
962 pr_alert("The CSS device driver initialization failed with " 953 pr_alert("The CSS device driver initialization failed with "
963 "errno=%d\n", ret); 954 "errno=%d\n", ret);
964 return ret; 955 return ret;
@@ -978,9 +969,9 @@ static void __init css_bus_cleanup(void)
978 device_unregister(&css->device); 969 device_unregister(&css->device);
979 } 970 }
980 bus_unregister(&css_bus_type); 971 bus_unregister(&css_bus_type);
981 crw_unregister_handler(CRW_RSC_CSS); 972 crw_unregister_handler(CRW_RSC_SCH);
982 chsc_free_sei_area();
983 idset_free(slow_subchannel_set); 973 idset_free(slow_subchannel_set);
974 chsc_init_cleanup();
984 isc_unregister(IO_SCH_ISC); 975 isc_unregister(IO_SCH_ISC);
985} 976}
986 977
@@ -1048,7 +1039,16 @@ subsys_initcall_sync(channel_subsystem_init_sync);
1048 1039
1049void channel_subsystem_reinit(void) 1040void channel_subsystem_reinit(void)
1050{ 1041{
1042 struct channel_path *chp;
1043 struct chp_id chpid;
1044
1051 chsc_enable_facility(CHSC_SDA_OC_MSS); 1045 chsc_enable_facility(CHSC_SDA_OC_MSS);
1046 chp_id_for_each(&chpid) {
1047 chp = chpid_to_chp(chpid);
1048 if (!chp)
1049 continue;
1050 chsc_determine_base_channel_path_desc(chpid, &chp->desc);
1051 }
1052} 1052}
1053 1053
1054#ifdef CONFIG_PROC_FS 1054#ifdef CONFIG_PROC_FS
@@ -1067,6 +1067,7 @@ static ssize_t cio_settle_write(struct file *file, const char __user *buf,
1067static const struct file_operations cio_settle_proc_fops = { 1067static const struct file_operations cio_settle_proc_fops = {
1068 .open = nonseekable_open, 1068 .open = nonseekable_open,
1069 .write = cio_settle_write, 1069 .write = cio_settle_write,
1070 .llseek = no_llseek,
1070}; 1071};
1071 1072
1072static int __init cio_settle_init(void) 1073static int __init cio_settle_init(void)
@@ -1199,6 +1200,7 @@ static int css_pm_restore(struct device *dev)
1199 struct subchannel *sch = to_subchannel(dev); 1200 struct subchannel *sch = to_subchannel(dev);
1200 struct css_driver *drv; 1201 struct css_driver *drv;
1201 1202
1203 css_update_ssd_info(sch);
1202 if (!sch->dev.driver) 1204 if (!sch->dev.driver)
1203 return 0; 1205 return 0;
1204 drv = to_cssdriver(sch->dev.driver); 1206 drv = to_cssdriver(sch->dev.driver);
@@ -1213,7 +1215,7 @@ static const struct dev_pm_ops css_pm_ops = {
1213 .restore = css_pm_restore, 1215 .restore = css_pm_restore,
1214}; 1216};
1215 1217
1216struct bus_type css_bus_type = { 1218static struct bus_type css_bus_type = {
1217 .name = "css", 1219 .name = "css",
1218 .match = css_bus_match, 1220 .match = css_bus_match,
1219 .probe = css_probe, 1221 .probe = css_probe,
@@ -1232,9 +1234,7 @@ struct bus_type css_bus_type = {
1232 */ 1234 */
1233int css_driver_register(struct css_driver *cdrv) 1235int css_driver_register(struct css_driver *cdrv)
1234{ 1236{
1235 cdrv->drv.name = cdrv->name;
1236 cdrv->drv.bus = &css_bus_type; 1237 cdrv->drv.bus = &css_bus_type;
1237 cdrv->drv.owner = cdrv->owner;
1238 return driver_register(&cdrv->drv); 1238 return driver_register(&cdrv->drv);
1239} 1239}
1240EXPORT_SYMBOL_GPL(css_driver_register); 1240EXPORT_SYMBOL_GPL(css_driver_register);
@@ -1252,4 +1252,3 @@ void css_driver_unregister(struct css_driver *cdrv)
1252EXPORT_SYMBOL_GPL(css_driver_unregister); 1252EXPORT_SYMBOL_GPL(css_driver_unregister);
1253 1253
1254MODULE_LICENSE("GPL"); 1254MODULE_LICENSE("GPL");
1255EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 7e37886de231..80ebdddf7747 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -63,7 +63,6 @@ struct subchannel;
63struct chp_link; 63struct chp_link;
64/** 64/**
65 * struct css_driver - device driver for subchannels 65 * struct css_driver - device driver for subchannels
66 * @owner: owning module
67 * @subchannel_type: subchannel type supported by this driver 66 * @subchannel_type: subchannel type supported by this driver
68 * @drv: embedded device driver structure 67 * @drv: embedded device driver structure
69 * @irq: called on interrupts 68 * @irq: called on interrupts
@@ -78,10 +77,8 @@ struct chp_link;
78 * @thaw: undo work done in @freeze 77 * @thaw: undo work done in @freeze
79 * @restore: callback for restoring after hibernation 78 * @restore: callback for restoring after hibernation
80 * @settle: wait for asynchronous work to finish 79 * @settle: wait for asynchronous work to finish
81 * @name: name of the device driver
82 */ 80 */
83struct css_driver { 81struct css_driver {
84 struct module *owner;
85 struct css_device_id *subchannel_type; 82 struct css_device_id *subchannel_type;
86 struct device_driver drv; 83 struct device_driver drv;
87 void (*irq)(struct subchannel *); 84 void (*irq)(struct subchannel *);
@@ -96,16 +93,10 @@ struct css_driver {
96 int (*thaw) (struct subchannel *); 93 int (*thaw) (struct subchannel *);
97 int (*restore)(struct subchannel *); 94 int (*restore)(struct subchannel *);
98 int (*settle)(void); 95 int (*settle)(void);
99 const char *name;
100}; 96};
101 97
102#define to_cssdriver(n) container_of(n, struct css_driver, drv) 98#define to_cssdriver(n) container_of(n, struct css_driver, drv)
103 99
104/*
105 * all css_drivers have the css_bus_type
106 */
107extern struct bus_type css_bus_type;
108
109extern int css_driver_register(struct css_driver *); 100extern int css_driver_register(struct css_driver *);
110extern void css_driver_unregister(struct css_driver *); 101extern void css_driver_unregister(struct css_driver *);
111 102
@@ -140,7 +131,6 @@ struct channel_subsystem {
140}; 131};
141#define to_css(dev) container_of(dev, struct channel_subsystem, device) 132#define to_css(dev) container_of(dev, struct channel_subsystem, device)
142 133
143extern struct bus_type css_bus_type;
144extern struct channel_subsystem *channel_subsystems[]; 134extern struct channel_subsystem *channel_subsystems[];
145 135
146/* Helper functions to build lists for the slow path. */ 136/* Helper functions to build lists for the slow path. */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 51bd3687d163..8e04c00cf0ad 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -127,7 +127,7 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env)
127 return ret; 127 return ret;
128} 128}
129 129
130struct bus_type ccw_bus_type; 130static struct bus_type ccw_bus_type;
131 131
132static void io_subchannel_irq(struct subchannel *); 132static void io_subchannel_irq(struct subchannel *);
133static int io_subchannel_probe(struct subchannel *); 133static int io_subchannel_probe(struct subchannel *);
@@ -172,9 +172,11 @@ static int io_subchannel_settle(void)
172} 172}
173 173
174static struct css_driver io_subchannel_driver = { 174static struct css_driver io_subchannel_driver = {
175 .owner = THIS_MODULE, 175 .drv = {
176 .owner = THIS_MODULE,
177 .name = "io_subchannel",
178 },
176 .subchannel_type = io_subchannel_ids, 179 .subchannel_type = io_subchannel_ids,
177 .name = "io_subchannel",
178 .irq = io_subchannel_irq, 180 .irq = io_subchannel_irq,
179 .sch_event = io_subchannel_sch_event, 181 .sch_event = io_subchannel_sch_event,
180 .chp_event = io_subchannel_chp_event, 182 .chp_event = io_subchannel_chp_event,
@@ -539,15 +541,24 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
539 int force, ret; 541 int force, ret;
540 unsigned long i; 542 unsigned long i;
541 543
542 if (!dev_fsm_final_state(cdev) && 544 /* Prevent conflict between multiple on-/offline processing requests. */
543 cdev->private->state != DEV_STATE_DISCONNECTED)
544 return -EAGAIN;
545 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0) 545 if (atomic_cmpxchg(&cdev->private->onoff, 0, 1) != 0)
546 return -EAGAIN; 546 return -EAGAIN;
547 /* Prevent conflict between internal I/Os and on-/offline processing. */
548 if (!dev_fsm_final_state(cdev) &&
549 cdev->private->state != DEV_STATE_DISCONNECTED) {
550 ret = -EAGAIN;
551 goto out_onoff;
552 }
553 /* Prevent conflict between pending work and on-/offline processing.*/
554 if (work_pending(&cdev->private->todo_work)) {
555 ret = -EAGAIN;
556 goto out_onoff;
557 }
547 558
548 if (cdev->drv && !try_module_get(cdev->drv->owner)) { 559 if (cdev->drv && !try_module_get(cdev->drv->driver.owner)) {
549 atomic_set(&cdev->private->onoff, 0); 560 ret = -EINVAL;
550 return -EINVAL; 561 goto out_onoff;
551 } 562 }
552 if (!strncmp(buf, "force\n", count)) { 563 if (!strncmp(buf, "force\n", count)) {
553 force = 1; 564 force = 1;
@@ -571,7 +582,8 @@ static ssize_t online_store (struct device *dev, struct device_attribute *attr,
571 } 582 }
572out: 583out:
573 if (cdev->drv) 584 if (cdev->drv)
574 module_put(cdev->drv->owner); 585 module_put(cdev->drv->driver.owner);
586out_onoff:
575 atomic_set(&cdev->private->onoff, 0); 587 atomic_set(&cdev->private->onoff, 0);
576 return (ret < 0) ? ret : count; 588 return (ret < 0) ? ret : count;
577} 589}
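
The reordering in online_store() above establishes a rule: win the atomic cmpxchg on cdev->private->onoff first, then perform the state and work_pending checks, and release the gate on every exit path. A userspace sketch of the same gate pattern, with C11 atomics standing in for the kernel's atomic_cmpxchg; the function and parameter names are illustrative, not the driver's:

#include <errno.h>
#include <stdatomic.h>

static atomic_int onoff;	/* 0 = idle, 1 = on-/offline in progress */

static int online_store_like(int in_final_state, int todo_work_pending)
{
	int expected = 0;
	int ret = 0;

	/* Prevent conflict between multiple on-/offline requests. */
	if (!atomic_compare_exchange_strong(&onoff, &expected, 1))
		return -EAGAIN;
	/* Prevent conflict with internal I/O or pending work. */
	if (!in_final_state || todo_work_pending) {
		ret = -EAGAIN;
		goto out_onoff;
	}
	/* ... the actual online/offline processing would run here ... */
out_onoff:
	atomic_store(&onoff, 0);	/* always release the gate */
	return ret;
}
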
@@ -1030,6 +1042,7 @@ static void io_subchannel_init_fields(struct subchannel *sch)
1030 */ 1042 */
1031static int io_subchannel_probe(struct subchannel *sch) 1043static int io_subchannel_probe(struct subchannel *sch)
1032{ 1044{
1045 struct io_subchannel_private *io_priv;
1033 struct ccw_device *cdev; 1046 struct ccw_device *cdev;
1034 int rc; 1047 int rc;
1035 1048
@@ -1073,10 +1086,11 @@ static int io_subchannel_probe(struct subchannel *sch)
1073 if (rc) 1086 if (rc)
1074 goto out_schedule; 1087 goto out_schedule;
1075 /* Allocate I/O subchannel private data. */ 1088 /* Allocate I/O subchannel private data. */
1076 sch->private = kzalloc(sizeof(struct io_subchannel_private), 1089 io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
1077 GFP_KERNEL | GFP_DMA); 1090 if (!io_priv)
1078 if (!sch->private)
1079 goto out_schedule; 1091 goto out_schedule;
1092
1093 set_io_private(sch, io_priv);
1080 css_schedule_eval(sch->schid); 1094 css_schedule_eval(sch->schid);
1081 return 0; 1095 return 0;
1082 1096
@@ -1090,6 +1104,7 @@ out_schedule:
1090static int 1104static int
1091io_subchannel_remove (struct subchannel *sch) 1105io_subchannel_remove (struct subchannel *sch)
1092{ 1106{
1107 struct io_subchannel_private *io_priv = to_io_private(sch);
1093 struct ccw_device *cdev; 1108 struct ccw_device *cdev;
1094 1109
1095 cdev = sch_get_cdev(sch); 1110 cdev = sch_get_cdev(sch);
@@ -1099,11 +1114,12 @@ io_subchannel_remove (struct subchannel *sch)
1099 /* Set ccw device to not operational and drop reference. */ 1114 /* Set ccw device to not operational and drop reference. */
1100 spin_lock_irq(cdev->ccwlock); 1115 spin_lock_irq(cdev->ccwlock);
1101 sch_set_cdev(sch, NULL); 1116 sch_set_cdev(sch, NULL);
1117 set_io_private(sch, NULL);
1102 cdev->private->state = DEV_STATE_NOT_OPER; 1118 cdev->private->state = DEV_STATE_NOT_OPER;
1103 spin_unlock_irq(cdev->ccwlock); 1119 spin_unlock_irq(cdev->ccwlock);
1104 ccw_device_unregister(cdev); 1120 ccw_device_unregister(cdev);
1105out_free: 1121out_free:
1106 kfree(sch->private); 1122 kfree(io_priv);
1107 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); 1123 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
1108 return 0; 1124 return 0;
1109} 1125}
@@ -1147,6 +1163,7 @@ err:
1147static int io_subchannel_chp_event(struct subchannel *sch, 1163static int io_subchannel_chp_event(struct subchannel *sch,
1148 struct chp_link *link, int event) 1164 struct chp_link *link, int event)
1149{ 1165{
1166 struct ccw_device *cdev = sch_get_cdev(sch);
1150 int mask; 1167 int mask;
1151 1168
1152 mask = chp_ssd_get_mask(&sch->ssd_info, link); 1169 mask = chp_ssd_get_mask(&sch->ssd_info, link);
@@ -1156,22 +1173,30 @@ static int io_subchannel_chp_event(struct subchannel *sch,
1156 case CHP_VARY_OFF: 1173 case CHP_VARY_OFF:
1157 sch->opm &= ~mask; 1174 sch->opm &= ~mask;
1158 sch->lpm &= ~mask; 1175 sch->lpm &= ~mask;
1176 if (cdev)
1177 cdev->private->path_gone_mask |= mask;
1159 io_subchannel_terminate_path(sch, mask); 1178 io_subchannel_terminate_path(sch, mask);
1160 break; 1179 break;
1161 case CHP_VARY_ON: 1180 case CHP_VARY_ON:
1162 sch->opm |= mask; 1181 sch->opm |= mask;
1163 sch->lpm |= mask; 1182 sch->lpm |= mask;
1183 if (cdev)
1184 cdev->private->path_new_mask |= mask;
1164 io_subchannel_verify(sch); 1185 io_subchannel_verify(sch);
1165 break; 1186 break;
1166 case CHP_OFFLINE: 1187 case CHP_OFFLINE:
1167 if (cio_update_schib(sch)) 1188 if (cio_update_schib(sch))
1168 return -ENODEV; 1189 return -ENODEV;
1190 if (cdev)
1191 cdev->private->path_gone_mask |= mask;
1169 io_subchannel_terminate_path(sch, mask); 1192 io_subchannel_terminate_path(sch, mask);
1170 break; 1193 break;
1171 case CHP_ONLINE: 1194 case CHP_ONLINE:
1172 if (cio_update_schib(sch)) 1195 if (cio_update_schib(sch))
1173 return -ENODEV; 1196 return -ENODEV;
1174 sch->lpm |= mask & sch->opm; 1197 sch->lpm |= mask & sch->opm;
1198 if (cdev)
1199 cdev->private->path_new_mask |= mask;
1175 io_subchannel_verify(sch); 1200 io_subchannel_verify(sch);
1176 break; 1201 break;
1177 } 1202 }
@@ -1196,6 +1221,7 @@ static void io_subchannel_quiesce(struct subchannel *sch)
1196 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO)); 1221 cdev->handler(cdev, cdev->private->intparm, ERR_PTR(-EIO));
1197 while (ret == -EBUSY) { 1222 while (ret == -EBUSY) {
1198 cdev->private->state = DEV_STATE_QUIESCE; 1223 cdev->private->state = DEV_STATE_QUIESCE;
1224 cdev->private->iretry = 255;
1199 ret = ccw_device_cancel_halt_clear(cdev); 1225 ret = ccw_device_cancel_halt_clear(cdev);
1200 if (ret == -EBUSY) { 1226 if (ret == -EBUSY) {
1201 ccw_device_set_timeout(cdev, HZ/10); 1227 ccw_device_set_timeout(cdev, HZ/10);
@@ -1295,10 +1321,12 @@ static int purge_fn(struct device *dev, void *data)
1295 1321
1296 spin_lock_irq(cdev->ccwlock); 1322 spin_lock_irq(cdev->ccwlock);
1297 if (is_blacklisted(id->ssid, id->devno) && 1323 if (is_blacklisted(id->ssid, id->devno) &&
1298 (cdev->private->state == DEV_STATE_OFFLINE)) { 1324 (cdev->private->state == DEV_STATE_OFFLINE) &&
1325 (atomic_cmpxchg(&cdev->private->onoff, 0, 1) == 0)) {
1299 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid, 1326 CIO_MSG_EVENT(3, "ccw: purging 0.%x.%04x\n", id->ssid,
1300 id->devno); 1327 id->devno);
1301 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 1328 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
1329 atomic_set(&cdev->private->onoff, 0);
1302 } 1330 }
1303 spin_unlock_irq(cdev->ccwlock); 1331 spin_unlock_irq(cdev->ccwlock);
1304 /* Abort loop in case of pending signal. */ 1332 /* Abort loop in case of pending signal. */
@@ -1445,7 +1473,16 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1445 break; 1473 break;
1446 case IO_SCH_UNREG_ATTACH: 1474 case IO_SCH_UNREG_ATTACH:
1447 case IO_SCH_UNREG: 1475 case IO_SCH_UNREG:
1448 if (cdev) 1476 if (!cdev)
1477 break;
1478 if (cdev->private->state == DEV_STATE_SENSE_ID) {
1479 /*
1480 * Note: delayed work triggered by this event
1481 * and repeated calls to sch_event are synchronized
1482 * by the above check for work_pending(cdev).
1483 */
1484 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
1485 } else
1449 ccw_device_set_notoper(cdev); 1486 ccw_device_set_notoper(cdev);
1450 break; 1487 break;
1451 case IO_SCH_NOP: 1488 case IO_SCH_NOP:
@@ -1468,9 +1505,13 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
1468 goto out; 1505 goto out;
1469 break; 1506 break;
1470 case IO_SCH_UNREG_ATTACH: 1507 case IO_SCH_UNREG_ATTACH:
1508 if (cdev->private->flags.resuming) {
1509 /* Device will be handled later. */
1510 rc = 0;
1511 goto out;
1512 }
1471 /* Unregister ccw device. */ 1513 /* Unregister ccw device. */
1472 if (!cdev->private->flags.resuming) 1514 ccw_device_unregister(cdev);
1473 ccw_device_unregister(cdev);
1474 break; 1515 break;
1475 default: 1516 default:
1476 break; 1517 break;
@@ -1530,11 +1571,12 @@ spinlock_t * cio_get_console_lock(void)
1530static int ccw_device_console_enable(struct ccw_device *cdev, 1571static int ccw_device_console_enable(struct ccw_device *cdev,
1531 struct subchannel *sch) 1572 struct subchannel *sch)
1532{ 1573{
1574 struct io_subchannel_private *io_priv = cio_get_console_priv();
1533 int rc; 1575 int rc;
1534 1576
1535 /* Attach subchannel private data. */ 1577 /* Attach subchannel private data. */
1536 sch->private = cio_get_console_priv(); 1578 memset(io_priv, 0, sizeof(*io_priv));
1537 memset(sch->private, 0, sizeof(struct io_subchannel_private)); 1579 set_io_private(sch, io_priv);
1538 io_subchannel_init_fields(sch); 1580 io_subchannel_init_fields(sch);
1539 rc = cio_commit_config(sch); 1581 rc = cio_commit_config(sch);
1540 if (rc) 1582 if (rc)
@@ -1812,6 +1854,7 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
1812 * available again. Kick re-detection. 1854 * available again. Kick re-detection.
1813 */ 1855 */
1814 cdev->private->flags.resuming = 1; 1856 cdev->private->flags.resuming = 1;
1857 cdev->private->path_new_mask = LPM_ANYPATH;
1815 css_schedule_eval(sch->schid); 1858 css_schedule_eval(sch->schid);
1816 spin_unlock_irq(sch->lock); 1859 spin_unlock_irq(sch->lock);
1817 css_complete_work(); 1860 css_complete_work();
@@ -1939,7 +1982,7 @@ static const struct dev_pm_ops ccw_pm_ops = {
1939 .restore = ccw_device_pm_restore, 1982 .restore = ccw_device_pm_restore,
1940}; 1983};
1941 1984
1942struct bus_type ccw_bus_type = { 1985static struct bus_type ccw_bus_type = {
1943 .name = "ccw", 1986 .name = "ccw",
1944 .match = ccw_bus_match, 1987 .match = ccw_bus_match,
1945 .uevent = ccw_uevent, 1988 .uevent = ccw_uevent,
@@ -1962,8 +2005,6 @@ int ccw_driver_register(struct ccw_driver *cdriver)
1962 struct device_driver *drv = &cdriver->driver; 2005 struct device_driver *drv = &cdriver->driver;
1963 2006
1964 drv->bus = &ccw_bus_type; 2007 drv->bus = &ccw_bus_type;
1965 drv->name = cdriver->name;
1966 drv->owner = cdriver->owner;
1967 2008
1968 return driver_register(drv); 2009 return driver_register(drv);
1969} 2010}
@@ -2081,5 +2122,4 @@ EXPORT_SYMBOL(ccw_device_set_offline);
2081EXPORT_SYMBOL(ccw_driver_register); 2122EXPORT_SYMBOL(ccw_driver_register);
2082EXPORT_SYMBOL(ccw_driver_unregister); 2123EXPORT_SYMBOL(ccw_driver_unregister);
2083EXPORT_SYMBOL(get_ccwdev_by_busid); 2124EXPORT_SYMBOL(get_ccwdev_by_busid);
2084EXPORT_SYMBOL(ccw_bus_type);
2085EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id); 2125EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index 379de2d1ec49..7e297c7bb5ff 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -133,7 +133,6 @@ void ccw_device_set_notoper(struct ccw_device *cdev);
133/* qdio needs this. */ 133/* qdio needs this. */
134void ccw_device_set_timeout(struct ccw_device *, int); 134void ccw_device_set_timeout(struct ccw_device *, int);
135extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *); 135extern struct subchannel_id ccw_device_get_subchannel_id(struct ccw_device *);
136extern struct bus_type ccw_bus_type;
137 136
138/* Channel measurement facility related */ 137/* Channel measurement facility related */
139void retry_set_schib(struct ccw_device *cdev); 138void retry_set_schib(struct ccw_device *cdev);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c9b852647f01..52c233fa2b12 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -174,7 +174,10 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
174 ret = cio_clear (sch); 174 ret = cio_clear (sch);
175 return (ret == 0) ? -EBUSY : ret; 175 return (ret == 0) ? -EBUSY : ret;
176 } 176 }
177 panic("Can't stop i/o on subchannel.\n"); 177 /* Function was unsuccessful */
178 CIO_MSG_EVENT(0, "0.%x.%04x: could not stop I/O\n",
179 cdev->private->dev_id.ssid, cdev->private->dev_id.devno);
180 return -EIO;
178} 181}
179 182
180void ccw_device_update_sense_data(struct ccw_device *cdev) 183void ccw_device_update_sense_data(struct ccw_device *cdev)
@@ -315,7 +318,7 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
315 318
316/** 319/**
317 * ccw_device_notify() - inform the device's driver about an event 320 * ccw_device_notify() - inform the device's driver about an event
318 * @cdev: device for which an event occured 321 * @cdev: device for which an event occurred
319 * @event: event that occurred 322 * @event: event that occurred
320 * 323 *
321 * Returns: 324 * Returns:
@@ -349,9 +352,13 @@ out:
349 352
350static void ccw_device_oper_notify(struct ccw_device *cdev) 353static void ccw_device_oper_notify(struct ccw_device *cdev)
351{ 354{
355 struct subchannel *sch = to_subchannel(cdev->dev.parent);
356
352 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) { 357 if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
353 /* Reenable channel measurements, if needed. */ 358 /* Reenable channel measurements, if needed. */
354 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF); 359 ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
360 /* Save indication for new paths. */
361 cdev->private->path_new_mask = sch->vpm;
355 return; 362 return;
356 } 363 }
357 /* Driver doesn't want device back. */ 364 /* Driver doesn't want device back. */
@@ -401,9 +408,10 @@ ccw_device_done(struct ccw_device *cdev, int state)
401 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel " 408 CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
402 "%04x\n", cdev->private->dev_id.devno, 409 "%04x\n", cdev->private->dev_id.devno,
403 sch->schid.sch_no); 410 sch->schid.sch_no);
404 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) 411 if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK) {
412 cdev->private->state = DEV_STATE_NOT_OPER;
405 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG); 413 ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
406 else 414 } else
407 ccw_device_set_disconnected(cdev); 415 ccw_device_set_disconnected(cdev);
408 cdev->private->flags.donotify = 0; 416 cdev->private->flags.donotify = 0;
409 break; 417 break;
@@ -462,6 +470,32 @@ static void ccw_device_request_event(struct ccw_device *cdev, enum dev_event e)
462 } 470 }
463} 471}
464 472
473static void ccw_device_report_path_events(struct ccw_device *cdev)
474{
475 struct subchannel *sch = to_subchannel(cdev->dev.parent);
476 int path_event[8];
477 int chp, mask;
478
479 for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
480 path_event[chp] = PE_NONE;
481 if (mask & cdev->private->path_gone_mask & ~(sch->vpm))
482 path_event[chp] |= PE_PATH_GONE;
483 if (mask & cdev->private->path_new_mask & sch->vpm)
484 path_event[chp] |= PE_PATH_AVAILABLE;
485 if (mask & cdev->private->pgid_reset_mask & sch->vpm)
486 path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
487 }
488 if (cdev->online && cdev->drv->path_event)
489 cdev->drv->path_event(cdev, path_event);
490}
491
492static void ccw_device_reset_path_events(struct ccw_device *cdev)
493{
494 cdev->private->path_gone_mask = 0;
495 cdev->private->path_new_mask = 0;
496 cdev->private->pgid_reset_mask = 0;
497}
498
465void 499void
466ccw_device_verify_done(struct ccw_device *cdev, int err) 500ccw_device_verify_done(struct ccw_device *cdev, int err)
467{ 501{
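
ccw_device_report_path_events() above is pure mask arithmetic: bit 7 of each accumulated mask corresponds to path_event[0], a gone path only counts if it is no longer in the verified-path mask (vpm), and new or re-grouped paths only count if they are. A standalone restatement of the loop; the PE_* values here merely mirror their use as OR-able flags, and vpm is passed in directly:

#include <stdio.h>

#define PE_NONE				0x0
#define PE_PATH_GONE			0x1
#define PE_PATH_AVAILABLE		0x2
#define PE_PATHGROUP_ESTABLISHED	0x4

static void report_path_events(unsigned int gone_mask, unsigned int new_mask,
			       unsigned int reset_mask, unsigned int vpm)
{
	int path_event[8];
	int chp, mask;

	for (chp = 0, mask = 0x80; chp < 8; chp++, mask >>= 1) {
		path_event[chp] = PE_NONE;
		if (mask & gone_mask & ~vpm)
			path_event[chp] |= PE_PATH_GONE;
		if (mask & new_mask & vpm)
			path_event[chp] |= PE_PATH_AVAILABLE;
		if (mask & reset_mask & vpm)
			path_event[chp] |= PE_PATHGROUP_ESTABLISHED;
		printf("chp %d: %#x\n", chp, path_event[chp]);
	}
}

int main(void)
{
	/* path 0 vanished; path 1 came back and was re-path-grouped */
	report_path_events(0x80, 0x40, 0x40, 0x7f);
	return 0;
}
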
@@ -498,6 +532,7 @@ callback:
498 &cdev->private->irb); 532 &cdev->private->irb);
499 memset(&cdev->private->irb, 0, sizeof(struct irb)); 533 memset(&cdev->private->irb, 0, sizeof(struct irb));
500 } 534 }
535 ccw_device_report_path_events(cdev);
501 break; 536 break;
502 case -ETIME: 537 case -ETIME:
503 case -EUSERS: 538 case -EUSERS:
@@ -516,6 +551,7 @@ callback:
516 ccw_device_done(cdev, DEV_STATE_NOT_OPER); 551 ccw_device_done(cdev, DEV_STATE_NOT_OPER);
517 break; 552 break;
518 } 553 }
554 ccw_device_reset_path_events(cdev);
519} 555}
520 556
521/* 557/*
@@ -653,7 +689,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
653 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) { 689 (scsw_stctl(&cdev->private->irb.scsw) & SCSW_STCTL_STATUS_PEND)) {
654 /* 690 /*
655 * No final status yet or final status not yet delivered 691 * No final status yet or final status not yet delivered
656 * to the device driver. Can't do path verfication now, 692 * to the device driver. Can't do path verification now,
657 * delay until final status was delivered. 693 * delay until final status was delivered.
658 */ 694 */
659 cdev->private->flags.doverify = 1; 695 cdev->private->flags.doverify = 1;
@@ -734,13 +770,14 @@ ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
734 int ret; 770 int ret;
735 771
736 ccw_device_set_timeout(cdev, 0); 772 ccw_device_set_timeout(cdev, 0);
773 cdev->private->iretry = 255;
737 ret = ccw_device_cancel_halt_clear(cdev); 774 ret = ccw_device_cancel_halt_clear(cdev);
738 if (ret == -EBUSY) { 775 if (ret == -EBUSY) {
739 ccw_device_set_timeout(cdev, 3*HZ); 776 ccw_device_set_timeout(cdev, 3*HZ);
740 cdev->private->state = DEV_STATE_TIMEOUT_KILL; 777 cdev->private->state = DEV_STATE_TIMEOUT_KILL;
741 return; 778 return;
742 } 779 }
743 if (ret == -ENODEV) 780 if (ret)
744 dev_fsm_event(cdev, DEV_EVENT_NOTOPER); 781 dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
745 else if (cdev->handler) 782 else if (cdev->handler)
746 cdev->handler(cdev, cdev->private->intparm, 783 cdev->handler(cdev, cdev->private->intparm,
@@ -804,9 +841,6 @@ call_handler:
804static void 841static void
805ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event) 842ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
806{ 843{
807 struct subchannel *sch;
808
809 sch = to_subchannel(cdev->dev.parent);
810 ccw_device_set_timeout(cdev, 0); 844 ccw_device_set_timeout(cdev, 0);
811 /* Start delayed path verification. */ 845 /* Start delayed path verification. */
812 ccw_device_online_verify(cdev, 0); 846 ccw_device_online_verify(cdev, 0);
@@ -837,6 +871,7 @@ void ccw_device_kill_io(struct ccw_device *cdev)
837{ 871{
838 int ret; 872 int ret;
839 873
874 cdev->private->iretry = 255;
840 ret = ccw_device_cancel_halt_clear(cdev); 875 ret = ccw_device_cancel_halt_clear(cdev);
841 if (ret == -EBUSY) { 876 if (ret == -EBUSY) {
842 ccw_device_set_timeout(cdev, 3*HZ); 877 ccw_device_set_timeout(cdev, 3*HZ);
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 6da84543dfe9..f98698d5735e 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -418,12 +418,9 @@ int ccw_device_resume(struct ccw_device *cdev)
418int 418int
419ccw_device_call_handler(struct ccw_device *cdev) 419ccw_device_call_handler(struct ccw_device *cdev)
420{ 420{
421 struct subchannel *sch;
422 unsigned int stctl; 421 unsigned int stctl;
423 int ending_status; 422 int ending_status;
424 423
425 sch = to_subchannel(cdev->dev.parent);
426
427 /* 424 /*
428 * we allow for the device action handler if: 425 * we allow for the device action handler if:
429 * - we received ending status 426 * - we received ending status
@@ -687,6 +684,46 @@ int ccw_device_tm_start_timeout(struct ccw_device *cdev, struct tcw *tcw,
687EXPORT_SYMBOL(ccw_device_tm_start_timeout); 684EXPORT_SYMBOL(ccw_device_tm_start_timeout);
688 685
689/** 686/**
687 * ccw_device_get_mdc - accumulate max data count
688 * @cdev: ccw device for which the max data count is accumulated
689 * @mask: mask of paths to use
690 *
691 * Return the number of 64K-byte blocks all paths at least support
692 * for a transport command. Return values <= 0 indicate failures.
693 */
694int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask)
695{
696 struct subchannel *sch = to_subchannel(cdev->dev.parent);
697 struct channel_path_desc_fmt1 desc;
698 struct chp_id chpid;
699 int mdc = 0, ret, i;
700
701 /* Adjust requested path mask to exclude varied off paths. */
702 if (mask)
703 mask &= sch->lpm;
704 else
705 mask = sch->lpm;
706
707 chp_id_init(&chpid);
708 for (i = 0; i < 8; i++) {
709 if (!(mask & (0x80 >> i)))
710 continue;
711 chpid.id = sch->schib.pmcw.chpid[i];
712 ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc);
713 if (ret)
714 return ret;
715 if (!desc.f)
716 return 0;
717 if (!desc.r)
718 mdc = 1;
719 mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc;
720 }
721
722 return mdc;
723}
724EXPORT_SYMBOL(ccw_device_get_mdc);
725
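
The reduction in ccw_device_get_mdc() deserves a plain-language restatement: any requested path whose descriptor lacks the f bit forces a result of 0, a path without the r bit caps the result at one 64K block, and otherwise the smallest per-path mdc wins. A self-contained model of just that reduction; the f and r interpretations in the comments are inferred from how the code uses them, and the sample triples are invented:

#include <stdio.h>

struct path_desc {		/* stand-in for channel_path_desc_fmt1 */
	unsigned int f:1;	/* taken here as: mdc field valid */
	unsigned int r:1;	/* taken here as: more than one block allowed */
	unsigned int mdc;	/* max data count in 64K blocks */
};

static int accumulate_mdc(const struct path_desc *p, int n)
{
	int mdc = 0, i;

	for (i = 0; i < n; i++) {
		if (!p[i].f)
			return 0;	/* one bad path spoils the whole set */
		if (!p[i].r)
			mdc = 1;	/* cap at a single 64K block */
		mdc = mdc ? (mdc < (int)p[i].mdc ? mdc : (int)p[i].mdc)
			  : (int)p[i].mdc;
	}
	return mdc;
}

int main(void)
{
	struct path_desc paths[] = { { 1, 1, 4 }, { 1, 1, 2 } };

	printf("mdc=%d\n", accumulate_mdc(paths, 2));	/* prints mdc=2 */
	return 0;
}
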
726/**
690 * ccw_device_tm_intrg - perform interrogate function 727 * ccw_device_tm_intrg - perform interrogate function
691 * @cdev: ccw device on which to perform the interrogate function 728 * @cdev: ccw device on which to perform the interrogate function
692 * 729 *
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 82a5ad0d63f6..07a4fd29f096 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -213,6 +213,17 @@ static void spid_start(struct ccw_device *cdev)
213 spid_do(cdev); 213 spid_do(cdev);
214} 214}
215 215
216static int pgid_is_reset(struct pgid *p)
217{
218 char *c;
219
220 for (c = (char *)p + 1; c < (char *)(p + 1); c++) {
221 if (*c != 0)
222 return 0;
223 }
224 return 1;
225}
226
216static int pgid_cmp(struct pgid *p1, struct pgid *p2) 227static int pgid_cmp(struct pgid *p1, struct pgid *p2)
217{ 228{
218 return memcmp((char *) p1 + 1, (char *) p2 + 1, 229 return memcmp((char *) p1 + 1, (char *) p2 + 1,
@@ -223,7 +234,7 @@ static int pgid_cmp(struct pgid *p1, struct pgid *p2)
223 * Determine pathgroup state from PGID data. 234 * Determine pathgroup state from PGID data.
224 */ 235 */
225static void pgid_analyze(struct ccw_device *cdev, struct pgid **p, 236static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
226 int *mismatch, int *reserved, int *reset) 237 int *mismatch, int *reserved, u8 *reset)
227{ 238{
228 struct pgid *pgid = &cdev->private->pgid[0]; 239 struct pgid *pgid = &cdev->private->pgid[0];
229 struct pgid *first = NULL; 240 struct pgid *first = NULL;
@@ -238,9 +249,8 @@ static void pgid_analyze(struct ccw_device *cdev, struct pgid **p,
238 continue; 249 continue;
239 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE) 250 if (pgid->inf.ps.state2 == SNID_STATE2_RESVD_ELSE)
240 *reserved = 1; 251 *reserved = 1;
241 if (pgid->inf.ps.state1 == SNID_STATE1_RESET) { 252 if (pgid_is_reset(pgid)) {
242 /* A PGID was reset. */ 253 *reset |= lpm;
243 *reset = 1;
244 continue; 254 continue;
245 } 255 }
246 if (!first) { 256 if (!first) {
@@ -307,7 +317,7 @@ static void snid_done(struct ccw_device *cdev, int rc)
307 struct pgid *pgid; 317 struct pgid *pgid;
308 int mismatch = 0; 318 int mismatch = 0;
309 int reserved = 0; 319 int reserved = 0;
310 int reset = 0; 320 u8 reset = 0;
311 u8 donepm; 321 u8 donepm;
312 322
313 if (rc) 323 if (rc)
@@ -321,11 +331,12 @@ static void snid_done(struct ccw_device *cdev, int rc)
321 donepm = pgid_to_donepm(cdev); 331 donepm = pgid_to_donepm(cdev);
322 sch->vpm = donepm & sch->opm; 332 sch->vpm = donepm & sch->opm;
323 cdev->private->pgid_todo_mask &= ~donepm; 333 cdev->private->pgid_todo_mask &= ~donepm;
334 cdev->private->pgid_reset_mask |= reset;
324 pgid_fill(cdev, pgid); 335 pgid_fill(cdev, pgid);
325 } 336 }
326out: 337out:
327 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x " 338 CIO_MSG_EVENT(2, "snid: device 0.%x.%04x: rc=%d pvm=%02x vpm=%02x "
328 "todo=%02x mism=%d rsvd=%d reset=%d\n", id->ssid, 339 "todo=%02x mism=%d rsvd=%d reset=%02x\n", id->ssid,
329 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm, 340 id->devno, rc, cdev->private->pgid_valid_mask, sch->vpm,
330 cdev->private->pgid_todo_mask, mismatch, reserved, reset); 341 cdev->private->pgid_todo_mask, mismatch, reserved, reset);
331 switch (rc) { 342 switch (rc) {
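
The new pgid_is_reset() above replaces the single SNID_STATE1_RESET check with a byte scan: a PGID counts as reset when every byte after the first is zero, exactly the span that pgid_cmp() compares. Note the loop bound (char *)(p + 1), which is one whole element past p, so the walk covers bytes 1 through sizeof(struct pgid) - 1. A self-contained illustration with a dummy fixed-size PGID; the 11-byte size is only a placeholder for the real structure:

#include <stdio.h>
#include <string.h>

struct pgid { unsigned char b[11]; };	/* size stand-in only */

static int pgid_is_reset(struct pgid *p)
{
	char *c;

	/* scan every byte of the PGID except the first */
	for (c = (char *)p + 1; c < (char *)(p + 1); c++)
		if (*c != 0)
			return 0;
	return 1;
}

int main(void)
{
	struct pgid p;

	memset(&p, 0, sizeof(p));
	p.b[0] = 0x80;				/* first byte is ignored */
	printf("%d\n", pgid_is_reset(&p));	/* prints 1 */
	p.b[5] = 1;
	printf("%d\n", pgid_is_reset(&p));	/* prints 0 */
	return 0;
}
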
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h
index 469ef93f2302..ba31ad88f4f7 100644
--- a/drivers/s390/cio/io_sch.h
+++ b/drivers/s390/cio/io_sch.h
@@ -5,68 +5,36 @@
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include <asm/ccwdev.h> 6#include <asm/ccwdev.h>
7#include "css.h" 7#include "css.h"
8 8#include "orb.h"
9/*
10 * command-mode operation request block
11 */
12struct cmd_orb {
13 u32 intparm; /* interruption parameter */
14 u32 key : 4; /* flags, like key, suspend control, etc. */
15 u32 spnd : 1; /* suspend control */
16 u32 res1 : 1; /* reserved */
17 u32 mod : 1; /* modification control */
18 u32 sync : 1; /* synchronize control */
19 u32 fmt : 1; /* format control */
20 u32 pfch : 1; /* prefetch control */
21 u32 isic : 1; /* initial-status-interruption control */
22 u32 alcc : 1; /* address-limit-checking control */
23 u32 ssic : 1; /* suppress-suspended-interr. control */
24 u32 res2 : 1; /* reserved */
25 u32 c64 : 1; /* IDAW/QDIO 64 bit control */
26 u32 i2k : 1; /* IDAW 2/4kB block size control */
27 u32 lpm : 8; /* logical path mask */
28 u32 ils : 1; /* incorrect length */
29 u32 zero : 6; /* reserved zeros */
30 u32 orbx : 1; /* ORB extension control */
31 u32 cpa; /* channel program address */
32} __attribute__ ((packed, aligned(4)));
33
34/*
35 * transport-mode operation request block
36 */
37struct tm_orb {
38 u32 intparm;
39 u32 key:4;
40 u32 :9;
41 u32 b:1;
42 u32 :2;
43 u32 lpm:8;
44 u32 :7;
45 u32 x:1;
46 u32 tcw;
47 u32 prio:8;
48 u32 :8;
49 u32 rsvpgm:8;
50 u32 :8;
51 u32 :32;
52 u32 :32;
53 u32 :32;
54 u32 :32;
55} __attribute__ ((packed, aligned(4)));
56
57union orb {
58 struct cmd_orb cmd;
59 struct tm_orb tm;
60} __attribute__ ((packed, aligned(4)));
61 9
62struct io_subchannel_private { 10struct io_subchannel_private {
63 union orb orb; /* operation request block */ 11 union orb orb; /* operation request block */
64 struct ccw1 sense_ccw; /* static ccw for sense command */ 12 struct ccw1 sense_ccw; /* static ccw for sense command */
65} __attribute__ ((aligned(8))); 13 struct ccw_device *cdev;/* pointer to the child ccw device */
14 struct {
15 unsigned int suspend:1; /* allow suspend */
16 unsigned int prefetch:1;/* deny prefetch */
17 unsigned int inter:1; /* suppress intermediate interrupts */
18 } __packed options;
19} __aligned(8);
66 20
67#define to_io_private(n) ((struct io_subchannel_private *)n->private) 21#define to_io_private(n) ((struct io_subchannel_private *) \
68#define sch_get_cdev(n) (dev_get_drvdata(&n->dev)) 22 dev_get_drvdata(&(n)->dev))
69#define sch_set_cdev(n, c) (dev_set_drvdata(&n->dev, c)) 23#define set_io_private(n, p) (dev_set_drvdata(&(n)->dev, p))
24
25static inline struct ccw_device *sch_get_cdev(struct subchannel *sch)
26{
27 struct io_subchannel_private *priv = to_io_private(sch);
28 return priv ? priv->cdev : NULL;
29}
30
31static inline void sch_set_cdev(struct subchannel *sch,
32 struct ccw_device *cdev)
33{
34 struct io_subchannel_private *priv = to_io_private(sch);
35 if (priv)
36 priv->cdev = cdev;
37}
70 38
71#define MAX_CIWS 8 39#define MAX_CIWS 8
72 40
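
With the ORB definitions split out (see orb.h below), the subchannel's drvdata now carries struct io_subchannel_private, and sch_get_cdev()/sch_set_cdev() become NULL-tolerant accessors: once io_subchannel_remove() has done set_io_private(sch, NULL), a late sch_get_cdev() returns NULL instead of chasing a stale pointer. A userspace model of the accessor pattern; driver_data stands in for the device's drvdata field:

#include <stdio.h>
#include <stddef.h>

struct ccw_device;			/* opaque here */

struct io_subchannel_private {
	struct ccw_device *cdev;
};

struct subchannel {
	void *driver_data;		/* models dev_{get,set}_drvdata() */
};

static struct io_subchannel_private *to_io_private(struct subchannel *sch)
{
	return sch->driver_data;
}

static struct ccw_device *sch_get_cdev(struct subchannel *sch)
{
	struct io_subchannel_private *priv = to_io_private(sch);

	return priv ? priv->cdev : NULL;	/* tolerate detached private data */
}

int main(void)
{
	struct subchannel sch = { .driver_data = NULL };

	printf("%p\n", (void *)sch_get_cdev(&sch));	/* (nil), no crash */
	return 0;
}
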
@@ -151,8 +119,11 @@ struct ccw_device_private {
151 struct subchannel_id schid; /* subchannel number */ 119 struct subchannel_id schid; /* subchannel number */
152 struct ccw_request req; /* internal I/O request */ 120 struct ccw_request req; /* internal I/O request */
153 int iretry; 121 int iretry;
154 u8 pgid_valid_mask; /* mask of valid PGIDs */ 122 u8 pgid_valid_mask; /* mask of valid PGIDs */
155 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */ 123 u8 pgid_todo_mask; /* mask of PGIDs to be adjusted */
124 u8 pgid_reset_mask; /* mask of PGIDs which were reset */
 125 u8 path_gone_mask; /* mask of paths that became unavailable */
 126 u8 path_new_mask; /* mask of paths that became available */
156 struct { 127 struct {
157 unsigned int fast:1; /* post with "channel end" */ 128 unsigned int fast:1; /* post with "channel end" */
158 unsigned int repall:1; /* report every interrupt status */ 129 unsigned int repall:1; /* report every interrupt status */
@@ -188,23 +159,6 @@ struct ccw_device_private {
188 void *cmb_wait; /* deferred cmb enable/disable */ 159 void *cmb_wait; /* deferred cmb enable/disable */
189}; 160};
190 161
191static inline int ssch(struct subchannel_id schid, union orb *addr)
192{
193 register struct subchannel_id reg1 asm("1") = schid;
194 int ccode = -EIO;
195
196 asm volatile(
197 " ssch 0(%2)\n"
198 "0: ipm %0\n"
199 " srl %0,28\n"
200 "1:\n"
201 EX_TABLE(0b, 1b)
202 : "+d" (ccode)
203 : "d" (reg1), "a" (addr), "m" (*addr)
204 : "cc", "memory");
205 return ccode;
206}
207
208static inline int rsch(struct subchannel_id schid) 162static inline int rsch(struct subchannel_id schid)
209{ 163{
210 register struct subchannel_id reg1 asm("1") = schid; 164 register struct subchannel_id reg1 asm("1") = schid;
@@ -220,21 +174,6 @@ static inline int rsch(struct subchannel_id schid)
220 return ccode; 174 return ccode;
221} 175}
222 176
223static inline int csch(struct subchannel_id schid)
224{
225 register struct subchannel_id reg1 asm("1") = schid;
226 int ccode;
227
228 asm volatile(
229 " csch\n"
230 " ipm %0\n"
231 " srl %0,28"
232 : "=d" (ccode)
233 : "d" (reg1)
234 : "cc");
235 return ccode;
236}
237
238static inline int hsch(struct subchannel_id schid) 177static inline int hsch(struct subchannel_id schid)
239{ 178{
240 register struct subchannel_id reg1 asm("1") = schid; 179 register struct subchannel_id reg1 asm("1") = schid;
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h
index fac06155773f..4d80fc67a06b 100644
--- a/drivers/s390/cio/ioasm.h
+++ b/drivers/s390/cio/ioasm.h
@@ -3,6 +3,8 @@
3 3
4#include <asm/chpid.h> 4#include <asm/chpid.h>
5#include <asm/schid.h> 5#include <asm/schid.h>
6#include "orb.h"
7#include "cio.h"
6 8
7/* 9/*
8 * TPI info structure 10 * TPI info structure
@@ -87,6 +89,38 @@ static inline int tsch(struct subchannel_id schid, struct irb *addr)
87 return ccode; 89 return ccode;
88} 90}
89 91
92static inline int ssch(struct subchannel_id schid, union orb *addr)
93{
94 register struct subchannel_id reg1 asm("1") = schid;
95 int ccode = -EIO;
96
97 asm volatile(
98 " ssch 0(%2)\n"
99 "0: ipm %0\n"
100 " srl %0,28\n"
101 "1:\n"
102 EX_TABLE(0b, 1b)
103 : "+d" (ccode)
104 : "d" (reg1), "a" (addr), "m" (*addr)
105 : "cc", "memory");
106 return ccode;
107}
108
109static inline int csch(struct subchannel_id schid)
110{
111 register struct subchannel_id reg1 asm("1") = schid;
112 int ccode;
113
114 asm volatile(
115 " csch\n"
116 " ipm %0\n"
117 " srl %0,28"
118 : "=d" (ccode)
119 : "d" (reg1)
120 : "cc");
121 return ccode;
122}
123
90static inline int tpi(struct tpi_info *addr) 124static inline int tpi(struct tpi_info *addr)
91{ 125{
92 int ccode; 126 int ccode;
diff --git a/drivers/s390/cio/itcw.c b/drivers/s390/cio/itcw.c
index a0ae29564774..358ee16d10a2 100644
--- a/drivers/s390/cio/itcw.c
+++ b/drivers/s390/cio/itcw.c
@@ -93,6 +93,7 @@ EXPORT_SYMBOL(itcw_get_tcw);
93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws) 93size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
94{ 94{
95 size_t len; 95 size_t len;
96 int cross_count;
96 97
97 /* Main data. */ 98 /* Main data. */
98 len = sizeof(struct itcw); 99 len = sizeof(struct itcw);
@@ -105,12 +106,27 @@ size_t itcw_calc_size(int intrg, int max_tidaws, int intrg_max_tidaws)
105 /* TSB */ sizeof(struct tsb) + 106 /* TSB */ sizeof(struct tsb) +
106 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw); 107 /* TIDAL */ intrg_max_tidaws * sizeof(struct tidaw);
107 } 108 }
109
108 /* Maximum required alignment padding. */ 110 /* Maximum required alignment padding. */
109 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7; 111 len += /* Initial TCW */ 63 + /* Interrogate TCCB */ 7;
110 /* Maximum padding for structures that may not cross 4k boundary. */ 112
111 if ((max_tidaws > 0) || (intrg_max_tidaws > 0)) 113 /* TIDAW lists may not cross a 4k boundary. To cross a
112 len += max(max_tidaws, intrg_max_tidaws) * 114 * boundary we need to add a TTIC TIDAW. We need to reserve
113 sizeof(struct tidaw) - 1; 115 * one additional TIDAW for a TTIC that we may need to add due
116 * to the placement of the data chunk in memory, and a further
117 * TIDAW for each page boundary that the TIDAW list may cross
 118 * due to its own size.
119 */
120 if (max_tidaws) {
121 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
122 >> PAGE_SHIFT);
123 len += cross_count * sizeof(struct tidaw);
124 }
125 if (intrg_max_tidaws) {
126 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
127 >> PAGE_SHIFT);
128 len += cross_count * sizeof(struct tidaw);
129 }
114 return len; 130 return len;
115} 131}
116EXPORT_SYMBOL(itcw_calc_size); 132EXPORT_SYMBOL(itcw_calc_size);
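
The reservation arithmetic above is worth a worked check: a list of n TIDAWs occupies n * 16 bytes and can therefore cross up to (n * 16 - 1) >> PAGE_SHIFT page boundaries; one further TIDAW is reserved for a TTIC forced by the initial placement, giving cross_count = 1 + ((n * 16 - 1) >> PAGE_SHIFT). A standalone check of the formula, assuming 4k pages and 16-byte TIDAWs as on s390:

#include <stdio.h>

#define PAGE_SHIFT	12
#define TIDAW_SIZE	16	/* sizeof(struct tidaw) */

static int cross_count(int max_tidaws)
{
	if (!max_tidaws)
		return 0;
	return 1 + ((max_tidaws * TIDAW_SIZE - 1) >> PAGE_SHIFT);
}

int main(void)
{
	printf("%d\n", cross_count(1));		/* 1: placement TTIC only */
	printf("%d\n", cross_count(256));	/* 1: 4096 bytes, one page */
	printf("%d\n", cross_count(257));	/* 2: list spans two pages */
	return 0;
}
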
@@ -165,6 +181,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
165 void *chunk; 181 void *chunk;
166 addr_t start; 182 addr_t start;
167 addr_t end; 183 addr_t end;
184 int cross_count;
168 185
169 /* Check for 2G limit. */ 186 /* Check for 2G limit. */
170 start = (addr_t) buffer; 187 start = (addr_t) buffer;
@@ -177,8 +194,17 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
177 if (IS_ERR(chunk)) 194 if (IS_ERR(chunk))
178 return chunk; 195 return chunk;
179 itcw = chunk; 196 itcw = chunk;
180 itcw->max_tidaws = max_tidaws; 197 /* allow for TTIC tidaws that may be needed to cross a page boundary */
181 itcw->intrg_max_tidaws = intrg_max_tidaws; 198 cross_count = 0;
199 if (max_tidaws)
200 cross_count = 1 + ((max_tidaws * sizeof(struct tidaw) - 1)
201 >> PAGE_SHIFT);
202 itcw->max_tidaws = max_tidaws + cross_count;
203 cross_count = 0;
204 if (intrg_max_tidaws)
205 cross_count = 1 + ((intrg_max_tidaws * sizeof(struct tidaw) - 1)
206 >> PAGE_SHIFT);
207 itcw->intrg_max_tidaws = intrg_max_tidaws + cross_count;
182 /* Main TCW. */ 208 /* Main TCW. */
183 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0); 209 chunk = fit_chunk(&start, end, sizeof(struct tcw), 64, 0);
184 if (IS_ERR(chunk)) 210 if (IS_ERR(chunk))
@@ -198,7 +224,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
198 /* Data TIDAL. */ 224 /* Data TIDAL. */
199 if (max_tidaws > 0) { 225 if (max_tidaws > 0) {
200 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 226 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
201 max_tidaws, 16, 1); 227 itcw->max_tidaws, 16, 0);
202 if (IS_ERR(chunk)) 228 if (IS_ERR(chunk))
203 return chunk; 229 return chunk;
204 tcw_set_data(itcw->tcw, chunk, 1); 230 tcw_set_data(itcw->tcw, chunk, 1);
@@ -206,7 +232,7 @@ struct itcw *itcw_init(void *buffer, size_t size, int op, int intrg,
206 /* Interrogate data TIDAL. */ 232 /* Interrogate data TIDAL. */
207 if (intrg && (intrg_max_tidaws > 0)) { 233 if (intrg && (intrg_max_tidaws > 0)) {
208 chunk = fit_chunk(&start, end, sizeof(struct tidaw) * 234 chunk = fit_chunk(&start, end, sizeof(struct tidaw) *
209 intrg_max_tidaws, 16, 1); 235 itcw->intrg_max_tidaws, 16, 0);
210 if (IS_ERR(chunk)) 236 if (IS_ERR(chunk))
211 return chunk; 237 return chunk;
212 tcw_set_data(itcw->intrg_tcw, chunk, 1); 238 tcw_set_data(itcw->intrg_tcw, chunk, 1);
@@ -283,13 +309,29 @@ EXPORT_SYMBOL(itcw_add_dcw);
283 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the 309 * the new tidaw on success or -%ENOSPC if the new tidaw would exceed the
284 * available space. 310 * available space.
285 * 311 *
286 * Note: the tidaw-list is assumed to be contiguous with no ttics. The 312 * Note: TTIC tidaws are automatically added when needed, so explicitly calling
287 * last-tidaw flag for the last tidaw in the list will be set by itcw_finalize. 313 * this interface with the TTIC flag is not supported. The last-tidaw flag
314 * for the last tidaw in the list will be set by itcw_finalize.
288 */ 315 */
289struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count) 316struct tidaw *itcw_add_tidaw(struct itcw *itcw, u8 flags, void *addr, u32 count)
290{ 317{
318 struct tidaw *following;
319
291 if (itcw->num_tidaws >= itcw->max_tidaws) 320 if (itcw->num_tidaws >= itcw->max_tidaws)
292 return ERR_PTR(-ENOSPC); 321 return ERR_PTR(-ENOSPC);
322 /*
323 * Is the tidaw, which follows the one we are about to fill, on the next
324 * page? Then we have to insert a TTIC tidaw first, that points to the
325 * tidaw on the new page.
326 */
327 following = ((struct tidaw *) tcw_get_data(itcw->tcw))
328 + itcw->num_tidaws + 1;
329 if (itcw->num_tidaws && !((unsigned long) following & ~PAGE_MASK)) {
330 tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++,
331 TIDAW_FLAGS_TTIC, following, 0);
332 if (itcw->num_tidaws >= itcw->max_tidaws)
333 return ERR_PTR(-ENOSPC);
334 }
293 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count); 335 return tcw_add_tidaw(itcw->tcw, itcw->num_tidaws++, flags, addr, count);
294} 336}
295EXPORT_SYMBOL(itcw_add_tidaw); 337EXPORT_SYMBOL(itcw_add_tidaw);
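
The boundary test in itcw_add_tidaw() above looks one slot ahead: if the TIDAW after the one about to be filled would start exactly on a page boundary, a TTIC TIDAW pointing at it is chained in first. A standalone sketch of just that predicate, assuming 4k pages and the 16-byte TIDAW size:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define TIDAW_SIZE	16UL	/* sizeof(struct tidaw) */

/* Would slot (num + 1) of a TIDAW list starting at base begin a new page? */
static int needs_ttic(unsigned long base, int num)
{
	unsigned long following = base + (num + 1) * TIDAW_SIZE;

	return num && !(following & ~PAGE_MASK);
}

int main(void)
{
	unsigned long base = 0x10000UL;	/* hypothetical page-aligned list */

	printf("%d\n", needs_ttic(base, 254));	/* 0: slot 255 still fits */
	printf("%d\n", needs_ttic(base, 255));	/* 1: slot 256 starts a page */
	return 0;
}
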
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
new file mode 100644
index 000000000000..45a9865c2b36
--- /dev/null
+++ b/drivers/s390/cio/orb.h
@@ -0,0 +1,67 @@
1/*
2 * Orb related data structures.
3 *
4 * Copyright IBM Corp. 2007, 2011
5 *
6 * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com>
7 * Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
8 * Sebastian Ott <sebott@linux.vnet.ibm.com>
9 */
10
11#ifndef S390_ORB_H
12#define S390_ORB_H
13
14/*
15 * Command-mode operation request block
16 */
17struct cmd_orb {
18 u32 intparm; /* interruption parameter */
19 u32 key:4; /* flags, like key, suspend control, etc. */
20 u32 spnd:1; /* suspend control */
21 u32 res1:1; /* reserved */
22 u32 mod:1; /* modification control */
23 u32 sync:1; /* synchronize control */
24 u32 fmt:1; /* format control */
25 u32 pfch:1; /* prefetch control */
26 u32 isic:1; /* initial-status-interruption control */
27 u32 alcc:1; /* address-limit-checking control */
28 u32 ssic:1; /* suppress-suspended-interr. control */
29 u32 res2:1; /* reserved */
30 u32 c64:1; /* IDAW/QDIO 64 bit control */
31 u32 i2k:1; /* IDAW 2/4kB block size control */
32 u32 lpm:8; /* logical path mask */
33 u32 ils:1; /* incorrect length */
34 u32 zero:6; /* reserved zeros */
35 u32 orbx:1; /* ORB extension control */
36 u32 cpa; /* channel program address */
37} __packed __aligned(4);
38
39/*
40 * Transport-mode operation request block
41 */
42struct tm_orb {
43 u32 intparm;
44 u32 key:4;
45 u32:9;
46 u32 b:1;
47 u32:2;
48 u32 lpm:8;
49 u32:7;
50 u32 x:1;
51 u32 tcw;
52 u32 prio:8;
53 u32:8;
54 u32 rsvpgm:8;
55 u32:8;
56 u32:32;
57 u32:32;
58 u32:32;
59 u32:32;
60} __packed __aligned(4);
61
62union orb {
63 struct cmd_orb cmd;
64 struct tm_orb tm;
65} __packed __aligned(4);
66
67#endif /* S390_ORB_H */
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44e..7bc643f3f5ab 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -91,6 +91,12 @@ enum qdio_irq_states {
91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */ 91#define AC1_SC_QEBSM_AVAILABLE 0x02 /* available for subchannel */
92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */ 92#define AC1_SC_QEBSM_ENABLED 0x01 /* enabled for subchannel */
93 93
94/* SIGA flags */
95#define QDIO_SIGA_WRITE 0x00
96#define QDIO_SIGA_READ 0x01
97#define QDIO_SIGA_SYNC 0x02
98#define QDIO_SIGA_QEBSM_FLAG 0x80
99
94#ifdef CONFIG_64BIT 100#ifdef CONFIG_64BIT
95static inline int do_sqbs(u64 token, unsigned char state, int queue, 101static inline int do_sqbs(u64 token, unsigned char state, int queue,
96 int *start, int *count) 102 int *start, int *count)
@@ -142,10 +148,9 @@ struct siga_flag {
142 u8 input:1; 148 u8 input:1;
143 u8 output:1; 149 u8 output:1;
144 u8 sync:1; 150 u8 sync:1;
145 u8 no_sync_ti:1; 151 u8 sync_after_ai:1;
146 u8 no_sync_out_ti:1; 152 u8 sync_out_after_pci:1;
147 u8 no_sync_out_pci:1; 153 u8:3;
148 u8:2;
149} __attribute__ ((packed)); 154} __attribute__ ((packed));
150 155
151struct chsc_ssqd_area { 156struct chsc_ssqd_area {
@@ -202,12 +207,14 @@ struct qdio_dev_perf_stat {
202 unsigned int inbound_queue_full; 207 unsigned int inbound_queue_full;
203 unsigned int outbound_call; 208 unsigned int outbound_call;
204 unsigned int outbound_handler; 209 unsigned int outbound_handler;
210 unsigned int outbound_queue_full;
205 unsigned int fast_requeue; 211 unsigned int fast_requeue;
206 unsigned int target_full; 212 unsigned int target_full;
207 unsigned int eqbs; 213 unsigned int eqbs;
208 unsigned int eqbs_partial; 214 unsigned int eqbs_partial;
209 unsigned int sqbs; 215 unsigned int sqbs;
210 unsigned int sqbs_partial; 216 unsigned int sqbs_partial;
217 unsigned int int_discarded;
211} ____cacheline_aligned; 218} ____cacheline_aligned;
212 219
213struct qdio_queue_perf_stat { 220struct qdio_queue_perf_stat {
@@ -222,6 +229,10 @@ struct qdio_queue_perf_stat {
222 unsigned int nr_sbal_total; 229 unsigned int nr_sbal_total;
223}; 230};
224 231
232enum qdio_queue_irq_states {
233 QDIO_QUEUE_IRQS_DISABLED,
234};
235
225struct qdio_input_q { 236struct qdio_input_q {
226 /* input buffer acknowledgement flag */ 237 /* input buffer acknowledgement flag */
227 int polling; 238 int polling;
@@ -231,15 +242,19 @@ struct qdio_input_q {
231 int ack_count; 242 int ack_count;
232 /* last time of noticing incoming data */ 243 /* last time of noticing incoming data */
233 u64 timestamp; 244 u64 timestamp;
245 /* upper-layer polling flag */
246 unsigned long queue_irq_state;
247 /* callback to start upper-layer polling */
248 void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
234}; 249};
235 250
236struct qdio_output_q { 251struct qdio_output_q {
237 /* PCIs are enabled for the queue */ 252 /* PCIs are enabled for the queue */
238 int pci_out_enabled; 253 int pci_out_enabled;
239 /* IQDIO: output multiple buffers (enhanced SIGA) */
240 int use_enh_siga;
241 /* timer to check for more outbound work */ 254 /* timer to check for more outbound work */
242 struct timer_list timer; 255 struct timer_list timer;
256 /* used SBALs before tasklet schedule */
257 int scan_threshold;
243}; 258};
244 259
245/* 260/*
@@ -374,12 +389,13 @@ static inline int multicast_outbound(struct qdio_q *q)
374 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) 389 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
375#define is_qebsm(q) (q->irq_ptr->sch_token != 0) 390#define is_qebsm(q) (q->irq_ptr->sch_token != 0)
376 391
377#define need_siga_sync_thinint(q) (!q->irq_ptr->siga_flag.no_sync_ti)
378#define need_siga_sync_out_thinint(q) (!q->irq_ptr->siga_flag.no_sync_out_ti)
379#define need_siga_in(q) (q->irq_ptr->siga_flag.input) 392#define need_siga_in(q) (q->irq_ptr->siga_flag.input)
380#define need_siga_out(q) (q->irq_ptr->siga_flag.output) 393#define need_siga_out(q) (q->irq_ptr->siga_flag.output)
381#define need_siga_sync(q) (q->irq_ptr->siga_flag.sync) 394#define need_siga_sync(q) (unlikely(q->irq_ptr->siga_flag.sync))
382#define siga_syncs_out_pci(q) (q->irq_ptr->siga_flag.no_sync_out_pci) 395#define need_siga_sync_after_ai(q) \
396 (unlikely(q->irq_ptr->siga_flag.sync_after_ai))
397#define need_siga_sync_out_after_pci(q) \
398 (unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
383 399
384#define for_each_input_queue(irq_ptr, q, i) \ 400#define for_each_input_queue(irq_ptr, q, i) \
385 for (i = 0, q = irq_ptr->input_qs[0]; \ 401 for (i = 0, q = irq_ptr->input_qs[0]; \
@@ -399,6 +415,26 @@ static inline int multicast_outbound(struct qdio_q *q)
399#define sub_buf(bufnr, dec) \ 415#define sub_buf(bufnr, dec) \
400 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) 416 ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
401 417
418#define queue_irqs_enabled(q) \
419 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
420#define queue_irqs_disabled(q) \
421 (test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
422
423#define TIQDIO_SHARED_IND 63
424
425/* device state change indicators */
426struct indicator_t {
427 u32 ind; /* u32 because of compare-and-swap performance */
428 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
429};
430
431extern struct indicator_t *q_indicators;
432
433static inline int shared_ind(u32 *dsci)
434{
435 return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
436}
437
402/* prototypes for thin interrupt */ 438/* prototypes for thin interrupt */
403void qdio_setup_thinint(struct qdio_irq *irq_ptr); 439void qdio_setup_thinint(struct qdio_irq *irq_ptr);
404int qdio_establish_thinint(struct qdio_irq *irq_ptr); 440int qdio_establish_thinint(struct qdio_irq *irq_ptr);
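
The indicator scheme introduced above gives each thin-interrupt queue a device state change indicator (DSCI) out of q_indicators[]; slot TIQDIO_SHARED_IND (63) is the shared fallback whose use count may exceed one, and shared_ind() reduces to an address comparison. A userspace model of the allocation and the shared test, with plain ints in place of the kernel's atomics:

#include <stdio.h>

#define TIQDIO_SHARED_IND	63

struct indicator_t {
	unsigned int ind;	/* u32 in the kernel for compare-and-swap */
	int count;		/* use count; only slot 63 goes above 1 */
};

static struct indicator_t q_indicators[TIQDIO_SHARED_IND + 1];

static unsigned int *get_indicator(void)
{
	int i;

	for (i = 0; i < TIQDIO_SHARED_IND; i++)
		if (q_indicators[i].count == 0) {
			q_indicators[i].count = 1;	/* dedicated slot */
			return &q_indicators[i].ind;
		}
	q_indicators[TIQDIO_SHARED_IND].count++;	/* shared fallback */
	return &q_indicators[TIQDIO_SHARED_IND].ind;
}

static int shared_ind(unsigned int *dsci)
{
	return dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
}

int main(void)
{
	printf("shared=%d\n", shared_ind(get_indicator()));	/* 0 */
	return 0;
}
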
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d537..f8b03a636e49 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
56 56
57 seq_printf(m, "DSCI: %d nr_used: %d\n", 57 seq_printf(m, "DSCI: %d nr_used: %d\n",
58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used)); 58 *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
59 seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move); 59 seq_printf(m, "ftc: %d last_move: %d\n",
60 seq_printf(m, "polling: %d ack start: %d ack count: %d\n", 60 q->first_to_check, q->last_move);
61 q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count); 61 if (q->is_input_q) {
62 seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
63 q->u.in.polling, q->u.in.ack_start,
64 q->u.in.ack_count);
65 seq_printf(m, "IRQs disabled: %u\n",
66 test_bit(QDIO_QUEUE_IRQS_DISABLED,
67 &q->u.in.queue_irq_state));
68 }
62 seq_printf(m, "SBAL states:\n"); 69 seq_printf(m, "SBAL states:\n");
63 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); 70 seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
64 71
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
113 return 0; 120 return 0;
114} 121}
115 122
116static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
117 size_t count, loff_t *off)
118{
119 struct seq_file *seq = file->private_data;
120 struct qdio_q *q = seq->private;
121
122 if (!q)
123 return 0;
124 if (q->is_input_q)
125 xchg(q->irq_ptr->dsci, 1);
126 local_bh_disable();
127 tasklet_schedule(&q->tasklet);
128 local_bh_enable();
129 return count;
130}
131
132static int qstat_seq_open(struct inode *inode, struct file *filp) 123static int qstat_seq_open(struct inode *inode, struct file *filp)
133{ 124{
134 return single_open(filp, qstat_show, 125 return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
139 .owner = THIS_MODULE, 130 .owner = THIS_MODULE,
140 .open = qstat_seq_open, 131 .open = qstat_seq_open,
141 .read = seq_read, 132 .read = seq_read,
142 .write = qstat_seq_write,
143 .llseek = seq_lseek, 133 .llseek = seq_lseek,
144 .release = single_release, 134 .release = single_release,
145}; 135};
@@ -161,12 +151,14 @@ static char *qperf_names[] = {
161 "Inbound queue full", 151 "Inbound queue full",
162 "Outbound calls", 152 "Outbound calls",
163 "Outbound handler", 153 "Outbound handler",
154 "Outbound queue full",
164 "Outbound fast_requeue", 155 "Outbound fast_requeue",
165 "Outbound target_full", 156 "Outbound target_full",
166 "QEBSM eqbs", 157 "QEBSM eqbs",
167 "QEBSM eqbs partial", 158 "QEBSM eqbs partial",
168 "QEBSM sqbs", 159 "QEBSM sqbs",
169 "QEBSM sqbs partial" 160 "QEBSM sqbs partial",
161 "Discarded interrupts"
170}; 162};
171 163
172static int qperf_show(struct seq_file *m, void *v) 164static int qperf_show(struct seq_file *m, void *v)
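
The qperf_names[] additions above only stay correct if the table remains index-aligned with the per-queue counter block that qperf_show() walks. A hedged sketch of that invariant with a compile-time length guard; the counter struct and its field names are assumptions modeled on the counters this patch increments (outbound_queue_full, int_discarded), not the kernel's real layout:

struct queue_perf_stat {			/* reduced stand-in */
	unsigned int outbound_queue_full;
	unsigned int int_discarded;
};

static const char *qperf_names[] = {
	"Outbound queue full",
	"Discarded interrupts",
};

/* Compile-time guard: exactly one name per u32 counter field. */
typedef char names_match_fields
	[sizeof(qperf_names) / sizeof(qperf_names[0]) ==
	 sizeof(struct queue_perf_stat) / sizeof(unsigned int) ? 1 : -1];
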
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8e..570d4da10696 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -14,6 +14,7 @@
14#include <linux/timer.h> 14#include <linux/timer.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/gfp.h> 16#include <linux/gfp.h>
17#include <linux/kernel_stat.h>
17#include <asm/atomic.h> 18#include <asm/atomic.h>
18#include <asm/debug.h> 19#include <asm/debug.h>
19#include <asm/qdio.h> 20#include <asm/qdio.h>
@@ -29,11 +30,12 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
29MODULE_DESCRIPTION("QDIO base support"); 30MODULE_DESCRIPTION("QDIO base support");
30MODULE_LICENSE("GPL"); 31MODULE_LICENSE("GPL");
31 32
32static inline int do_siga_sync(struct subchannel_id schid, 33static inline int do_siga_sync(unsigned long schid,
33 unsigned int out_mask, unsigned int in_mask) 34 unsigned int out_mask, unsigned int in_mask,
35 unsigned int fc)
34{ 36{
35 register unsigned long __fc asm ("0") = 2; 37 register unsigned long __fc asm ("0") = fc;
36 register struct subchannel_id __schid asm ("1") = schid; 38 register unsigned long __schid asm ("1") = schid;
37 register unsigned long out asm ("2") = out_mask; 39 register unsigned long out asm ("2") = out_mask;
38 register unsigned long in asm ("3") = in_mask; 40 register unsigned long in asm ("3") = in_mask;
39 int cc; 41 int cc;
@@ -47,10 +49,11 @@ static inline int do_siga_sync(struct subchannel_id schid,
47 return cc; 49 return cc;
48} 50}
49 51
50static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) 52static inline int do_siga_input(unsigned long schid, unsigned int mask,
53 unsigned int fc)
51{ 54{
52 register unsigned long __fc asm ("0") = 1; 55 register unsigned long __fc asm ("0") = fc;
53 register struct subchannel_id __schid asm ("1") = schid; 56 register unsigned long __schid asm ("1") = schid;
54 register unsigned long __mask asm ("2") = mask; 57 register unsigned long __mask asm ("2") = mask;
55 int cc; 58 int cc;
56 59
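
do_siga_sync() and do_siga_input() now take the SIGA function code from the caller instead of hard-coding it, so one helper serves both the classic and the QEBSM forms. A sketch of the function-code composition; the numeric values are read off the constants this patch removes (sync was 2, read was 1, write defaulted to 0, QEBSM ORed in 0x80), and the QDIO_SIGA_* names are assumed to carry them:

#include <stdio.h>

enum {
	QDIO_SIGA_WRITE		= 0x00,	/* old qdio_siga_output default */
	QDIO_SIGA_READ		= 0x01,	/* old do_siga_input fc */
	QDIO_SIGA_SYNC		= 0x02,	/* old do_siga_sync fc */
	QDIO_SIGA_QEBSM_FLAG	= 0x80,	/* old "fc |= 0x80" */
};

/* With QEBSM the subchannel token replaces the schid and the flag is
 * ORed into fc -- the pattern all three qdio_siga_* helpers now share. */
static unsigned int siga_fc(int is_qebsm, unsigned int base_fc)
{
	return is_qebsm ? base_fc | QDIO_SIGA_QEBSM_FLAG : base_fc;
}

int main(void)
{
	/* prints "0 0x82" */
	printf("%#x %#x\n", siga_fc(0, QDIO_SIGA_WRITE),
	       siga_fc(1, QDIO_SIGA_SYNC));
	return 0;
}
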
@@ -279,16 +282,20 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr)
279static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, 282static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
280 unsigned int input) 283 unsigned int input)
281{ 284{
285 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
286 unsigned int fc = QDIO_SIGA_SYNC;
282 int cc; 287 int cc;
283 288
284 if (!need_siga_sync(q))
285 return 0;
286
287 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); 289 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
288 qperf_inc(q, siga_sync); 290 qperf_inc(q, siga_sync);
289 291
290 cc = do_siga_sync(q->irq_ptr->schid, output, input); 292 if (is_qebsm(q)) {
291 if (cc) 293 schid = q->irq_ptr->sch_token;
294 fc |= QDIO_SIGA_QEBSM_FLAG;
295 }
296
297 cc = do_siga_sync(schid, output, input, fc);
298 if (unlikely(cc))
292 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); 299 DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc);
293 return cc; 300 return cc;
294} 301}
@@ -301,38 +308,22 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
301 return qdio_siga_sync(q, q->mask, 0); 308 return qdio_siga_sync(q, q->mask, 0);
302} 309}
303 310
304static inline int qdio_siga_sync_out(struct qdio_q *q)
305{
306 return qdio_siga_sync(q, ~0U, 0);
307}
308
309static inline int qdio_siga_sync_all(struct qdio_q *q)
310{
311 return qdio_siga_sync(q, ~0U, ~0U);
312}
313
314static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) 311static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit)
315{ 312{
316 unsigned long schid; 313 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
317 unsigned int fc = 0; 314 unsigned int fc = QDIO_SIGA_WRITE;
318 u64 start_time = 0; 315 u64 start_time = 0;
319 int cc; 316 int cc;
320 317
321 if (q->u.out.use_enh_siga)
322 fc = 3;
323
324 if (is_qebsm(q)) { 318 if (is_qebsm(q)) {
325 schid = q->irq_ptr->sch_token; 319 schid = q->irq_ptr->sch_token;
326 fc |= 0x80; 320 fc |= QDIO_SIGA_QEBSM_FLAG;
327 } 321 }
328 else
329 schid = *((u32 *)&q->irq_ptr->schid);
330
331again: 322again:
332 cc = do_siga_output(schid, q->mask, busy_bit, fc); 323 cc = do_siga_output(schid, q->mask, busy_bit, fc);
333 324
334 /* hipersocket busy condition */ 325 /* hipersocket busy condition */
335 if (*busy_bit) { 326 if (unlikely(*busy_bit)) {
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 327 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 328
338 if (!start_time) { 329 if (!start_time) {
@@ -347,32 +338,41 @@ again:
347 338
348static inline int qdio_siga_input(struct qdio_q *q) 339static inline int qdio_siga_input(struct qdio_q *q)
349{ 340{
341 unsigned long schid = *((u32 *) &q->irq_ptr->schid);
342 unsigned int fc = QDIO_SIGA_READ;
350 int cc; 343 int cc;
351 344
352 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); 345 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
353 qperf_inc(q, siga_read); 346 qperf_inc(q, siga_read);
354 347
355 cc = do_siga_input(q->irq_ptr->schid, q->mask); 348 if (is_qebsm(q)) {
356 if (cc) 349 schid = q->irq_ptr->sch_token;
350 fc |= QDIO_SIGA_QEBSM_FLAG;
351 }
352
353 cc = do_siga_input(schid, q->mask, fc);
354 if (unlikely(cc))
357 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); 355 DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc);
358 return cc; 356 return cc;
359} 357}
360 358
361static inline void qdio_sync_after_thinint(struct qdio_q *q) 359#define qdio_siga_sync_out(q) qdio_siga_sync(q, ~0U, 0)
360#define qdio_siga_sync_all(q) qdio_siga_sync(q, ~0U, ~0U)
361
362static inline void qdio_sync_queues(struct qdio_q *q)
362{ 363{
363 if (pci_out_supported(q)) { 364 /* PCI capable outbound queues will also be scanned so sync them too */
364 if (need_siga_sync_thinint(q)) 365 if (pci_out_supported(q))
365 qdio_siga_sync_all(q); 366 qdio_siga_sync_all(q);
366 else if (need_siga_sync_out_thinint(q)) 367 else
367 qdio_siga_sync_out(q);
368 } else
369 qdio_siga_sync_q(q); 368 qdio_siga_sync_q(q);
370} 369}
371 370
372int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, 371int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
373 unsigned char *state) 372 unsigned char *state)
374{ 373{
375 qdio_siga_sync_q(q); 374 if (need_siga_sync(q))
375 qdio_siga_sync_q(q);
376 return get_buf_states(q, bufnr, state, 1, 0); 376 return get_buf_states(q, bufnr, state, 1, 0);
377} 377}
378 378
@@ -407,13 +407,16 @@ static inline void account_sbals(struct qdio_q *q, int count)
407 q->q_stats.nr_sbals[pos]++; 407 q->q_stats.nr_sbals[pos]++;
408} 408}
409 409
410static void announce_buffer_error(struct qdio_q *q, int count) 410static void process_buffer_error(struct qdio_q *q, int count)
411{ 411{
412 unsigned char state = (q->is_input_q) ? SLSB_P_INPUT_NOT_INIT :
413 SLSB_P_OUTPUT_NOT_INIT;
414
412 q->qdio_error |= QDIO_ERROR_SLSB_STATE; 415 q->qdio_error |= QDIO_ERROR_SLSB_STATE;
413 416
414 /* special handling for no target buffer empty */ 417 /* special handling for no target buffer empty */
415 if ((!q->is_input_q && 418 if ((!q->is_input_q &&
416 (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { 419 (q->sbal[q->first_to_check]->element[15].sflags) == 0x10)) {
417 qperf_inc(q, target_full); 420 qperf_inc(q, target_full);
418 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x", 421 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
419 q->first_to_check); 422 q->first_to_check);
@@ -424,8 +427,14 @@ static void announce_buffer_error(struct qdio_q *q, int count)
424 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); 427 DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr);
425 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); 428 DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count);
426 DBF_ERROR("F14:%2x F15:%2x", 429 DBF_ERROR("F14:%2x F15:%2x",
427 q->sbal[q->first_to_check]->element[14].flags & 0xff, 430 q->sbal[q->first_to_check]->element[14].sflags,
428 q->sbal[q->first_to_check]->element[15].flags & 0xff); 431 q->sbal[q->first_to_check]->element[15].sflags);
432
433 /*
434 * Interrupts may be avoided as long as the error is present
435 * so change the buffer state immediately to avoid starvation.
436 */
437 set_buf_states(q, q->first_to_check, state, count);
429} 438}
430 439
431static inline void inbound_primed(struct qdio_q *q, int count) 440static inline void inbound_primed(struct qdio_q *q, int count)
@@ -476,7 +485,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
476static int get_inbound_buffer_frontier(struct qdio_q *q) 485static int get_inbound_buffer_frontier(struct qdio_q *q)
477{ 486{
478 int count, stop; 487 int count, stop;
479 unsigned char state; 488 unsigned char state = 0;
480 489
481 /* 490 /*
482 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 491 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -506,8 +515,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
506 account_sbals(q, count); 515 account_sbals(q, count);
507 break; 516 break;
508 case SLSB_P_INPUT_ERROR: 517 case SLSB_P_INPUT_ERROR:
509 announce_buffer_error(q, count); 518 process_buffer_error(q, count);
510 /* process the buffer, the upper layer will take care of it */
511 q->first_to_check = add_buf(q->first_to_check, count); 519 q->first_to_check = add_buf(q->first_to_check, count);
512 atomic_sub(count, &q->nr_buf_used); 520 atomic_sub(count, &q->nr_buf_used);
513 if (q->irq_ptr->perf_stat_enabled) 521 if (q->irq_ptr->perf_stat_enabled)
@@ -549,7 +557,8 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
549 if (!atomic_read(&q->nr_buf_used)) 557 if (!atomic_read(&q->nr_buf_used))
550 return 1; 558 return 1;
551 559
552 qdio_siga_sync_q(q); 560 if (need_siga_sync(q))
561 qdio_siga_sync_q(q);
553 get_buf_state(q, q->first_to_check, &state, 0); 562 get_buf_state(q, q->first_to_check, &state, 0);
554 563
555 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR) 564 if (state == SLSB_P_INPUT_PRIMED || state == SLSB_P_INPUT_ERROR)
@@ -642,11 +651,14 @@ void qdio_inbound_processing(unsigned long data)
642static int get_outbound_buffer_frontier(struct qdio_q *q) 651static int get_outbound_buffer_frontier(struct qdio_q *q)
643{ 652{
644 int count, stop; 653 int count, stop;
645 unsigned char state; 654 unsigned char state = 0;
646 655
647 if (((queue_type(q) != QDIO_IQDIO_QFMT) && !pci_out_supported(q)) || 656 if (need_siga_sync(q))
648 (queue_type(q) == QDIO_IQDIO_QFMT && multicast_outbound(q))) 657 if (((queue_type(q) != QDIO_IQDIO_QFMT) &&
649 qdio_siga_sync_q(q); 658 !pci_out_supported(q)) ||
659 (queue_type(q) == QDIO_IQDIO_QFMT &&
660 multicast_outbound(q)))
661 qdio_siga_sync_q(q);
650 662
651 /* 663 /*
652 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved 664 * Don't check 128 buffers, as otherwise qdio_inbound_q_moved
@@ -673,8 +685,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
673 account_sbals(q, count); 685 account_sbals(q, count);
674 break; 686 break;
675 case SLSB_P_OUTPUT_ERROR: 687 case SLSB_P_OUTPUT_ERROR:
676 announce_buffer_error(q, count); 688 process_buffer_error(q, count);
677 /* process the buffer, the upper layer will take care of it */
678 q->first_to_check = add_buf(q->first_to_check, count); 689 q->first_to_check = add_buf(q->first_to_check, count);
679 atomic_sub(count, &q->nr_buf_used); 690 atomic_sub(count, &q->nr_buf_used);
680 if (q->irq_ptr->perf_stat_enabled) 691 if (q->irq_ptr->perf_stat_enabled)
@@ -818,7 +829,8 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
818static void __tiqdio_inbound_processing(struct qdio_q *q) 829static void __tiqdio_inbound_processing(struct qdio_q *q)
819{ 830{
820 qperf_inc(q, tasklet_inbound); 831 qperf_inc(q, tasklet_inbound);
821 qdio_sync_after_thinint(q); 832 if (need_siga_sync(q) && need_siga_sync_after_ai(q))
833 qdio_sync_queues(q);
822 834
823 /* 835 /*
824 * The interrupt could be caused by a PCI request. Check the 836 * The interrupt could be caused by a PCI request. Check the
@@ -884,19 +896,28 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
884 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED)) 896 if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
885 return; 897 return;
886 898
887 for_each_input_queue(irq_ptr, q, i) 899 for_each_input_queue(irq_ptr, q, i) {
888 tasklet_schedule(&q->tasklet); 900 if (q->u.in.queue_start_poll) {
901 /* skip if polling is enabled or already in work */
902 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
903 &q->u.in.queue_irq_state)) {
904 qperf_inc(q, int_discarded);
905 continue;
906 }
907 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
908 q->irq_ptr->int_parm);
909 } else
910 tasklet_schedule(&q->tasklet);
911 }
889 912
890 if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) 913 if (!pci_out_supported(q))
891 return; 914 return;
892 915
893 for_each_output_queue(irq_ptr, q, i) { 916 for_each_output_queue(irq_ptr, q, i) {
894 if (qdio_outbound_q_done(q)) 917 if (qdio_outbound_q_done(q))
895 continue; 918 continue;
896 919 if (need_siga_sync(q) && need_siga_sync_out_after_pci(q))
897 if (!siga_syncs_out_pci(q))
898 qdio_siga_sync_q(q); 920 qdio_siga_sync_q(q);
899
900 tasklet_schedule(&q->tasklet); 921 tasklet_schedule(&q->tasklet);
901 } 922 }
902} 923}
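
In qdio_int_handler_pci() above, the QDIO_QUEUE_IRQS_DISABLED bit acts as an at-most-once gate: the interrupt that wins test_and_set_bit() hands the queue to the driver's queue_start_poll callback, and every further interrupt is counted as discarded until the driver re-arms. A standalone user-space model of that gate (C11 atomics stand in for the kernel's bitops; names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag irqs_disabled = ATOMIC_FLAG_INIT;
static int int_discarded;

static void queue_start_poll(void)
{
	puts("driver polls the queue");
}

static void interrupt_for_queue(void)
{
	/* first caller wins the bit, later ones are discarded */
	if (atomic_flag_test_and_set(&irqs_disabled)) {
		int_discarded++;
		return;
	}
	queue_start_poll();
}

int main(void)
{
	interrupt_for_queue();			/* dispatches the callback */
	interrupt_for_queue();			/* discarded */
	atomic_flag_clear(&irqs_disabled);	/* qdio_start_irq() analogue */
	interrupt_for_queue();			/* dispatches again */
	printf("discarded: %d\n", int_discarded);	/* 1 */
	return 0;
}
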
@@ -959,6 +980,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
959 return; 980 return;
960 } 981 }
961 982
983 kstat_cpu(smp_processor_id()).irqs[IOINT_QDI]++;
962 if (irq_ptr->perf_stat_enabled) 984 if (irq_ptr->perf_stat_enabled)
963 irq_ptr->perf_stat.qdio_int++; 985 irq_ptr->perf_stat.qdio_int++;
964 986
@@ -1262,7 +1284,6 @@ int qdio_establish(struct qdio_initialize *init_data)
1262 } 1284 }
1263 1285
1264 qdio_setup_ssqd_info(irq_ptr); 1286 qdio_setup_ssqd_info(irq_ptr);
1265 DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc);
1266 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); 1287 DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac);
1267 1288
1268 /* qebsm is now setup if available, initialize buffer states */ 1289 /* qebsm is now setup if available, initialize buffer states */
@@ -1425,7 +1446,7 @@ set:
1425static int handle_outbound(struct qdio_q *q, unsigned int callflags, 1446static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1426 int bufnr, int count) 1447 int bufnr, int count)
1427{ 1448{
1428 unsigned char state; 1449 unsigned char state = 0;
1429 int used, rc = 0; 1450 int used, rc = 0;
1430 1451
1431 qperf_inc(q, outbound_call); 1452 qperf_inc(q, outbound_call);
@@ -1434,52 +1455,38 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
1434 used = atomic_add_return(count, &q->nr_buf_used); 1455 used = atomic_add_return(count, &q->nr_buf_used);
1435 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q); 1456 BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
1436 1457
1458 if (used == QDIO_MAX_BUFFERS_PER_Q)
1459 qperf_inc(q, outbound_queue_full);
1460
1437 if (callflags & QDIO_FLAG_PCI_OUT) { 1461 if (callflags & QDIO_FLAG_PCI_OUT) {
1438 q->u.out.pci_out_enabled = 1; 1462 q->u.out.pci_out_enabled = 1;
1439 qperf_inc(q, pci_request_int); 1463 qperf_inc(q, pci_request_int);
1440 } 1464 } else
1441 else
1442 q->u.out.pci_out_enabled = 0; 1465 q->u.out.pci_out_enabled = 0;
1443 1466
1444 if (queue_type(q) == QDIO_IQDIO_QFMT) { 1467 if (queue_type(q) == QDIO_IQDIO_QFMT) {
1445 if (multicast_outbound(q)) 1468 /* One SIGA-W per buffer required for unicast HiperSockets. */
1469 WARN_ON_ONCE(count > 1 && !multicast_outbound(q));
1470
1471 rc = qdio_kick_outbound_q(q);
1472 } else if (need_siga_sync(q)) {
1473 rc = qdio_siga_sync_q(q);
1474 } else {
1475 /* try to fast requeue buffers */
1476 get_buf_state(q, prev_buf(bufnr), &state, 0);
1477 if (state != SLSB_CU_OUTPUT_PRIMED)
1446 rc = qdio_kick_outbound_q(q); 1478 rc = qdio_kick_outbound_q(q);
1447 else 1479 else
1448 if ((q->irq_ptr->ssqd_desc.mmwc > 1) && 1480 qperf_inc(q, fast_requeue);
1449 (count > 1) &&
1450 (count <= q->irq_ptr->ssqd_desc.mmwc)) {
1451 /* exploit enhanced SIGA */
1452 q->u.out.use_enh_siga = 1;
1453 rc = qdio_kick_outbound_q(q);
1454 } else {
1455 /*
1456 * One siga-w per buffer required for unicast
1457 * HiperSockets.
1458 */
1459 q->u.out.use_enh_siga = 0;
1460 while (count--) {
1461 rc = qdio_kick_outbound_q(q);
1462 if (rc)
1463 goto out;
1464 }
1465 }
1466 goto out;
1467 } 1481 }
1468 1482
1469 if (need_siga_sync(q)) { 1483 /* in case of SIGA errors we must process the error immediately */
1470 qdio_siga_sync_q(q); 1484 if (used >= q->u.out.scan_threshold || rc)
1471 goto out; 1485 tasklet_schedule(&q->tasklet);
1472 }
1473
1474 /* try to fast requeue buffers */
1475 get_buf_state(q, prev_buf(bufnr), &state, 0);
1476 if (state != SLSB_CU_OUTPUT_PRIMED)
1477 rc = qdio_kick_outbound_q(q);
1478 else 1486 else
1479 qperf_inc(q, fast_requeue); 1487 /* free the SBALs in case of no further traffic */
1480 1488 if (!timer_pending(&q->u.out.timer))
1481out: 1489 mod_timer(&q->u.out.timer, jiffies + HZ);
1482 tasklet_schedule(&q->tasklet);
1483 return rc; 1490 return rc;
1484} 1491}
1485 1492
@@ -1508,7 +1515,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1508 1515
1509 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) 1516 if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
1510 return -EBUSY; 1517 return -EBUSY;
1511 1518 if (!count)
1519 return 0;
1512 if (callflags & QDIO_FLAG_SYNC_INPUT) 1520 if (callflags & QDIO_FLAG_SYNC_INPUT)
1513 return handle_inbound(irq_ptr->input_qs[q_nr], 1521 return handle_inbound(irq_ptr->input_qs[q_nr],
1514 callflags, bufnr, count); 1522 callflags, bufnr, count);
@@ -1519,30 +1527,155 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
1519} 1527}
1520EXPORT_SYMBOL_GPL(do_QDIO); 1528EXPORT_SYMBOL_GPL(do_QDIO);
1521 1529
1530/**
1531 * qdio_start_irq - process input buffers
1532 * @cdev: associated ccw_device for the qdio subchannel
1533 * @nr: input queue number
1534 *
1535 * Return codes
1536 * 0 - success
1537 * 1 - irqs not started since new data is available
1538 */
1539int qdio_start_irq(struct ccw_device *cdev, int nr)
1540{
1541 struct qdio_q *q;
1542 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1543
1544 if (!irq_ptr)
1545 return -ENODEV;
1546 q = irq_ptr->input_qs[nr];
1547
1548 WARN_ON(queue_irqs_enabled(q));
1549
1550 if (!shared_ind(q->irq_ptr->dsci))
1551 xchg(q->irq_ptr->dsci, 0);
1552
1553 qdio_stop_polling(q);
1554 clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
1555
1556 /*
1557 * We need to check again to not lose initiative after
1558 * resetting the ACK state.
1559 */
1560 if (!shared_ind(q->irq_ptr->dsci) && *q->irq_ptr->dsci)
1561 goto rescan;
1562 if (!qdio_inbound_q_done(q))
1563 goto rescan;
1564 return 0;
1565
1566rescan:
1567 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1568 &q->u.in.queue_irq_state))
1569 return 0;
1570 else
1571 return 1;
1572
1573}
1574EXPORT_SYMBOL(qdio_start_irq);
1575
1576/**
1577 * qdio_get_next_buffers - process input buffers
1578 * @cdev: associated ccw_device for the qdio subchannel
1579 * @nr: input queue number
1580 * @bufnr: first filled buffer number
1581 * @error: buffers are in error state
1582 *
1583 * Return codes
1584 * < 0 - error
1585 * = 0 - no new buffers found
1586 * > 0 - number of processed buffers
1587 */
1588int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
1589 int *error)
1590{
1591 struct qdio_q *q;
1592 int start, end;
1593 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1594
1595 if (!irq_ptr)
1596 return -ENODEV;
1597 q = irq_ptr->input_qs[nr];
1598 WARN_ON(queue_irqs_enabled(q));
1599
1600 /*
1601 * Cannot rely on automatic sync after interrupt since queues may
1602 * also be examined without interrupt.
1603 */
1604 if (need_siga_sync(q))
1605 qdio_sync_queues(q);
1606
1607 /* check the PCI capable outbound queues. */
1608 qdio_check_outbound_after_thinint(q);
1609
1610 if (!qdio_inbound_q_moved(q))
1611 return 0;
1612
1613 /* Note: upper-layer MUST stop processing immediately here ... */
1614 if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
1615 return -EIO;
1616
1617 start = q->first_to_kick;
1618 end = q->first_to_check;
1619 *bufnr = start;
1620 *error = q->qdio_error;
1621
1622 /* for the next time */
1623 q->first_to_kick = end;
1624 q->qdio_error = 0;
1625 return sub_buf(end, start);
1626}
1627EXPORT_SYMBOL(qdio_get_next_buffers);
1628
1629/**
1630 * qdio_stop_irq - disable interrupt processing for the device
1631 * @cdev: associated ccw_device for the qdio subchannel
1632 * @nr: input queue number
1633 *
1634 * Return codes
1635 * 0 - interrupts were already disabled
1636 * 1 - interrupts successfully disabled
1637 */
1638int qdio_stop_irq(struct ccw_device *cdev, int nr)
1639{
1640 struct qdio_q *q;
1641 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1642
1643 if (!irq_ptr)
1644 return -ENODEV;
1645 q = irq_ptr->input_qs[nr];
1646
1647 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
1648 &q->u.in.queue_irq_state))
1649 return 0;
1650 else
1651 return 1;
1652}
1653EXPORT_SYMBOL(qdio_stop_irq);
1654
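
Together, qdio_start_irq(), qdio_get_next_buffers() and qdio_stop_irq() give upper layers a NAPI-style polling mode. A hedged sketch of the driver-side loop they enable; my_queue_start_poll(), my_poll() and process_buffers() are invented names for illustration — only the three exported calls and their documented return codes come from the code above:

/* Registered as queue_start_poll in the qdio_initialize data. */
static void my_queue_start_poll(struct ccw_device *cdev, int nr,
				unsigned long int_parm)
{
	/* IRQs for queue nr are now off; schedule my_poll(), e.g. via
	 * napi_schedule() in a real driver. */
}

static void my_poll(struct ccw_device *cdev, int nr)
{
	int bufnr, error, n;

	do {
		n = qdio_get_next_buffers(cdev, nr, &bufnr, &error);
		if (n < 0)
			return;			/* device error */
		if (n > 0)
			process_buffers(bufnr, n, error);	/* hypothetical */
		/* re-arm; rc 1 means new data raced in, so scan again */
	} while (n > 0 || qdio_start_irq(cdev, nr) == 1);
}
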
1522static int __init init_QDIO(void) 1655static int __init init_QDIO(void)
1523{ 1656{
1524 int rc; 1657 int rc;
1525 1658
1526 rc = qdio_setup_init(); 1659 rc = qdio_debug_init();
1527 if (rc) 1660 if (rc)
1528 return rc; 1661 return rc;
1662 rc = qdio_setup_init();
1663 if (rc)
1664 goto out_debug;
1529 rc = tiqdio_allocate_memory(); 1665 rc = tiqdio_allocate_memory();
1530 if (rc) 1666 if (rc)
1531 goto out_cache; 1667 goto out_cache;
1532 rc = qdio_debug_init();
1533 if (rc)
1534 goto out_ti;
1535 rc = tiqdio_register_thinints(); 1668 rc = tiqdio_register_thinints();
1536 if (rc) 1669 if (rc)
1537 goto out_debug; 1670 goto out_ti;
1538 return 0; 1671 return 0;
1539 1672
1540out_debug:
1541 qdio_debug_exit();
1542out_ti: 1673out_ti:
1543 tiqdio_free_memory(); 1674 tiqdio_free_memory();
1544out_cache: 1675out_cache:
1545 qdio_setup_exit(); 1676 qdio_setup_exit();
1677out_debug:
1678 qdio_debug_exit();
1546 return rc; 1679 return rc;
1547} 1680}
1548 1681
@@ -1550,8 +1683,8 @@ static void __exit exit_QDIO(void)
1550{ 1683{
1551 tiqdio_unregister_thinints(); 1684 tiqdio_unregister_thinints();
1552 tiqdio_free_memory(); 1685 tiqdio_free_memory();
1553 qdio_debug_exit();
1554 qdio_setup_exit(); 1686 qdio_setup_exit();
1687 qdio_debug_exit();
1555} 1688}
1556 1689
1557module_init(init_QDIO); 1690module_init(init_QDIO);
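
init_QDIO() now brings debugfs up first, and exit_QDIO() tears everything down in strictly reverse order, with each goto label undoing only what has already succeeded. A minimal standalone sketch of that unwind idiom, reduced to two subsystems (not kernel code):

#include <stdio.h>

static int  a_init(void) { puts("a up");   return 0; }
static void a_exit(void) { puts("a down"); }
static int  b_init(void) { puts("b up");   return 0; }
static void b_exit(void) { puts("b down"); }

static int init_all(void)
{
	int rc = a_init();

	if (rc)
		return rc;
	rc = b_init();
	if (rc)
		goto out_a;	/* undo only what succeeded */
	return 0;
out_a:
	a_exit();
	return rc;
}

static void exit_all(void)
{
	b_exit();		/* strict reverse of init order */
	a_exit();
}

int main(void)
{
	if (!init_all())
		exit_all();
	return 0;
}
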
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df4..89107d0938c4 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); 161 setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
162 162
163 q->is_input_q = 1; 163 q->is_input_q = 1;
164 q->u.in.queue_start_poll = qdio_init->queue_start_poll;
164 setup_storage_lists(q, irq_ptr, input_sbal_array, i); 165 setup_storage_lists(q, irq_ptr, input_sbal_array, i);
165 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 166 input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
166 167
@@ -177,6 +178,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
177 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); 178 setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i);
178 179
179 q->is_input_q = 0; 180 q->is_input_q = 0;
181 q->u.out.scan_threshold = qdio_init->scan_threshold;
180 setup_storage_lists(q, irq_ptr, output_sbal_array, i); 182 setup_storage_lists(q, irq_ptr, output_sbal_array, i);
181 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; 183 output_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
182 184
@@ -195,14 +197,10 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac)
195 irq_ptr->siga_flag.output = 1; 197 irq_ptr->siga_flag.output = 1;
196 if (qdioac & AC1_SIGA_SYNC_NEEDED) 198 if (qdioac & AC1_SIGA_SYNC_NEEDED)
197 irq_ptr->siga_flag.sync = 1; 199 irq_ptr->siga_flag.sync = 1;
198 if (qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) 200 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT))
199 irq_ptr->siga_flag.no_sync_ti = 1; 201 irq_ptr->siga_flag.sync_after_ai = 1;
200 if (qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI) 202 if (!(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI))
201 irq_ptr->siga_flag.no_sync_out_pci = 1; 203 irq_ptr->siga_flag.sync_out_after_pci = 1;
202
203 if (irq_ptr->siga_flag.no_sync_out_pci &&
204 irq_ptr->siga_flag.no_sync_ti)
205 irq_ptr->siga_flag.no_sync_out_ti = 1;
206} 204}
207 205
208static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, 206static void check_and_setup_qebsm(struct qdio_irq *irq_ptr,
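
process_ac_flags() above flips the polarity of the sync bookkeeping: instead of remembering when the adapter syncs automatically (no_sync_*), it now remembers when the driver itself must issue SIGA-S (sync_after_ai, sync_out_after_pci). A reduced sketch of that derivation; the AC1_* values and the bitfield struct are illustrative stand-ins, only the inverted logic comes from the hunk:

#define AC1_AUTOMATIC_SYNC_ON_THININT	0x08	/* values illustrative */
#define AC1_AUTOMATIC_SYNC_ON_OUT_PCI	0x10

struct siga_flag {				/* reduced stand-in */
	unsigned int sync_after_ai:1;
	unsigned int sync_out_after_pci:1;
};

static void set_sync_flags(unsigned char qdioac, struct siga_flag *f)
{
	/* old: no_sync_ti = !!(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT) */
	f->sync_after_ai = !(qdioac & AC1_AUTOMATIC_SYNC_ON_THININT);
	f->sync_out_after_pci = !(qdioac & AC1_AUTOMATIC_SYNC_ON_OUT_PCI);
}
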
@@ -450,7 +448,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
450 char s[80]; 448 char s[80];
451 449
452 snprintf(s, 80, "qdio: %s %s on SC %x using " 450 snprintf(s, 80, "qdio: %s %s on SC %x using "
453 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", 451 "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s\n",
454 dev_name(&cdev->dev), 452 dev_name(&cdev->dev),
455 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : 453 (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" :
456 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), 454 ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"),
@@ -462,9 +460,8 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
462 (irq_ptr->siga_flag.input) ? "R" : " ", 460 (irq_ptr->siga_flag.input) ? "R" : " ",
463 (irq_ptr->siga_flag.output) ? "W" : " ", 461 (irq_ptr->siga_flag.output) ? "W" : " ",
464 (irq_ptr->siga_flag.sync) ? "S" : " ", 462 (irq_ptr->siga_flag.sync) ? "S" : " ",
465 (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", 463 (irq_ptr->siga_flag.sync_after_ai) ? "A" : " ",
466 (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", 464 (irq_ptr->siga_flag.sync_out_after_pci) ? "P" : " ");
467 (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " ");
468 printk(KERN_INFO "%s", s); 465 printk(KERN_INFO "%s", s);
469} 466}
470 467
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f153..5c4e741d8221 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -8,6 +8,7 @@
8 */ 8 */
9#include <linux/io.h> 9#include <linux/io.h>
10#include <linux/slab.h> 10#include <linux/slab.h>
11#include <linux/kernel_stat.h>
11#include <asm/atomic.h> 12#include <asm/atomic.h>
12#include <asm/debug.h> 13#include <asm/debug.h>
13#include <asm/qdio.h> 14#include <asm/qdio.h>
@@ -25,35 +26,17 @@
25 */ 26 */
26#define TIQDIO_NR_NONSHARED_IND 63 27#define TIQDIO_NR_NONSHARED_IND 63
27#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1) 28#define TIQDIO_NR_INDICATORS (TIQDIO_NR_NONSHARED_IND + 1)
28#define TIQDIO_SHARED_IND 63
29 29
30/* list of thin interrupt input queues */ 30/* list of thin interrupt input queues */
31static LIST_HEAD(tiq_list); 31static LIST_HEAD(tiq_list);
32DEFINE_MUTEX(tiq_list_lock); 32DEFINE_MUTEX(tiq_list_lock);
33 33
34/* adapter local summary indicator */ 34/* adapter local summary indicator */
35static unsigned char *tiqdio_alsi; 35static u8 *tiqdio_alsi;
36 36
37/* device state change indicators */ 37struct indicator_t *q_indicators;
38struct indicator_t {
39 u32 ind; /* u32 because of compare-and-swap performance */
40 atomic_t count; /* use count, 0 or 1 for non-shared indicators */
41};
42static struct indicator_t *q_indicators;
43 38
44static int css_qdio_omit_svs; 39static u64 last_ai_time;
45
46static inline unsigned long do_clear_global_summary(void)
47{
48 register unsigned long __fn asm("1") = 3;
49 register unsigned long __tmp asm("2");
50 register unsigned long __time asm("3");
51
52 asm volatile(
53 " .insn rre,0xb2650000,2,0"
54 : "+d" (__fn), "=d" (__tmp), "=d" (__time));
55 return __time;
56}
57 40
58/* returns addr for the device state change indicator */ 41/* returns addr for the device state change indicator */
59static u32 *get_indicator(void) 42static u32 *get_indicator(void)
@@ -87,10 +70,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr)
87 struct qdio_q *q; 70 struct qdio_q *q;
88 int i; 71 int i;
89 72
90 /* No TDD facility? If we must use SIGA-s we can also omit SVS. */
91 if (!css_qdio_omit_svs && irq_ptr->siga_flag.sync)
92 css_qdio_omit_svs = 1;
93
94 mutex_lock(&tiq_list_lock); 73 mutex_lock(&tiq_list_lock);
95 for_each_input_queue(irq_ptr, q, i) 74 for_each_input_queue(irq_ptr, q, i)
96 list_add_rcu(&q->entry, &tiq_list); 75 list_add_rcu(&q->entry, &tiq_list);
@@ -116,65 +95,68 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
116 } 95 }
117} 96}
118 97
119static inline int shared_ind(struct qdio_irq *irq_ptr) 98static inline u32 shared_ind_set(void)
120{ 99{
121 return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; 100 return q_indicators[TIQDIO_SHARED_IND].ind;
122} 101}
123 102
124/** 103/**
125 * tiqdio_thinint_handler - thin interrupt handler for qdio 104 * tiqdio_thinint_handler - thin interrupt handler for qdio
126 * @ind: pointer to adapter local summary indicator 105 * @alsi: pointer to adapter local summary indicator
127 * @drv_data: NULL 106 * @data: NULL
128 */ 107 */
129static void tiqdio_thinint_handler(void *ind, void *drv_data) 108static void tiqdio_thinint_handler(void *alsi, void *data)
130{ 109{
110 u32 si_used = shared_ind_set();
131 struct qdio_q *q; 111 struct qdio_q *q;
132 112
133 /* 113 last_ai_time = S390_lowcore.int_clock;
134 * SVS only when needed: issue SVS to benefit from iqdio interrupt 114 kstat_cpu(smp_processor_id()).irqs[IOINT_QAI]++;
135 * avoidance (SVS clears adapter interrupt suppression overwrite)
136 */
137 if (!css_qdio_omit_svs)
138 do_clear_global_summary();
139
140 /*
141 * reset local summary indicator (tiqdio_alsi) to stop adapter
142 * interrupts for now
143 */
144 xchg((u8 *)ind, 0);
145 115
146 /* protect tiq_list entries, only changed in activate or shutdown */ 116 /* protect tiq_list entries, only changed in activate or shutdown */
147 rcu_read_lock(); 117 rcu_read_lock();
148 118
149 /* check for work on all inbound thinint queues */ 119 /* check for work on all inbound thinint queues */
150 list_for_each_entry_rcu(q, &tiq_list, entry) 120 list_for_each_entry_rcu(q, &tiq_list, entry) {
121
151 /* only process queues from changed sets */ 122 /* only process queues from changed sets */
152 if (*q->irq_ptr->dsci) { 123 if (unlikely(shared_ind(q->irq_ptr->dsci))) {
153 qperf_inc(q, adapter_int); 124 if (!si_used)
125 continue;
126 } else if (!*q->irq_ptr->dsci)
127 continue;
154 128
129 if (q->u.in.queue_start_poll) {
130 /* skip if polling is enabled or already in work */
131 if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
132 &q->u.in.queue_irq_state)) {
133 qperf_inc(q, int_discarded);
134 continue;
135 }
136
137 /* avoid dsci clear here, done after processing */
138 q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
139 q->irq_ptr->int_parm);
140 } else {
155 /* only clear it if the indicator is non-shared */ 141 /* only clear it if the indicator is non-shared */
156 if (!shared_ind(q->irq_ptr)) 142 if (!shared_ind(q->irq_ptr->dsci))
157 xchg(q->irq_ptr->dsci, 0); 143 xchg(q->irq_ptr->dsci, 0);
158 /* 144 /*
159 * don't call inbound processing directly since 145 * Call inbound processing but not directly
160 * that could starve other thinint queues 146 * since that could starve other thinint queues.
161 */ 147 */
162 tasklet_schedule(&q->tasklet); 148 tasklet_schedule(&q->tasklet);
163 } 149 }
164 150 qperf_inc(q, adapter_int);
151 }
165 rcu_read_unlock(); 152 rcu_read_unlock();
166 153
167 /* 154 /*
168 * if we used the shared indicator clear it now after all queues 155 * If the shared indicator was used clear it now after all queues
169 * were processed 156 * were processed.
170 */ 157 */
171 if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) { 158 if (si_used && shared_ind_set())
172 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0); 159 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
173
174 /* prevent racing */
175 if (*tiqdio_alsi)
176 xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1 << 7);
177 }
178} 160}
179 161
180static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) 162static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
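
The rewritten tiqdio_thinint_handler() snapshots the shared indicator into si_used before scanning, and clears it only afterwards — and only if it was set at entry — so an indication raised mid-scan survives for the next pass. A standalone model of that snapshot-then-clear ordering (C11 atomics in place of xchg(); not kernel code):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint shared_ind_word;	/* q_indicators[63].ind stand-in */

static void thinint_handler(void)
{
	unsigned int si_used = atomic_load(&shared_ind_word);

	/* ... scan queues; shared queues are processed only when
	 * si_used != 0 ... */

	/* clear only if set at entry AND still set now; a bit raised
	 * while scanning stays set for the next pass */
	if (si_used && atomic_load(&shared_ind_word))
		atomic_exchange(&shared_ind_word, 0);
}

int main(void)
{
	atomic_store(&shared_ind_word, 1);
	thinint_handler();
	printf("after: %u\n", atomic_load(&shared_ind_word));	/* 0 */
	return 0;
}
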
@@ -259,12 +241,6 @@ int qdio_establish_thinint(struct qdio_irq *irq_ptr)
259{ 241{
260 if (!is_thinint_irq(irq_ptr)) 242 if (!is_thinint_irq(irq_ptr))
261 return 0; 243 return 0;
262
263 /* Check for aif time delay disablement. If installed,
264 * omit SVS even under LPAR
265 */
266 if (css_general_characteristics.aif_tdd)
267 css_qdio_omit_svs = 1;
268 return set_subchannel_ind(irq_ptr, 0); 244 return set_subchannel_ind(irq_ptr, 0);
269} 245}
270 246
@@ -282,8 +258,8 @@ void qdio_shutdown_thinint(struct qdio_irq *irq_ptr)
282 return; 258 return;
283 259
284 /* reset adapter interrupt indicators */ 260 /* reset adapter interrupt indicators */
285 put_indicator(irq_ptr->dsci);
286 set_subchannel_ind(irq_ptr, 1); 261 set_subchannel_ind(irq_ptr, 1);
262 put_indicator(irq_ptr->dsci);
287} 263}
288 264
289void __exit tiqdio_unregister_thinints(void) 265void __exit tiqdio_unregister_thinints(void)