path: root/drivers/s390/cio/chsc.c
Diffstat (limited to 'drivers/s390/cio/chsc.c')
-rw-r--r--	drivers/s390/cio/chsc.c	1024
1 file changed, 267 insertions(+), 757 deletions(-)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 6f05a44e3817..ea92ac4d6577 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -15,202 +15,124 @@
 #include <linux/device.h>
 
 #include <asm/cio.h>
+#include <asm/chpid.h>
 
 #include "css.h"
 #include "cio.h"
 #include "cio_debug.h"
 #include "ioasm.h"
+#include "chp.h"
 #include "chsc.h"
 
 static void *sei_page;
 
-static int new_channel_path(int chpid);
-
-static inline void
-set_chp_logically_online(int chp, int onoff)
-{
-	css[0]->chps[chp]->state = onoff;
-}
-
-static int
-get_chp_status(int chp)
-{
-	return (css[0]->chps[chp] ? css[0]->chps[chp]->state : -ENODEV);
-}
-
-void
-chsc_validate_chpids(struct subchannel *sch)
-{
-	int mask, chp;
-
-	for (chp = 0; chp <= 7; chp++) {
-		mask = 0x80 >> chp;
-		if (!get_chp_status(sch->schib.pmcw.chpid[chp]))
-			/* disable using this path */
-			sch->opm &= ~mask;
-	}
-}
-
-void
-chpid_is_actually_online(int chp)
-{
-	int state;
-
-	state = get_chp_status(chp);
-	if (state < 0) {
-		need_rescan = 1;
-		queue_work(slow_path_wq, &slow_path_work);
-	} else
-		WARN_ON(!state);
-}
+struct chsc_ssd_area {
+	struct chsc_header request;
+	u16 :10;
+	u16 ssid:2;
+	u16 :4;
+	u16 f_sch;	  /* first subchannel */
+	u16 :16;
+	u16 l_sch;	  /* last subchannel */
+	u32 :32;
+	struct chsc_header response;
+	u32 :32;
+	u8 sch_valid : 1;
+	u8 dev_valid : 1;
+	u8 st	     : 3; /* subchannel type */
+	u8 zeroes    : 3;
+	u8 unit_addr;	  /* unit address */
+	u16 devno;	  /* device number */
+	u8 path_mask;
+	u8 fla_valid_mask;
+	u16 sch;	  /* subchannel */
+	u8 chpid[8];	  /* chpids 0-7 */
+	u16 fla[8];	  /* full link addresses 0-7 */
+} __attribute__ ((packed));
 
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- *	  process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
 {
-	int ccode, j;
-
-	struct {
-		struct chsc_header request;
-		u16 reserved1a:10;
-		u16 ssid:2;
-		u16 reserved1b:4;
-		u16 f_sch;	  /* first subchannel */
-		u16 reserved2;
-		u16 l_sch;	  /* last subchannel */
-		u32 reserved3;
-		struct chsc_header response;
-		u32 reserved4;
-		u8 sch_valid : 1;
-		u8 dev_valid : 1;
-		u8 st	     : 3; /* subchannel type */
-		u8 zeroes : 3;
-		u8 unit_addr;	  /* unit address */
-		u16 devno;	  /* device number */
-		u8 path_mask;
-		u8 fla_valid_mask;
-		u16 sch;	  /* subchannel */
-		u8 chpid[8];	  /* chpids 0-7 */
-		u16 fla[8];	  /* full link addresses 0-7 */
-	} __attribute__ ((packed)) *ssd_area;
-
-	ssd_area = page;
+	unsigned long page;
+	struct chsc_ssd_area *ssd_area;
+	int ccode;
+	int ret;
+	int i;
+	int mask;
 
+	page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!page)
+		return -ENOMEM;
+	ssd_area = (struct chsc_ssd_area *) page;
 	ssd_area->request.length = 0x0010;
 	ssd_area->request.code = 0x0004;
-
-	ssd_area->ssid = sch->schid.ssid;
-	ssd_area->f_sch = sch->schid.sch_no;
-	ssd_area->l_sch = sch->schid.sch_no;
+	ssd_area->ssid = schid.ssid;
+	ssd_area->f_sch = schid.sch_no;
+	ssd_area->l_sch = schid.sch_no;
 
 	ccode = chsc(ssd_area);
+	/* Check response. */
 	if (ccode > 0) {
-		pr_debug("chsc returned with ccode = %d\n", ccode);
-		return (ccode == 3) ? -ENODEV : -EBUSY;
+		ret = (ccode == 3) ? -ENODEV : -EBUSY;
+		goto out_free;
 	}
-
-	switch (ssd_area->response.code) {
-	case 0x0001: /* everything ok */
-		break;
-	case 0x0002:
-		CIO_CRW_EVENT(2, "Invalid command!\n");
-		return -EINVAL;
-	case 0x0003:
-		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
-		return -EINVAL;
-	case 0x0004:
-		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
-		return -EOPNOTSUPP;
-	default:
-		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
-			      ssd_area->response.code);
-		return -EIO;
+	if (ssd_area->response.code != 0x0001) {
+		CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+			      schid.ssid, schid.sch_no,
+			      ssd_area->response.code);
+		ret = -EIO;
+		goto out_free;
 	}
-
-	/*
-	 * ssd_area->st stores the type of the detected
-	 * subchannel, with the following definitions:
-	 *
-	 * 0: I/O subchannel:	  All fields have meaning
-	 * 1: CHSC subchannel:	  Only sch_val, st and sch
-	 *			  have meaning
-	 * 2: Message subchannel: All fields except unit_addr
-	 *			  have meaning
-	 * 3: ADM subchannel:	  Only sch_val, st and sch
-	 *			  have meaning
-	 *
-	 * Other types are currently undefined.
-	 */
-	if (ssd_area->st > 3) { /* uhm, that looks strange... */
-		CIO_CRW_EVENT(0, "Strange subchannel type %d"
-			      " for sch 0.%x.%04x\n", ssd_area->st,
-			      sch->schid.ssid, sch->schid.sch_no);
-		/*
-		 * There may have been a new subchannel type defined in the
-		 * time since this code was written; since we don't know which
-		 * fields have meaning and what to do with it we just jump out
-		 */
-		return 0;
-	} else {
-		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-		CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
-			      sch->schid.ssid, sch->schid.sch_no,
-			      type[ssd_area->st]);
-
-		sch->ssd_info.valid = 1;
-		sch->ssd_info.type = ssd_area->st;
+	if (!ssd_area->sch_valid) {
+		ret = -ENODEV;
+		goto out_free;
 	}
-
-	if (ssd_area->st == 0 || ssd_area->st == 2) {
-		for (j = 0; j < 8; j++) {
-			if (!((0x80 >> j) & ssd_area->path_mask &
-			      ssd_area->fla_valid_mask))
-				continue;
-			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
-			sch->ssd_info.fla[j] = ssd_area->fla[j];
+	/* Copy data */
+	ret = 0;
+	memset(ssd, 0, sizeof(struct chsc_ssd_info));
+	if ((ssd_area->st != 0) && (ssd_area->st != 2))
+		goto out_free;
+	ssd->path_mask = ssd_area->path_mask;
+	ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (ssd_area->path_mask & mask) {
+			chp_id_init(&ssd->chpid[i]);
+			ssd->chpid[i].id = ssd_area->chpid[i];
 		}
+		if (ssd_area->fla_valid_mask & mask)
+			ssd->fla[i] = ssd_area->fla[i];
 	}
-	return 0;
+out_free:
+	free_page(page);
+	return ret;
 }
 
-int
-css_get_ssd_info(struct subchannel *sch)
+static int check_for_io_on_path(struct subchannel *sch, int mask)
 {
-	int ret;
-	void *page;
+	int cc;
 
-	page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-	if (!page)
-		return -ENOMEM;
-	spin_lock_irq(sch->lock);
-	ret = chsc_get_sch_desc_irq(sch, page);
-	if (ret) {
-		static int cio_chsc_err_msg;
+	cc = stsch(sch->schid, &sch->schib);
+	if (cc)
+		return 0;
+	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == mask)
+		return 1;
+	return 0;
+}
 
-		if (!cio_chsc_err_msg) {
-			printk(KERN_ERR
-			       "chsc_get_sch_descriptions:"
-			       " Error %d while doing chsc; "
-			       "processing some machine checks may "
-			       "not work\n", ret);
-			cio_chsc_err_msg = 1;
-		}
-	}
-	spin_unlock_irq(sch->lock);
-	free_page((unsigned long)page);
-	if (!ret) {
-		int j, chpid, mask;
-		/* Allocate channel path structures, if needed. */
-		for (j = 0; j < 8; j++) {
-			mask = 0x80 >> j;
-			chpid = sch->ssd_info.chpid[j];
-			if ((sch->schib.pmcw.pim & mask) &&
-			    (get_chp_status(chpid) < 0))
-				new_channel_path(chpid);
-		}
+static void terminate_internal_io(struct subchannel *sch)
+{
+	if (cio_clear(sch)) {
+		/* Recheck device in case clear failed. */
+		sch->lpm = 0;
+		if (device_trigger_verify(sch) != 0)
+			css_schedule_eval(sch->schid);
+		return;
 	}
-	return ret;
+	/* Request retry of internal operation. */
+	device_set_intretry(sch);
+	/* Call handler. */
+	if (sch->driver && sch->driver->termination)
+		sch->driver->termination(&sch->dev);
 }
 
 static int
@@ -219,7 +141,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	int j;
 	int mask;
 	struct subchannel *sch;
-	struct channel_path *chpid;
+	struct chp_id *chpid;
 	struct schib schib;
 
 	sch = to_subchannel(dev);
@@ -243,106 +165,50 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	if (sch->schib.pmcw.pim == 0x80)
 		goto out_unreg;
 
-	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
-	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
-	    (sch->schib.pmcw.lpum == mask)) {
-		int cc;
-
-		cc = cio_clear(sch);
-		if (cc == -ENODEV)
+	if (check_for_io_on_path(sch, mask)) {
+		if (device_is_online(sch))
+			device_kill_io(sch);
+		else {
+			terminate_internal_io(sch);
+			/* Re-start path verification. */
+			if (sch->driver && sch->driver->verify)
+				sch->driver->verify(&sch->dev);
+		}
+	} else {
+		/* trigger path verification. */
+		if (sch->driver && sch->driver->verify)
+			sch->driver->verify(&sch->dev);
+		else if (sch->lpm == mask)
 			goto out_unreg;
-		/* Request retry of internal operation. */
-		device_set_intretry(sch);
-		/* Call handler. */
-		if (sch->driver && sch->driver->termination)
-			sch->driver->termination(&sch->dev);
-		goto out_unlock;
 	}
 
-	/* trigger path verification. */
-	if (sch->driver && sch->driver->verify)
-		sch->driver->verify(&sch->dev);
-	else if (sch->lpm == mask)
-		goto out_unreg;
-out_unlock:
 	spin_unlock_irq(sch->lock);
 	return 0;
+
 out_unreg:
-	spin_unlock_irq(sch->lock);
 	sch->lpm = 0;
-	if (css_enqueue_subchannel_slow(sch->schid)) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-	}
+	spin_unlock_irq(sch->lock);
+	css_schedule_eval(sch->schid);
 	return 0;
 }
 
-static void
-s390_set_chpid_offline( __u8 chpid)
+void chsc_chp_offline(struct chp_id chpid)
 {
 	char dbf_txt[15];
-	struct device *dev;
 
-	sprintf(dbf_txt, "chpr%x", chpid);
+	sprintf(dbf_txt, "chpr%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	if (get_chp_status(chpid) <= 0)
+	if (chp_get_status(chpid) <= 0)
 		return;
-	dev = get_device(&css[0]->chps[chpid]->dev);
-	bus_for_each_dev(&css_bus_type, NULL, to_channelpath(dev),
+	bus_for_each_dev(&css_bus_type, NULL, &chpid,
 			 s390_subchannel_remove_chpid);
-
-	if (need_rescan || css_slow_subchannels_exist())
-		queue_work(slow_path_wq, &slow_path_work);
-	put_device(dev);
-}
-
-struct res_acc_data {
-	struct channel_path *chp;
-	u32 fla_mask;
-	u16 fla;
-};
-
-static int
-s390_process_res_acc_sch(struct res_acc_data *res_data, struct subchannel *sch)
-{
-	int found;
-	int chp;
-	int ccode;
-
-	found = 0;
-	for (chp = 0; chp <= 7; chp++)
-		/*
-		 * check if chpid is in information updated by ssd
-		 */
-		if (sch->ssd_info.valid &&
-		    sch->ssd_info.chpid[chp] == res_data->chp->id &&
-		    (sch->ssd_info.fla[chp] & res_data->fla_mask)
-		     == res_data->fla) {
-			found = 1;
-			break;
-		}
-
-	if (found == 0)
-		return 0;
-
-	/*
-	 * Do a stsch to update our subchannel structure with the
-	 * new path information and eventually check for logically
-	 * offline chpids.
-	 */
-	ccode = stsch(sch->schid, &sch->schib);
-	if (ccode > 0)
-		return 0;
-
-	return 0x80 >> chp;
 }
 
 static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
-	int ret;
 	/*
 	 * We don't know the device yet, but since a path
 	 * may be available now to the device we'll have
@@ -353,14 +219,35 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
 	 */
 	if (stsch_err(schid, &schib))
 		/* We're through */
-		return need_rescan ? -EAGAIN : -ENXIO;
+		return -ENXIO;
 
 	/* Put it on the slow path. */
-	ret = css_enqueue_subchannel_slow(schid);
-	if (ret) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
+	css_schedule_eval(schid);
+	return 0;
+}
+
+struct res_acc_data {
+	struct chp_id chpid;
+	u32 fla_mask;
+	u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+			      struct res_acc_data *data)
+{
+	int i;
+	int mask;
+
+	for (i = 0; i < 8; i++) {
+		mask = 0x80 >> i;
+		if (!(ssd->path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+			continue;
+		if ((ssd->fla_valid_mask & mask) &&
+		    ((ssd->fla[i] & data->fla_mask) != data->fla))
+			continue;
+		return mask;
 	}
 	return 0;
 }
@@ -379,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 		return s390_process_res_acc_new_sch(schid);
 
 	spin_lock_irq(sch->lock);
-
-	chp_mask = s390_process_res_acc_sch(res_data, sch);
-
-	if (chp_mask == 0) {
-		spin_unlock_irq(sch->lock);
-		put_device(&sch->dev);
-		return 0;
-	}
+	chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+	if (chp_mask == 0)
+		goto out;
+	if (stsch(sch->schid, &sch->schib))
+		goto out;
 	old_lpm = sch->lpm;
 	sch->lpm = ((sch->schib.pmcw.pim &
 		     sch->schib.pmcw.pam &
@@ -396,20 +280,18 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 		device_trigger_reprobe(sch);
 	else if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
-
+out:
 	spin_unlock_irq(sch->lock);
 	put_device(&sch->dev);
 	return 0;
 }
 
-
-static int
-s390_process_res_acc (struct res_acc_data *res_data)
+static void s390_process_res_acc (struct res_acc_data *res_data)
 {
-	int rc;
 	char dbf_txt[15];
 
-	sprintf(dbf_txt, "accpr%x", res_data->chp->id);
+	sprintf(dbf_txt, "accpr%x.%02x", res_data->chpid.cssid,
+		res_data->chpid.id);
 	CIO_TRACE_EVENT( 2, dbf_txt);
 	if (res_data->fla != 0) {
 		sprintf(dbf_txt, "fla%x", res_data->fla);
@@ -423,12 +305,7 @@ s390_process_res_acc (struct res_acc_data *res_data)
 	 * The more information we have (info), the less scanning
 	 * will we have to do.
 	 */
-	rc = for_each_subchannel(__s390_process_res_acc, res_data);
-	if (css_slow_subchannels_exist())
-		rc = -EAGAIN;
-	else if (rc != -EAGAIN)
-		rc = 0;
-	return rc;
+	for_each_subchannel(__s390_process_res_acc, res_data);
 }
 
 static int
@@ -480,43 +357,45 @@ struct chsc_sei_area {
 	/* ccdf has to be big enough for a link-incident record */
 } __attribute__ ((packed));
 
-static int chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_link_incident(struct chsc_sei_area *sei_area)
 {
-	int chpid;
+	struct chp_id chpid;
+	int id;
 
 	CIO_CRW_EVENT(4, "chsc: link incident (rs=%02x, rs_id=%04x)\n",
 		      sei_area->rs, sei_area->rsid);
 	if (sei_area->rs != 4)
-		return 0;
-	chpid = __get_chpid_from_lir(sei_area->ccdf);
-	if (chpid < 0)
+		return;
+	id = __get_chpid_from_lir(sei_area->ccdf);
+	if (id < 0)
 		CIO_CRW_EVENT(4, "chsc: link incident - invalid LIR\n");
-	else
-		s390_set_chpid_offline(chpid);
-
-	return 0;
+	else {
+		chp_id_init(&chpid);
+		chpid.id = id;
+		chsc_chp_offline(chpid);
+	}
 }
 
-static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
+static void chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 {
 	struct res_acc_data res_data;
-	struct device *dev;
+	struct chp_id chpid;
 	int status;
-	int rc;
 
 	CIO_CRW_EVENT(4, "chsc: resource accessibility event (rs=%02x, "
 		      "rs_id=%04x)\n", sei_area->rs, sei_area->rsid);
 	if (sei_area->rs != 4)
-		return 0;
+		return;
+	chp_id_init(&chpid);
+	chpid.id = sei_area->rsid;
 	/* allocate a new channel path structure, if needed */
-	status = get_chp_status(sei_area->rsid);
+	status = chp_get_status(chpid);
 	if (status < 0)
-		new_channel_path(sei_area->rsid);
+		chp_new(chpid);
 	else if (!status)
-		return 0;
-	dev = get_device(&css[0]->chps[sei_area->rsid]->dev);
+		return;
 	memset(&res_data, 0, sizeof(struct res_acc_data));
-	res_data.chp = to_channelpath(dev);
+	res_data.chpid = chpid;
 	if ((sei_area->vf & 0xc0) != 0) {
 		res_data.fla = sei_area->fla;
 		if ((sei_area->vf & 0xc0) == 0xc0)
@@ -526,51 +405,82 @@ static int chsc_process_sei_res_acc(struct chsc_sei_area *sei_area)
 		/* link address */
 		res_data.fla_mask = 0xff00;
 	}
-	rc = s390_process_res_acc(&res_data);
-	put_device(dev);
-
-	return rc;
+	s390_process_res_acc(&res_data);
 }
 
-static int chsc_process_sei(struct chsc_sei_area *sei_area)
+struct chp_config_data {
+	u8 map[32];
+	u8 op;
+	u8 pc;
+};
+
+static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area)
 {
-	int rc;
+	struct chp_config_data *data;
+	struct chp_id chpid;
+	int num;
+
+	CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n");
+	if (sei_area->rs != 0)
+		return;
+	data = (struct chp_config_data *) &(sei_area->ccdf);
+	chp_id_init(&chpid);
+	for (num = 0; num <= __MAX_CHPID; num++) {
+		if (!chp_test_bit(data->map, num))
+			continue;
+		chpid.id = num;
+		printk(KERN_WARNING "cio: processing configure event %d for "
+		       "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id);
+		switch (data->op) {
+		case 0:
+			chp_cfg_schedule(chpid, 1);
+			break;
+		case 1:
+			chp_cfg_schedule(chpid, 0);
+			break;
+		case 2:
+			chp_cfg_cancel_deconfigure(chpid);
+			break;
+		}
+	}
+}
 
+static void chsc_process_sei(struct chsc_sei_area *sei_area)
+{
 	/* Check if we might have lost some information. */
-	if (sei_area->flags & 0x40)
+	if (sei_area->flags & 0x40) {
 		CIO_CRW_EVENT(2, "chsc: event overflow\n");
+		css_schedule_eval_all();
+	}
 	/* which kind of information was stored? */
-	rc = 0;
 	switch (sei_area->cc) {
 	case 1: /* link incident*/
-		rc = chsc_process_sei_link_incident(sei_area);
+		chsc_process_sei_link_incident(sei_area);
 		break;
 	case 2: /* i/o resource accessibiliy */
-		rc = chsc_process_sei_res_acc(sei_area);
+		chsc_process_sei_res_acc(sei_area);
+		break;
+	case 8: /* channel-path-configuration notification */
+		chsc_process_sei_chp_config(sei_area);
 		break;
 	default: /* other stuff */
 		CIO_CRW_EVENT(4, "chsc: unhandled sei content code %d\n",
 			      sei_area->cc);
 		break;
 	}
-
-	return rc;
 }
 
-int chsc_process_crw(void)
+void chsc_process_crw(void)
 {
 	struct chsc_sei_area *sei_area;
-	int ret;
-	int rc;
 
 	if (!sei_page)
-		return 0;
+		return;
 	/* Access to sei_page is serialized through machine check handler
 	 * thread, so no need for locking. */
 	sei_area = sei_page;
 
 	CIO_TRACE_EVENT( 2, "prcss");
-	ret = 0;
 	do {
 		memset(sei_area, 0, sizeof(*sei_area));
 		sei_area->request.length = 0x0010;
@@ -580,37 +490,26 @@ int chsc_process_crw(void)
 
 		if (sei_area->response.code == 0x0001) {
 			CIO_CRW_EVENT(4, "chsc: sei successful\n");
-			rc = chsc_process_sei(sei_area);
-			if (rc)
-				ret = rc;
+			chsc_process_sei(sei_area);
 		} else {
 			CIO_CRW_EVENT(2, "chsc: sei failed (rc=%04x)\n",
 				      sei_area->response.code);
-			ret = 0;
 			break;
 		}
 	} while (sei_area->flags & 0x80);
-
-	return ret;
 }
 
 static int
 __chp_add_new_sch(struct subchannel_id schid)
 {
 	struct schib schib;
-	int ret;
 
 	if (stsch_err(schid, &schib))
 		/* We're through */
-		return need_rescan ? -EAGAIN : -ENXIO;
+		return -ENXIO;
 
 	/* Put it on the slow path. */
-	ret = css_enqueue_subchannel_slow(schid);
-	if (ret) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
-	}
+	css_schedule_eval(schid);
 	return 0;
 }
 
@@ -619,10 +518,10 @@ static int
 __chp_add(struct subchannel_id schid, void *data)
 {
 	int i, mask;
-	struct channel_path *chp;
+	struct chp_id *chpid;
 	struct subchannel *sch;
 
-	chp = data;
+	chpid = data;
 	sch = get_subchannel_by_schid(schid);
 	if (!sch)
 		/* Check if the subchannel is now available. */
@@ -631,7 +530,7 @@ __chp_add(struct subchannel_id schid, void *data)
 	for (i=0; i<8; i++) {
 		mask = 0x80 >> i;
 		if ((sch->schib.pmcw.pim & mask) &&
-		    (sch->schib.pmcw.chpid[i] == chp->id)) {
+		    (sch->schib.pmcw.chpid[i] == chpid->id)) {
 			if (stsch(sch->schid, &sch->schib) != 0) {
 				/* Endgame. */
 				spin_unlock_irq(sch->lock);
@@ -657,122 +556,58 @@ __chp_add(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-static int
-chp_add(int chpid)
+void chsc_chp_online(struct chp_id chpid)
 {
-	int rc;
 	char dbf_txt[15];
-	struct device *dev;
 
-	if (!get_chp_status(chpid))
-		return 0; /* no need to do the rest */
-
-	sprintf(dbf_txt, "cadd%x", chpid);
+	sprintf(dbf_txt, "cadd%x.%02x", chpid.cssid, chpid.id);
 	CIO_TRACE_EVENT(2, dbf_txt);
 
-	dev = get_device(&css[0]->chps[chpid]->dev);
-	rc = for_each_subchannel(__chp_add, to_channelpath(dev));
-	if (css_slow_subchannels_exist())
-		rc = -EAGAIN;
-	if (rc != -EAGAIN)
-		rc = 0;
-	put_device(dev);
-	return rc;
+	if (chp_get_status(chpid) != 0)
+		for_each_subchannel(__chp_add, &chpid);
 }
 
-/*
- * Handling of crw machine checks with channel path source.
- */
-int
-chp_process_crw(int chpid, int on)
-{
-	if (on == 0) {
-		/* Path has gone. We use the link incident routine.*/
-		s390_set_chpid_offline(chpid);
-		return 0; /* De-register is async anyway. */
-	}
-	/*
-	 * Path has come. Allocate a new channel path structure,
-	 * if needed.
-	 */
-	if (get_chp_status(chpid) < 0)
-		new_channel_path(chpid);
-	/* Avoid the extra overhead in process_rec_acc. */
-	return chp_add(chpid);
-}
-
-static int check_for_io_on_path(struct subchannel *sch, int index)
-{
-	int cc;
-
-	cc = stsch(sch->schid, &sch->schib);
-	if (cc)
-		return 0;
-	if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index))
-		return 1;
-	return 0;
-}
-
-static void terminate_internal_io(struct subchannel *sch)
-{
-	if (cio_clear(sch)) {
-		/* Recheck device in case clear failed. */
-		sch->lpm = 0;
-		if (device_trigger_verify(sch) != 0) {
-			if(css_enqueue_subchannel_slow(sch->schid)) {
-				css_clear_subchannel_slow_list();
-				need_rescan = 1;
-			}
-		}
-		return;
-	}
-	/* Request retry of internal operation. */
-	device_set_intretry(sch);
-	/* Call handler. */
-	if (sch->driver && sch->driver->termination)
-		sch->driver->termination(&sch->dev);
-}
-
-static void
-__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
+static void __s390_subchannel_vary_chpid(struct subchannel *sch,
+					 struct chp_id chpid, int on)
 {
 	int chp, old_lpm;
+	int mask;
 	unsigned long flags;
 
-	if (!sch->ssd_info.valid)
-		return;
-
 	spin_lock_irqsave(sch->lock, flags);
 	old_lpm = sch->lpm;
 	for (chp = 0; chp < 8; chp++) {
-		if (sch->ssd_info.chpid[chp] != chpid)
+		mask = 0x80 >> chp;
+		if (!(sch->ssd_info.path_mask & mask))
+			continue;
+		if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
 			continue;
 
 		if (on) {
-			sch->opm |= (0x80 >> chp);
-			sch->lpm |= (0x80 >> chp);
+			sch->opm |= mask;
+			sch->lpm |= mask;
 			if (!old_lpm)
 				device_trigger_reprobe(sch);
 			else if (sch->driver && sch->driver->verify)
 				sch->driver->verify(&sch->dev);
 			break;
 		}
-		sch->opm &= ~(0x80 >> chp);
-		sch->lpm &= ~(0x80 >> chp);
-		if (check_for_io_on_path(sch, chp)) {
+		sch->opm &= ~mask;
+		sch->lpm &= ~mask;
+		if (check_for_io_on_path(sch, mask)) {
 			if (device_is_online(sch))
 				/* Path verification is done after killing. */
 				device_kill_io(sch);
-			else
+			else {
 				/* Kill and retry internal I/O. */
 				terminate_internal_io(sch);
-		} else if (!sch->lpm) {
-			if (device_trigger_verify(sch) != 0) {
-				if (css_enqueue_subchannel_slow(sch->schid)) {
-					css_clear_subchannel_slow_list();
-					need_rescan = 1;
-				}
+				/* Re-start path verification. */
+				if (sch->driver && sch->driver->verify)
+					sch->driver->verify(&sch->dev);
 			}
+		} else if (!sch->lpm) {
+			if (device_trigger_verify(sch) != 0)
+				css_schedule_eval(sch->schid);
 		} else if (sch->driver && sch->driver->verify)
 			sch->driver->verify(&sch->dev);
 		break;
@@ -780,11 +615,10 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
 	spin_unlock_irqrestore(sch->lock, flags);
 }
 
-static int
-s390_subchannel_vary_chpid_off(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_off(struct device *dev, void *data)
 {
 	struct subchannel *sch;
-	__u8 *chpid;
+	struct chp_id *chpid;
 
 	sch = to_subchannel(dev);
 	chpid = data;
@@ -793,11 +627,10 @@ s390_subchannel_vary_chpid_off(struct device *dev, void *data)
 	return 0;
 }
 
-static int
-s390_subchannel_vary_chpid_on(struct device *dev, void *data)
+static int s390_subchannel_vary_chpid_on(struct device *dev, void *data)
 {
 	struct subchannel *sch;
-	__u8 *chpid;
+	struct chp_id *chpid;
 
 	sch = to_subchannel(dev);
 	chpid = data;
@@ -821,40 +654,17 @@ __s390_vary_chpid_on(struct subchannel_id schid, void *data)
 		/* We're through */
 		return -ENXIO;
 	/* Put it on the slow path. */
-	if (css_enqueue_subchannel_slow(schid)) {
-		css_clear_subchannel_slow_list();
-		need_rescan = 1;
-		return -EAGAIN;
-	}
+	css_schedule_eval(schid);
 	return 0;
 }
 
-/*
- * Function: s390_vary_chpid
- * Varies the specified chpid online or offline
+/**
+ * chsc_chp_vary - propagate channel-path vary operation to subchannels
+ * @chpid: channl-path ID
+ * @on: non-zero for vary online, zero for vary offline
  */
-static int
-s390_vary_chpid( __u8 chpid, int on)
+int chsc_chp_vary(struct chp_id chpid, int on)
 {
-	char dbf_text[15];
-	int status;
-
-	sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid);
-	CIO_TRACE_EVENT( 2, dbf_text);
-
-	status = get_chp_status(chpid);
-	if (status < 0) {
-		printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid);
-		return -EINVAL;
-	}
-
-	if (!on && !status) {
-		printk(KERN_ERR "chpid %x is already offline\n", chpid);
-		return -EINVAL;
-	}
-
-	set_chp_logically_online(chpid, on);
-
 	/*
 	 * Redo PathVerification on the devices the chpid connects to
 	 */
@@ -865,118 +675,9 @@ s390_vary_chpid( __u8 chpid, int on)
 	if (on)
 		/* Scan for new devices on varied on path. */
 		for_each_subchannel(__s390_vary_chpid_on, NULL);
-	if (need_rescan || css_slow_subchannels_exist())
-		queue_work(slow_path_wq, &slow_path_work);
 	return 0;
 }
 
-/*
- * Channel measurement related functions
- */
-static ssize_t
-chp_measurement_chars_read(struct kobject *kobj, char *buf, loff_t off,
-			   size_t count)
-{
-	struct channel_path *chp;
-	unsigned int size;
-
-	chp = to_channelpath(container_of(kobj, struct device, kobj));
-	if (!chp->cmg_chars)
-		return 0;
-
-	size = sizeof(struct cmg_chars);
-
-	if (off > size)
-		return 0;
-	if (off + count > size)
-		count = size - off;
-	memcpy(buf, chp->cmg_chars + off, count);
-	return count;
-}
-
-static struct bin_attribute chp_measurement_chars_attr = {
-	.attr = {
-		.name = "measurement_chars",
-		.mode = S_IRUSR,
-		.owner = THIS_MODULE,
-	},
-	.size = sizeof(struct cmg_chars),
-	.read = chp_measurement_chars_read,
-};
-
-static void
-chp_measurement_copy_block(struct cmg_entry *buf,
-			   struct channel_subsystem *css, int chpid)
-{
-	void *area;
-	struct cmg_entry *entry, reference_buf;
-	int idx;
-
-	if (chpid < 128) {
-		area = css->cub_addr1;
-		idx = chpid;
-	} else {
-		area = css->cub_addr2;
-		idx = chpid - 128;
-	}
-	entry = area + (idx * sizeof(struct cmg_entry));
-	do {
-		memcpy(buf, entry, sizeof(*entry));
-		memcpy(&reference_buf, entry, sizeof(*entry));
-	} while (reference_buf.values[0] != buf->values[0]);
-}
-
-static ssize_t
-chp_measurement_read(struct kobject *kobj, char *buf, loff_t off, size_t count)
-{
-	struct channel_path *chp;
-	struct channel_subsystem *css;
-	unsigned int size;
-
-	chp = to_channelpath(container_of(kobj, struct device, kobj));
-	css = to_css(chp->dev.parent);
-
-	size = sizeof(struct cmg_entry);
-
-	/* Only allow single reads. */
-	if (off || count < size)
-		return 0;
-	chp_measurement_copy_block((struct cmg_entry *)buf, css, chp->id);
-	count = size;
-	return count;
-}
-
-static struct bin_attribute chp_measurement_attr = {
-	.attr = {
-		.name = "measurement",
-		.mode = S_IRUSR,
-		.owner = THIS_MODULE,
-	},
-	.size = sizeof(struct cmg_entry),
-	.read = chp_measurement_read,
-};
-
-static void
-chsc_remove_chp_cmg_attr(struct channel_path *chp)
-{
-	device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
-	device_remove_bin_file(&chp->dev, &chp_measurement_attr);
-}
-
-static int
-chsc_add_chp_cmg_attr(struct channel_path *chp)
-{
-	int ret;
-
-	ret = device_create_bin_file(&chp->dev, &chp_measurement_chars_attr);
-	if (ret)
-		return ret;
-	ret = device_create_bin_file(&chp->dev, &chp_measurement_attr);
-	if (ret)
-		device_remove_bin_file(&chp->dev, &chp_measurement_chars_attr);
-	return ret;
-}
-
 static void
 chsc_remove_cmg_attr(struct channel_subsystem *css)
 {
@@ -985,7 +686,7 @@ chsc_remove_cmg_attr(struct channel_subsystem *css)
 	for (i = 0; i <= __MAX_CHPID; i++) {
 		if (!css->chps[i])
 			continue;
-		chsc_remove_chp_cmg_attr(css->chps[i]);
+		chp_remove_cmg_attr(css->chps[i]);
 	}
 }
 
@@ -998,7 +699,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
 	for (i = 0; i <= __MAX_CHPID; i++) {
 		if (!css->chps[i])
 			continue;
-		ret = chsc_add_chp_cmg_attr(css->chps[i]);
+		ret = chp_add_cmg_attr(css->chps[i]);
 		if (ret)
 			goto cleanup;
 	}
@@ -1007,12 +708,11 @@ cleanup:
 	for (--i; i >= 0; i--) {
 		if (!css->chps[i])
 			continue;
-		chsc_remove_chp_cmg_attr(css->chps[i]);
+		chp_remove_cmg_attr(css->chps[i]);
 	}
 	return ret;
 }
 
-
 static int
 __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 {
@@ -1118,7 +818,7 @@ chsc_secm(struct channel_subsystem *css, int enable)
 		} else
 			chsc_remove_cmg_attr(css);
 	}
-	if (enable && !css->cm_enabled) {
+	if (!css->cm_enabled) {
 		free_page((unsigned long)css->cub_addr1);
 		free_page((unsigned long)css->cub_addr2);
 	}
@@ -1127,109 +827,8 @@ chsc_secm(struct channel_subsystem *css, int enable)
 	return ret;
 }
 
-/*
- * Files for the channel path entries.
- */
-static ssize_t
-chp_status_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
-	if (!chp)
-		return 0;
-	return (get_chp_status(chp->id) ? sprintf(buf, "online\n") :
-		sprintf(buf, "offline\n"));
-}
-
-static ssize_t
-chp_status_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
-{
-	struct channel_path *cp = container_of(dev, struct channel_path, dev);
-	char cmd[10];
-	int num_args;
-	int error;
-
-	num_args = sscanf(buf, "%5s", cmd);
-	if (!num_args)
-		return count;
-
-	if (!strnicmp(cmd, "on", 2))
-		error = s390_vary_chpid(cp->id, 1);
-	else if (!strnicmp(cmd, "off", 3))
-		error = s390_vary_chpid(cp->id, 0);
-	else
-		error = -EINVAL;
-
-	return error < 0 ? error : count;
-
-}
-
-static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write);
-
-static ssize_t
-chp_type_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct channel_path *chp = container_of(dev, struct channel_path, dev);
-
-	if (!chp)
-		return 0;
-	return sprintf(buf, "%x\n", chp->desc.desc);
-}
-
-static DEVICE_ATTR(type, 0444, chp_type_show, NULL);
-
-static ssize_t
-chp_cmg_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct channel_path *chp = to_channelpath(dev);
-
-	if (!chp)
-		return 0;
-	if (chp->cmg == -1) /* channel measurements not available */
-		return sprintf(buf, "unknown\n");
-	return sprintf(buf, "%x\n", chp->cmg);
-}
-
-static DEVICE_ATTR(cmg, 0444, chp_cmg_show, NULL);
-
-static ssize_t
-chp_shared_show(struct device *dev, struct device_attribute *attr, char *buf)
-{
-	struct channel_path *chp = to_channelpath(dev);
-
-	if (!chp)
-		return 0;
-	if (chp->shared == -1) /* channel measurements not available */
-		return sprintf(buf, "unknown\n");
-	return sprintf(buf, "%x\n", chp->shared);
-}
-
-static DEVICE_ATTR(shared, 0444, chp_shared_show, NULL);
-
-static struct attribute * chp_attrs[] = {
-	&dev_attr_status.attr,
-	&dev_attr_type.attr,
-	&dev_attr_cmg.attr,
-	&dev_attr_shared.attr,
-	NULL,
-};
-
-static struct attribute_group chp_attr_group = {
-	.attrs = chp_attrs,
-};
-
-static void
-chp_release(struct device *dev)
-{
-	struct channel_path *cp;
-
-	cp = container_of(dev, struct channel_path, dev);
-	kfree(cp);
-}
-
-static int
-chsc_determine_channel_path_description(int chpid,
-					struct channel_path_desc *desc)
+int chsc_determine_channel_path_description(struct chp_id chpid,
+					    struct channel_path_desc *desc)
 {
 	int ccode, ret;
 
@@ -1252,8 +851,8 @@ chsc_determine_channel_path_description(int chpid,
 	scpd_area->request.length = 0x0010;
 	scpd_area->request.code = 0x0002;
 
-	scpd_area->first_chpid = chpid;
-	scpd_area->last_chpid = chpid;
+	scpd_area->first_chpid = chpid.id;
+	scpd_area->last_chpid = chpid.id;
 
 	ccode = chsc(scpd_area);
 	if (ccode > 0) {
@@ -1316,8 +915,7 @@ chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv,
 	}
 }
 
-static int
-chsc_get_channel_measurement_chars(struct channel_path *chp)
+int chsc_get_channel_measurement_chars(struct channel_path *chp)
 {
 	int ccode, ret;
 
@@ -1349,8 +947,8 @@ chsc_get_channel_measurement_chars(struct channel_path *chp)
 	scmc_area->request.length = 0x0010;
 	scmc_area->request.code = 0x0022;
 
-	scmc_area->first_chpid = chp->id;
-	scmc_area->last_chpid = chp->id;
+	scmc_area->first_chpid = chp->chpid.id;
+	scmc_area->last_chpid = chp->chpid.id;
 
 	ccode = chsc(scmc_area);
 	if (ccode > 0) {
@@ -1392,94 +990,6 @@ out:
 	return ret;
 }
 
-/*
- * Entries for chpids on the system bus.
- * This replaces /proc/chpids.
- */
-static int
-new_channel_path(int chpid)
-{
-	struct channel_path *chp;
-	int ret;
-
-	chp = kzalloc(sizeof(struct channel_path), GFP_KERNEL);
-	if (!chp)
-		return -ENOMEM;
-
-	/* fill in status, etc. */
-	chp->id = chpid;
-	chp->state = 1;
-	chp->dev.parent = &css[0]->device;
-	chp->dev.release = chp_release;
-	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
-
-	/* Obtain channel path description and fill it in. */
-	ret = chsc_determine_channel_path_description(chpid, &chp->desc);
-	if (ret)
-		goto out_free;
-	/* Get channel-measurement characteristics. */
-	if (css_characteristics_avail && css_chsc_characteristics.scmc
-	    && css_chsc_characteristics.secm) {
-		ret = chsc_get_channel_measurement_chars(chp);
-		if (ret)
-			goto out_free;
-	} else {
-		static int msg_done;
-
-		if (!msg_done) {
-			printk(KERN_WARNING "cio: Channel measurements not "
-			       "available, continuing.\n");
-			msg_done = 1;
-		}
-		chp->cmg = -1;
-	}
-
-	/* make it known to the system */
-	ret = device_register(&chp->dev);
-	if (ret) {
-		printk(KERN_WARNING "%s: could not register %02x\n",
-		       __func__, chpid);
-		goto out_free;
-	}
-	ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group);
-	if (ret) {
-		device_unregister(&chp->dev);
-		goto out_free;
-	}
-	mutex_lock(&css[0]->mutex);
-	if (css[0]->cm_enabled) {
-		ret = chsc_add_chp_cmg_attr(chp);
-		if (ret) {
-			sysfs_remove_group(&chp->dev.kobj, &chp_attr_group);
-			device_unregister(&chp->dev);
-			mutex_unlock(&css[0]->mutex);
-			goto out_free;
-		}
-	}
-	css[0]->chps[chpid] = chp;
-	mutex_unlock(&css[0]->mutex);
-	return ret;
-out_free:
-	kfree(chp);
-	return ret;
-}
-
-void *
-chsc_get_chp_desc(struct subchannel *sch, int chp_no)
-{
-	struct channel_path *chp;
-	struct channel_path_desc *desc;
-
-	chp = css[0]->chps[sch->schib.pmcw.chpid[chp_no]];
-	if (!chp)
-		return NULL;
-	desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL);
-	if (!desc)
-		return NULL;
-	memcpy(desc, &chp->desc, sizeof(struct channel_path_desc));
-	return desc;
-}
-
 static int __init
 chsc_alloc_sei_area(void)
 {