author     Peter Oberparleiter <peter.oberparleiter@de.ibm.com>  2007-04-27 10:01:35 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>           2007-04-27 10:01:40 -0400
commit     7ad6a24970325294a22a08446d473384c15b928e
tree       c8f1e25035b207e2a45a29138309acaee20d6cb6  /drivers/s390/cio/chsc.c
parent     83b3370c79b91b9be3f6540c3c914e689134b45f
[S390] cio: fix subchannel channel-path data usage
Ensure that channel-path related subchannel data is only retrieved and
used when it is valid and that it is updated when it may have changed.
Signed-off-by: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
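
For readers outside the s390 common I/O layer, the core of the fix is that the per-path fields of a subchannel's SSD data (chpid[] and fla[]) are only meaningful for paths whose bit is set in path_mask, and the link addresses only where fla_valid_mask agrees; the patch makes every consumer check those masks before touching the data. What follows is a minimal, self-contained sketch of that validity-mask pattern. The struct, field values and program are illustrative stand-ins only (simplified types, invented sample data), not the kernel's chsc_ssd_info:

/*
 * Hedged illustration only: simplified stand-ins for the kernel's
 * chsc_ssd_info; the real definitions live in drivers/s390/cio/.
 */
#include <stdio.h>
#include <stdint.h>

struct ssd_info_sketch {
        uint8_t  path_mask;      /* bit set => chpid[i] is valid      */
        uint8_t  fla_valid_mask; /* bit set => fla[i] is valid        */
        uint8_t  chpid[8];       /* channel-path IDs, one per path    */
        uint16_t fla[8];         /* full link addresses, one per path */
};

/* Only touch per-path data whose validity bit is set, as the patch enforces. */
static void dump_valid_paths(const struct ssd_info_sketch *ssd)
{
        for (int i = 0; i < 8; i++) {
                int mask = 0x80 >> i;        /* path 0 is the MSB */
                if (!(ssd->path_mask & mask))
                        continue;            /* no channel path installed here */
                printf("path %d: chpid %02x", i, ssd->chpid[i]);
                if (ssd->fla_valid_mask & mask)
                        printf(", fla %04x", ssd->fla[i]);
                printf("\n");
        }
}

int main(void)
{
        /* Invented sample: paths 0 and 1 installed, only path 0 has a fla. */
        struct ssd_info_sketch ssd = {
                .path_mask = 0xc0, .fla_valid_mask = 0x80,
                .chpid = { 0x40, 0x41 }, .fla = { 0x1234 },
        };
        dump_valid_paths(&ssd);
        return 0;
}

The same 0x80 >> i convention (path 0 in the most significant bit) is what the new get_res_chpid_mask() and the reworked __s390_subchannel_vary_chpid() use in the diff below.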
Diffstat (limited to 'drivers/s390/cio/chsc.c')
 -rw-r--r--  drivers/s390/cio/chsc.c  |  303
 1 file changed, 107 insertions, 196 deletions
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 89a130a62654..0841e16b6a82 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -26,155 +26,84 @@
 
 static void *sei_page;
 
-/* FIXME: this is _always_ called for every subchannel. shouldn't we
- * process more than one at a time? */
-static int
-chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
-{
-        int ccode, j;
+struct chsc_ssd_area {
+        struct chsc_header request;
+        u16 :10;
+        u16 ssid:2;
+        u16 :4;
+        u16 f_sch;        /* first subchannel */
+        u16 :16;
+        u16 l_sch;        /* last subchannel */
+        u32 :32;
+        struct chsc_header response;
+        u32 :32;
+        u8 sch_valid : 1;
+        u8 dev_valid : 1;
+        u8 st        : 3; /* subchannel type */
+        u8 zeroes    : 3;
+        u8 unit_addr;     /* unit address */
+        u16 devno;        /* device number */
+        u8 path_mask;
+        u8 fla_valid_mask;
+        u16 sch;          /* subchannel */
+        u8 chpid[8];      /* chpids 0-7 */
+        u16 fla[8];       /* full link addresses 0-7 */
+} __attribute__ ((packed));
 
-        struct {
-                struct chsc_header request;
-                u16 reserved1a:10;
-                u16 ssid:2;
-                u16 reserved1b:4;
-                u16 f_sch;        /* first subchannel */
-                u16 reserved2;
-                u16 l_sch;        /* last subchannel */
-                u32 reserved3;
-                struct chsc_header response;
-                u32 reserved4;
-                u8 sch_valid : 1;
-                u8 dev_valid : 1;
-                u8 st        : 3; /* subchannel type */
-                u8 zeroes    : 3;
-                u8 unit_addr;     /* unit address */
-                u16 devno;        /* device number */
-                u8 path_mask;
-                u8 fla_valid_mask;
-                u16 sch;          /* subchannel */
-                u8 chpid[8];      /* chpids 0-7 */
-                u16 fla[8];       /* full link addresses 0-7 */
-        } __attribute__ ((packed)) *ssd_area;
-
-        ssd_area = page;
+int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd)
+{
+        unsigned long page;
+        struct chsc_ssd_area *ssd_area;
+        int ccode;
+        int ret;
+        int i;
+        int mask;
 
+        page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
+        if (!page)
+                return -ENOMEM;
+        ssd_area = (struct chsc_ssd_area *) page;
         ssd_area->request.length = 0x0010;
         ssd_area->request.code = 0x0004;
-
-        ssd_area->ssid = sch->schid.ssid;
-        ssd_area->f_sch = sch->schid.sch_no;
-        ssd_area->l_sch = sch->schid.sch_no;
+        ssd_area->ssid = schid.ssid;
+        ssd_area->f_sch = schid.sch_no;
+        ssd_area->l_sch = schid.sch_no;
 
         ccode = chsc(ssd_area);
+        /* Check response. */
         if (ccode > 0) {
-                pr_debug("chsc returned with ccode = %d\n", ccode);
-                return (ccode == 3) ? -ENODEV : -EBUSY;
+                ret = (ccode == 3) ? -ENODEV : -EBUSY;
+                goto out_free;
         }
-
-        switch (ssd_area->response.code) {
-        case 0x0001: /* everything ok */
-                break;
-        case 0x0002:
-                CIO_CRW_EVENT(2, "Invalid command!\n");
-                return -EINVAL;
-        case 0x0003:
-                CIO_CRW_EVENT(2, "Error in chsc request block!\n");
-                return -EINVAL;
-        case 0x0004:
-                CIO_CRW_EVENT(2, "Model does not provide ssd\n");
-                return -EOPNOTSUPP;
-        default:
-                CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
+        if (ssd_area->response.code != 0x0001) {
+                CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n",
+                              schid.ssid, schid.sch_no,
                               ssd_area->response.code);
-                return -EIO;
-        }
-
-        /*
-         * ssd_area->st stores the type of the detected
-         * subchannel, with the following definitions:
-         *
-         * 0: I/O subchannel:     All fields have meaning
-         * 1: CHSC subchannel:    Only sch_val, st and sch
-         *                        have meaning
-         * 2: Message subchannel: All fields except unit_addr
-         *                        have meaning
-         * 3: ADM subchannel:     Only sch_val, st and sch
-         *                        have meaning
-         *
-         * Other types are currently undefined.
-         */
-        if (ssd_area->st > 3) { /* uhm, that looks strange... */
-                CIO_CRW_EVENT(0, "Strange subchannel type %d"
-                              " for sch 0.%x.%04x\n", ssd_area->st,
-                              sch->schid.ssid, sch->schid.sch_no);
-                /*
-                 * There may have been a new subchannel type defined in the
-                 * time since this code was written; since we don't know which
-                 * fields have meaning and what to do with it we just jump out
-                 */
-                return 0;
-        } else {
-                const char *type[4] = {"I/O", "chsc", "message", "ADM"};
-                CIO_CRW_EVENT(6, "ssd: sch 0.%x.%04x is %s subchannel\n",
-                              sch->schid.ssid, sch->schid.sch_no,
-                              type[ssd_area->st]);
-
-                sch->ssd_info.valid = 1;
-                sch->ssd_info.type = ssd_area->st;
+                ret = -EIO;
+                goto out_free;
         }
-
-        if (ssd_area->st == 0 || ssd_area->st == 2) {
-                for (j = 0; j < 8; j++) {
-                        if (!((0x80 >> j) & ssd_area->path_mask &
-                              ssd_area->fla_valid_mask))
-                                continue;
-                        sch->ssd_info.chpid[j] = ssd_area->chpid[j];
-                        sch->ssd_info.fla[j] = ssd_area->fla[j];
-                }
+        if (!ssd_area->sch_valid) {
+                ret = -ENODEV;
+                goto out_free;
         }
-        return 0;
-}
-
-int
-css_get_ssd_info(struct subchannel *sch)
-{
-        int ret;
-        void *page;
-
-        page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
-        if (!page)
-                return -ENOMEM;
-        spin_lock_irq(sch->lock);
-        ret = chsc_get_sch_desc_irq(sch, page);
-        if (ret) {
-                static int cio_chsc_err_msg;
-
-                if (!cio_chsc_err_msg) {
-                        printk(KERN_ERR
-                               "chsc_get_sch_descriptions:"
-                               " Error %d while doing chsc; "
-                               "processing some machine checks may "
-                               "not work\n", ret);
-                        cio_chsc_err_msg = 1;
-                }
-        }
-        spin_unlock_irq(sch->lock);
-        free_page((unsigned long)page);
-        if (!ret) {
-                int j, mask;
-                struct chp_id chpid;
-
-                chp_id_init(&chpid);
-                /* Allocate channel path structures, if needed. */
-                for (j = 0; j < 8; j++) {
-                        mask = 0x80 >> j;
-                        chpid.id = sch->ssd_info.chpid[j];
-                        if ((sch->schib.pmcw.pim & mask) &&
-                            !chp_is_registered(chpid))
-                                chp_new(chpid);
+        /* Copy data */
+        ret = 0;
+        memset(ssd, 0, sizeof(struct chsc_ssd_info));
+        if ((ssd_area->st != 0) && (ssd_area->st != 2))
+                goto out_free;
+        ssd->path_mask = ssd_area->path_mask;
+        ssd->fla_valid_mask = ssd_area->fla_valid_mask;
+        for (i = 0; i < 8; i++) {
+                mask = 0x80 >> i;
+                if (ssd_area->path_mask & mask) {
+                        chp_id_init(&ssd->chpid[i]);
+                        ssd->chpid[i].id = ssd_area->chpid[i];
                 }
+                if (ssd_area->fla_valid_mask & mask)
+                        ssd->fla[i] = ssd_area->fla[i];
         }
+out_free:
+        free_page(page);
         return ret;
 }
 
@@ -276,47 +205,6 @@ void chsc_chp_offline(struct chp_id chpid)
                                   s390_subchannel_remove_chpid);
 }
 
-struct res_acc_data {
-        struct chp_id chpid;
-        u32 fla_mask;
-        u16 fla;
-};
-
-static int s390_process_res_acc_sch(struct res_acc_data *res_data,
-                                    struct subchannel *sch)
-{
-        int found;
-        int chp;
-        int ccode;
-
-        found = 0;
-        for (chp = 0; chp <= 7; chp++)
-                /*
-                 * check if chpid is in information updated by ssd
-                 */
-                if (sch->ssd_info.valid &&
-                    sch->ssd_info.chpid[chp] == res_data->chpid.id &&
-                    (sch->ssd_info.fla[chp] & res_data->fla_mask)
-                    == res_data->fla) {
-                        found = 1;
-                        break;
-                }
-
-        if (found == 0)
-                return 0;
-
-        /*
-         * Do a stsch to update our subchannel structure with the
-         * new path information and eventually check for logically
-         * offline chpids.
-         */
-        ccode = stsch(sch->schid, &sch->schib);
-        if (ccode > 0)
-                return 0;
-
-        return 0x80 >> chp;
-}
-
 static int
 s390_process_res_acc_new_sch(struct subchannel_id schid)
 {
@@ -338,6 +226,32 @@ s390_process_res_acc_new_sch(struct subchannel_id schid)
         return 0;
 }
 
+struct res_acc_data {
+        struct chp_id chpid;
+        u32 fla_mask;
+        u16 fla;
+};
+
+static int get_res_chpid_mask(struct chsc_ssd_info *ssd,
+                              struct res_acc_data *data)
+{
+        int i;
+        int mask;
+
+        for (i = 0; i < 8; i++) {
+                mask = 0x80 >> i;
+                if (!(ssd->path_mask & mask))
+                        continue;
+                if (!chp_id_is_equal(&ssd->chpid[i], &data->chpid))
+                        continue;
+                if ((ssd->fla_valid_mask & mask) &&
+                    ((ssd->fla[i] & data->fla_mask) != data->fla))
+                        continue;
+                return mask;
+        }
+        return 0;
+}
+
 static int
 __s390_process_res_acc(struct subchannel_id schid, void *data)
 {
@@ -352,14 +266,11 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                 return s390_process_res_acc_new_sch(schid);
 
         spin_lock_irq(sch->lock);
-
-        chp_mask = s390_process_res_acc_sch(res_data, sch);
-
-        if (chp_mask == 0) {
-                spin_unlock_irq(sch->lock);
-                put_device(&sch->dev);
-                return 0;
-        }
+        chp_mask = get_res_chpid_mask(&sch->ssd_info, res_data);
+        if (chp_mask == 0)
+                goto out;
+        if (stsch(sch->schid, &sch->schib))
+                goto out;
         old_lpm = sch->lpm;
         sch->lpm = ((sch->schib.pmcw.pim &
                      sch->schib.pmcw.pam &
@@ -369,13 +280,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
                 device_trigger_reprobe(sch);
         else if (sch->driver && sch->driver->verify)
                 sch->driver->verify(&sch->dev);
-
+out:
         spin_unlock_irq(sch->lock);
         put_device(&sch->dev);
         return 0;
 }
 
-
 static void s390_process_res_acc (struct res_acc_data *res_data)
 {
         char dbf_txt[15];
@@ -661,29 +571,30 @@ static void __s390_subchannel_vary_chpid(struct subchannel *sch,
                                          struct chp_id chpid, int on)
 {
         int chp, old_lpm;
+        int mask;
         unsigned long flags;
 
-        if (!sch->ssd_info.valid)
-                return;
-
         spin_lock_irqsave(sch->lock, flags);
         old_lpm = sch->lpm;
         for (chp = 0; chp < 8; chp++) {
-                if (sch->ssd_info.chpid[chp] != chpid.id)
+                mask = 0x80 >> chp;
+                if (!(sch->ssd_info.path_mask & mask))
+                        continue;
+                if (!chp_id_is_equal(&sch->ssd_info.chpid[chp], &chpid))
                         continue;
 
                 if (on) {
-                        sch->opm |= (0x80 >> chp);
-                        sch->lpm |= (0x80 >> chp);
+                        sch->opm |= mask;
+                        sch->lpm |= mask;
                         if (!old_lpm)
                                 device_trigger_reprobe(sch);
                         else if (sch->driver && sch->driver->verify)
                                 sch->driver->verify(&sch->dev);
                         break;
                 }
-                sch->opm &= ~(0x80 >> chp);
-                sch->lpm &= ~(0x80 >> chp);
-                if (check_for_io_on_path(sch, (0x80 >> chp))) {
+                sch->opm &= ~mask;
+                sch->lpm &= ~mask;
+                if (check_for_io_on_path(sch, mask)) {
                         if (device_is_online(sch))
                                 /* Path verification is done after killing. */
                                 device_kill_io(sch);
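
As a side note on the structure of the rewritten chsc_get_ssd_info() above: it adopts a single-exit cleanup style, where the request page is allocated up front, the CHSC response is validated, only validated data is copied into the caller's chsc_ssd_info, and the page is freed on every path through one out_free label. Below is a hedged user-space sketch of that allocate/validate/copy/free shape; query_hw() and struct result are invented stand-ins for illustration, not the CHSC interface:

/*
 * Hedged sketch of the single-exit cleanup pattern used by the
 * rewritten chsc_get_ssd_info(): allocate, issue the request,
 * validate, copy out, and free the buffer on every exit path.
 */
#include <stdlib.h>
#include <string.h>
#include <errno.h>

struct result { int valid; unsigned char data[16]; };

/* Stand-in for issuing the hardware query; always "succeeds" here. */
static int query_hw(void *buf, size_t len)
{
        memset(buf, 0, len);
        ((struct result *)buf)->valid = 1;
        return 0;
}

int get_info(struct result *out)
{
        int ret;
        struct result *buf = calloc(1, sizeof(*buf)); /* kernel code uses get_zeroed_page() */

        if (!buf)
                return -ENOMEM;
        ret = query_hw(buf, sizeof(*buf));
        if (ret)
                goto out_free;          /* request failed */
        if (!buf->valid) {
                ret = -ENODEV;          /* response says: nothing there */
                goto out_free;
        }
        memcpy(out, buf, sizeof(*out)); /* copy only validated data */
out_free:
        free(buf);                      /* freed on every exit path */
        return ret;
}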