Diffstat (limited to 'drivers/s390/block/dasd_fba.c')
-rw-r--r--  drivers/s390/block/dasd_fba.c | 119
1 file changed, 68 insertions(+), 51 deletions(-)
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 1d95822e0b8..d13ea05089a 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -117,6 +117,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
 static int
 dasd_fba_check_characteristics(struct dasd_device *device)
 {
+	struct dasd_block *block;
 	struct dasd_fba_private *private;
 	struct ccw_device *cdev = device->cdev;
 	void *rdc_data;
@@ -133,6 +134,16 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 		}
 		device->private = (void *) private;
 	}
+	block = dasd_alloc_block();
+	if (IS_ERR(block)) {
+		DEV_MESSAGE(KERN_WARNING, device, "%s",
+			    "could not allocate dasd block structure");
+		kfree(device->private);
+		return PTR_ERR(block);
+	}
+	device->block = block;
+	block->base = device;
+
 	/* Read Device Characteristics */
 	rdc_data = (void *) &(private->rdc_data);
 	rc = dasd_generic_read_dev_chars(device, "FBA ", &rdc_data, 32);
@@ -155,60 +166,37 @@ dasd_fba_check_characteristics(struct dasd_device *device)
 	return 0;
 }
 
-static int
-dasd_fba_do_analysis(struct dasd_device *device)
+static int dasd_fba_do_analysis(struct dasd_block *block)
 {
 	struct dasd_fba_private *private;
 	int sb, rc;
 
-	private = (struct dasd_fba_private *) device->private;
+	private = (struct dasd_fba_private *) block->base->private;
 	rc = dasd_check_blocksize(private->rdc_data.blk_size);
 	if (rc) {
-		DEV_MESSAGE(KERN_INFO, device, "unknown blocksize %d",
+		DEV_MESSAGE(KERN_INFO, block->base, "unknown blocksize %d",
 			    private->rdc_data.blk_size);
 		return rc;
 	}
-	device->blocks = private->rdc_data.blk_bdsa;
-	device->bp_block = private->rdc_data.blk_size;
-	device->s2b_shift = 0;	/* bits to shift 512 to get a block */
+	block->blocks = private->rdc_data.blk_bdsa;
+	block->bp_block = private->rdc_data.blk_size;
+	block->s2b_shift = 0;	/* bits to shift 512 to get a block */
 	for (sb = 512; sb < private->rdc_data.blk_size; sb = sb << 1)
-		device->s2b_shift++;
+		block->s2b_shift++;
 	return 0;
 }
 
-static int
-dasd_fba_fill_geometry(struct dasd_device *device, struct hd_geometry *geo)
+static int dasd_fba_fill_geometry(struct dasd_block *block,
+				  struct hd_geometry *geo)
 {
-	if (dasd_check_blocksize(device->bp_block) != 0)
+	if (dasd_check_blocksize(block->bp_block) != 0)
 		return -EINVAL;
-	geo->cylinders = (device->blocks << device->s2b_shift) >> 10;
+	geo->cylinders = (block->blocks << block->s2b_shift) >> 10;
 	geo->heads = 16;
-	geo->sectors = 128 >> device->s2b_shift;
+	geo->sectors = 128 >> block->s2b_shift;
 	return 0;
 }
 
-static dasd_era_t
-dasd_fba_examine_error(struct dasd_ccw_req * cqr, struct irb * irb)
-{
-	struct dasd_device *device;
-	struct ccw_device *cdev;
-
-	device = (struct dasd_device *) cqr->device;
-	if (irb->scsw.cstat == 0x00 &&
-	    irb->scsw.dstat == (DEV_STAT_CHN_END | DEV_STAT_DEV_END))
-		return dasd_era_none;
-
-	cdev = device->cdev;
-	switch (cdev->id.dev_type) {
-	case 0x3370:
-		return dasd_3370_erp_examine(cqr, irb);
-	case 0x9336:
-		return dasd_9336_erp_examine(cqr, irb);
-	default:
-		return dasd_era_recover;
-	}
-}
-
 static dasd_erp_fn_t
 dasd_fba_erp_action(struct dasd_ccw_req * cqr)
 {
@@ -221,13 +209,34 @@ dasd_fba_erp_postaction(struct dasd_ccw_req * cqr)
 	if (cqr->function == dasd_default_erp_action)
 		return dasd_default_erp_postaction;
 
-	DEV_MESSAGE(KERN_WARNING, cqr->device, "unknown ERP action %p",
+	DEV_MESSAGE(KERN_WARNING, cqr->startdev, "unknown ERP action %p",
 		    cqr->function);
 	return NULL;
 }
 
-static struct dasd_ccw_req *
-dasd_fba_build_cp(struct dasd_device * device, struct request *req)
+static void dasd_fba_handle_unsolicited_interrupt(struct dasd_device *device,
+						  struct irb *irb)
+{
+	char mask;
+
+	/* first of all check for state change pending interrupt */
+	mask = DEV_STAT_ATTENTION | DEV_STAT_DEV_END | DEV_STAT_UNIT_EXCEP;
+	if ((irb->scsw.dstat & mask) == mask) {
+		dasd_generic_handle_state_change(device);
+		return;
+	}
+
+	/* check for unsolicited interrupts */
+	DEV_MESSAGE(KERN_DEBUG, device, "%s",
+		    "unsolicited interrupt received");
+	device->discipline->dump_sense(device, NULL, irb);
+	dasd_schedule_device_bh(device);
+	return;
+};
+
+static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
+					      struct dasd_block *block,
+					      struct request *req)
 {
 	struct dasd_fba_private *private;
 	unsigned long *idaws;
@@ -242,17 +251,17 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	unsigned int blksize, off;
 	unsigned char cmd;
 
-	private = (struct dasd_fba_private *) device->private;
+	private = (struct dasd_fba_private *) block->base->private;
 	if (rq_data_dir(req) == READ) {
 		cmd = DASD_FBA_CCW_READ;
 	} else if (rq_data_dir(req) == WRITE) {
 		cmd = DASD_FBA_CCW_WRITE;
 	} else
 		return ERR_PTR(-EINVAL);
-	blksize = device->bp_block;
+	blksize = block->bp_block;
 	/* Calculate record id of first and last block. */
-	first_rec = req->sector >> device->s2b_shift;
-	last_rec = (req->sector + req->nr_sectors - 1) >> device->s2b_shift;
+	first_rec = req->sector >> block->s2b_shift;
+	last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
 	/* Check struct bio and count the number of blocks for the request. */
 	count = 0;
 	cidaw = 0;
@@ -260,7 +269,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 		if (bv->bv_len & (blksize - 1))
 			/* Fba can only do full blocks. */
 			return ERR_PTR(-EINVAL);
-		count += bv->bv_len >> (device->s2b_shift + 9);
+		count += bv->bv_len >> (block->s2b_shift + 9);
 #if defined(CONFIG_64BIT)
 		if (idal_is_needed (page_address(bv->bv_page), bv->bv_len))
 			cidaw += bv->bv_len / blksize;
@@ -284,13 +293,13 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	}
 	/* Allocate the ccw request. */
 	cqr = dasd_smalloc_request(dasd_fba_discipline.name,
-				   cplength, datasize, device);
+				   cplength, datasize, memdev);
 	if (IS_ERR(cqr))
 		return cqr;
 	ccw = cqr->cpaddr;
 	/* First ccw is define extent. */
 	define_extent(ccw++, cqr->data, rq_data_dir(req),
-		      device->bp_block, req->sector, req->nr_sectors);
+		      block->bp_block, req->sector, req->nr_sectors);
 	/* Build locate_record + read/write ccws. */
 	idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
 	LO_data = (struct LO_fba_data *) (idaws + cidaw);
@@ -326,7 +335,7 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 					ccw[-1].flags |= CCW_FLAG_CC;
 			}
 			ccw->cmd_code = cmd;
-			ccw->count = device->bp_block;
+			ccw->count = block->bp_block;
 			if (idal_is_needed(dst, blksize)) {
 				ccw->cda = (__u32)(addr_t) idaws;
 				ccw->flags = CCW_FLAG_IDA;
@@ -342,7 +351,9 @@ dasd_fba_build_cp(struct dasd_device * device, struct request *req)
 	}
 	if (req->cmd_flags & REQ_FAILFAST)
 		set_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags);
-	cqr->device = device;
+	cqr->startdev = memdev;
+	cqr->memdev = memdev;
+	cqr->block = block;
 	cqr->expires = 5 * 60 * HZ;	/* 5 minutes */
 	cqr->retries = 32;
 	cqr->buildclk = get_clock();
@@ -363,8 +374,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 
 	if (!dasd_page_cache)
 		goto out;
-	private = (struct dasd_fba_private *) cqr->device->private;
-	blksize = cqr->device->bp_block;
+	private = (struct dasd_fba_private *) cqr->block->base->private;
+	blksize = cqr->block->bp_block;
 	ccw = cqr->cpaddr;
 	/* Skip over define extent & locate record. */
 	ccw++;
@@ -394,10 +405,15 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
 	}
 out:
 	status = cqr->status == DASD_CQR_DONE;
-	dasd_sfree_request(cqr, cqr->device);
+	dasd_sfree_request(cqr, cqr->memdev);
 	return status;
 }
 
+static void dasd_fba_handle_terminated_request(struct dasd_ccw_req *cqr)
+{
+	cqr->status = DASD_CQR_FILLED;
+};
+
 static int
 dasd_fba_fill_info(struct dasd_device * device,
 		   struct dasd_information2_t * info)
@@ -546,9 +562,10 @@ static struct dasd_discipline dasd_fba_discipline = {
 	.fill_geometry = dasd_fba_fill_geometry,
 	.start_IO = dasd_start_IO,
 	.term_IO = dasd_term_IO,
-	.examine_error = dasd_fba_examine_error,
+	.handle_terminated_request = dasd_fba_handle_terminated_request,
 	.erp_action = dasd_fba_erp_action,
 	.erp_postaction = dasd_fba_erp_postaction,
+	.handle_unsolicited_interrupt = dasd_fba_handle_unsolicited_interrupt,
 	.build_cp = dasd_fba_build_cp,
 	.free_cp = dasd_fba_free_cp,
 	.dump_sense = dasd_fba_dump_sense,
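
Taken together, the hunks above retarget the FBA discipline callbacks from struct dasd_device to the new struct dasd_block, which dasd_fba_check_characteristics() now allocates and links both ways (device->block and block->base). The standalone sketch below is not kernel code; it uses simplified stand-in structs (fba_device, fba_block are illustrative names, not the real dasd structures) to show that linking pattern and the s2b_shift computation ("bits to shift 512 to get a block") and geometry math that dasd_fba_do_analysis() and dasd_fba_fill_geometry() now perform on the block object.

/* Standalone sketch (not kernel code): simplified stand-ins for the
 * dasd_device/dasd_block split introduced by this patch. */
#include <stdio.h>

struct fba_block;

struct fba_device {			/* stands in for struct dasd_device */
	unsigned int blk_size;		/* like rdc_data.blk_size in the patch */
	unsigned int blk_count;		/* like rdc_data.blk_bdsa */
	struct fba_block *block;	/* device->block link added by the patch */
};

struct fba_block {			/* stands in for struct dasd_block */
	struct fba_device *base;	/* block->base back-link */
	unsigned int bp_block;		/* bytes per block */
	unsigned int blocks;		/* number of blocks */
	unsigned int s2b_shift;		/* bits to shift 512 to get a block */
};

/* Mirrors the loop in dasd_fba_do_analysis(): derive s2b_shift from blocksize. */
static void analyze(struct fba_block *block)
{
	unsigned int sb;

	block->blocks = block->base->blk_count;
	block->bp_block = block->base->blk_size;
	block->s2b_shift = 0;
	for (sb = 512; sb < block->bp_block; sb <<= 1)
		block->s2b_shift++;
}

int main(void)
{
	struct fba_device dev = { .blk_size = 4096, .blk_count = 100000 };
	struct fba_block blk = { .base = &dev };

	dev.block = &blk;		/* two-way link, as in check_characteristics() */
	analyze(&blk);
	/* geometry as in dasd_fba_fill_geometry(): 16 heads, 128 >> s2b_shift sectors */
	printf("s2b_shift=%u cylinders=%u sectors=%u\n",
	       blk.s2b_shift,
	       (blk.blocks << blk.s2b_shift) >> 10,
	       128u >> blk.s2b_shift);
	return 0;
}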