diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2015-06-25 17:29:53 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2015-06-25 17:29:53 -0400 |
commit | bfffa1cc9db8a950dd4b1a09999f8a20e69a6652 (patch) | |
tree | 01b046072ca9105b7852f790762f7b00b72e6ff7 /drivers | |
parent | cc8a0a943948d1b1bc803b37486831af7b04dd38 (diff) | |
parent | ae994ea972473c0ace9d55f718b60f0727af1381 (diff) |
Merge branch 'for-4.2/core' of git://git.kernel.dk/linux-block
Pull core block IO update from Jens Axboe:
"Nothing really major in here, mostly a collection of smaller
optimizations and cleanups, mixed with various fixes. In more detail,
this contains:
- Addition of policy specific data to blkcg for block cgroups. From
Arianna Avanzini.
- Various cleanups around command types from Christoph.
- Cleanup of the suspend block I/O path from Christoph.
- Plugging updates from Shaohua and Jeff Moyer, for blk-mq.
- Eliminating atomic inc/dec of both remaining IO count and reference
count in a bio. From me.
- Fixes for SG gap and chunk size support for data-less (discards)
IO, so we can merge these better. From me.
- Small restructuring of blk-mq shared tag support, freeing drivers
from iterating hardware queues. From Keith Busch.
- A few cfq-iosched tweaks, from Tahsin Erdogan and me. Makes the
IOPS mode the default for non-rotational storage"
* 'for-4.2/core' of git://git.kernel.dk/linux-block: (35 commits)
cfq-iosched: fix other locations where blkcg_to_cfqgd() can return NULL
cfq-iosched: fix sysfs oops when attempting to read unconfigured weights
cfq-iosched: move group scheduling functions under ifdef
cfq-iosched: fix the setting of IOPS mode on SSDs
blktrace: Add blktrace.c to BLOCK LAYER in MAINTAINERS file
block, cgroup: implement policy-specific per-blkcg data
block: Make CFQ default to IOPS mode on SSDs
block: add blk_set_queue_dying() to blkdev.h
blk-mq: Shared tag enhancements
block: don't honor chunk sizes for data-less IO
block: only honor SG gap prevention for merges that contain data
block: fix returnvar.cocci warnings
block, dm: don't copy bios for request clones
block: remove management of bi_remaining when restoring original bi_end_io
block: replace trylock with mutex_lock in blkdev_reread_part()
block: export blkdev_reread_part() and __blkdev_reread_part()
suspend: simplify block I/O handling
block: collapse bio bit space
block: remove unused BIO_RW_BLOCK and BIO_EOF flags
block: remove BIO_EOPNOTSUPP
...
Diffstat (limited to 'drivers')
-rw-r--r-- | drivers/block/nbd.c | 50 | ||||
-rw-r--r-- | drivers/block/paride/pd.c | 4 | ||||
-rw-r--r-- | drivers/block/sx8.c | 4 | ||||
-rw-r--r-- | drivers/block/virtio_blk.c | 6 | ||||
-rw-r--r-- | drivers/ide/ide-atapi.c | 10 | ||||
-rw-r--r-- | drivers/ide/ide-cd.c | 10 | ||||
-rw-r--r-- | drivers/ide/ide-cd_ioctl.c | 2 | ||||
-rw-r--r-- | drivers/ide/ide-devsets.c | 2 | ||||
-rw-r--r-- | drivers/ide/ide-eh.c | 4 | ||||
-rw-r--r-- | drivers/ide/ide-floppy.c | 8 | ||||
-rw-r--r-- | drivers/ide/ide-io.c | 12 | ||||
-rw-r--r-- | drivers/ide/ide-ioctls.c | 2 | ||||
-rw-r--r-- | drivers/ide/ide-park.c | 4 | ||||
-rw-r--r-- | drivers/ide/ide-pm.c | 56 | ||||
-rw-r--r-- | drivers/ide/ide-tape.c | 6 | ||||
-rw-r--r-- | drivers/ide/ide-taskfile.c | 2 | ||||
-rw-r--r-- | drivers/md/bcache/io.c | 2 | ||||
-rw-r--r-- | drivers/md/bcache/request.c | 2 | ||||
-rw-r--r-- | drivers/md/dm-cache-target.c | 6 | ||||
-rw-r--r-- | drivers/md/dm-raid1.c | 2 | ||||
-rw-r--r-- | drivers/md/dm-snap.c | 1 | ||||
-rw-r--r-- | drivers/md/dm-table.c | 25 | ||||
-rw-r--r-- | drivers/md/dm-thin.c | 9 | ||||
-rw-r--r-- | drivers/md/dm-verity.c | 2 | ||||
-rw-r--r-- | drivers/md/dm.c | 171 | ||||
-rw-r--r-- | drivers/md/dm.h | 5 |
26 files changed, 170 insertions(+), 237 deletions(-)
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 39e5f7fae3ef..83a7ba4a3eec 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -230,29 +230,40 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) | |||
230 | int result, flags; | 230 | int result, flags; |
231 | struct nbd_request request; | 231 | struct nbd_request request; |
232 | unsigned long size = blk_rq_bytes(req); | 232 | unsigned long size = blk_rq_bytes(req); |
233 | u32 type; | ||
234 | |||
235 | if (req->cmd_type == REQ_TYPE_DRV_PRIV) | ||
236 | type = NBD_CMD_DISC; | ||
237 | else if (req->cmd_flags & REQ_DISCARD) | ||
238 | type = NBD_CMD_TRIM; | ||
239 | else if (req->cmd_flags & REQ_FLUSH) | ||
240 | type = NBD_CMD_FLUSH; | ||
241 | else if (rq_data_dir(req) == WRITE) | ||
242 | type = NBD_CMD_WRITE; | ||
243 | else | ||
244 | type = NBD_CMD_READ; | ||
233 | 245 | ||
234 | memset(&request, 0, sizeof(request)); | 246 | memset(&request, 0, sizeof(request)); |
235 | request.magic = htonl(NBD_REQUEST_MAGIC); | 247 | request.magic = htonl(NBD_REQUEST_MAGIC); |
236 | request.type = htonl(nbd_cmd(req)); | 248 | request.type = htonl(type); |
237 | 249 | if (type != NBD_CMD_FLUSH && type != NBD_CMD_DISC) { | |
238 | if (nbd_cmd(req) != NBD_CMD_FLUSH && nbd_cmd(req) != NBD_CMD_DISC) { | ||
239 | request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); | 250 | request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); |
240 | request.len = htonl(size); | 251 | request.len = htonl(size); |
241 | } | 252 | } |
242 | memcpy(request.handle, &req, sizeof(req)); | 253 | memcpy(request.handle, &req, sizeof(req)); |
243 | 254 | ||
244 | dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", | 255 | dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", |
245 | req, nbdcmd_to_ascii(nbd_cmd(req)), | 256 | req, nbdcmd_to_ascii(type), |
246 | (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); | 257 | (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); |
247 | result = sock_xmit(nbd, 1, &request, sizeof(request), | 258 | result = sock_xmit(nbd, 1, &request, sizeof(request), |
248 | (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); | 259 | (type == NBD_CMD_WRITE) ? MSG_MORE : 0); |
249 | if (result <= 0) { | 260 | if (result <= 0) { |
250 | dev_err(disk_to_dev(nbd->disk), | 261 | dev_err(disk_to_dev(nbd->disk), |
251 | "Send control failed (result %d)\n", result); | 262 | "Send control failed (result %d)\n", result); |
252 | return -EIO; | 263 | return -EIO; |
253 | } | 264 | } |
254 | 265 | ||
255 | if (nbd_cmd(req) == NBD_CMD_WRITE) { | 266 | if (type == NBD_CMD_WRITE) { |
256 | struct req_iterator iter; | 267 | struct req_iterator iter; |
257 | struct bio_vec bvec; | 268 | struct bio_vec bvec; |
258 | /* | 269 | /* |
@@ -352,7 +363,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) | |||
352 | } | 363 | } |
353 | 364 | ||
354 | dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); | 365 | dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); |
355 | if (nbd_cmd(req) == NBD_CMD_READ) { | 366 | if (rq_data_dir(req) != WRITE) { |
356 | struct req_iterator iter; | 367 | struct req_iterator iter; |
357 | struct bio_vec bvec; | 368 | struct bio_vec bvec; |
358 | 369 | ||
@@ -452,23 +463,11 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) | |||
452 | if (req->cmd_type != REQ_TYPE_FS) | 463 | if (req->cmd_type != REQ_TYPE_FS) |
453 | goto error_out; | 464 | goto error_out; |
454 | 465 | ||
455 | nbd_cmd(req) = NBD_CMD_READ; | 466 | if (rq_data_dir(req) == WRITE && |
456 | if (rq_data_dir(req) == WRITE) { | 467 | (nbd->flags & NBD_FLAG_READ_ONLY)) { |
457 | if ((req->cmd_flags & REQ_DISCARD)) { | 468 | dev_err(disk_to_dev(nbd->disk), |
458 | WARN_ON(!(nbd->flags & NBD_FLAG_SEND_TRIM)); | 469 | "Write on read-only\n"); |
459 | nbd_cmd(req) = NBD_CMD_TRIM; | 470 | goto error_out; |
460 | } else | ||
461 | nbd_cmd(req) = NBD_CMD_WRITE; | ||
462 | if (nbd->flags & NBD_FLAG_READ_ONLY) { | ||
463 | dev_err(disk_to_dev(nbd->disk), | ||
464 | "Write on read-only\n"); | ||
465 | goto error_out; | ||
466 | } | ||
467 | } | ||
468 | |||
469 | if (req->cmd_flags & REQ_FLUSH) { | ||
470 | BUG_ON(unlikely(blk_rq_sectors(req))); | ||
471 | nbd_cmd(req) = NBD_CMD_FLUSH; | ||
472 | } | 471 | } |
473 | 472 | ||
474 | req->errors = 0; | 473 | req->errors = 0; |
@@ -592,8 +591,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, | |||
592 | fsync_bdev(bdev); | 591 | fsync_bdev(bdev); |
593 | mutex_lock(&nbd->tx_lock); | 592 | mutex_lock(&nbd->tx_lock); |
594 | blk_rq_init(NULL, &sreq); | 593 | blk_rq_init(NULL, &sreq); |
595 | sreq.cmd_type = REQ_TYPE_SPECIAL; | 594 | sreq.cmd_type = REQ_TYPE_DRV_PRIV; |
596 | nbd_cmd(&sreq) = NBD_CMD_DISC; | ||
597 | 595 | ||
598 | /* Check again after getting mutex back. */ | 596 | /* Check again after getting mutex back. */ |
599 | if (!nbd->sock) | 597 | if (!nbd->sock) |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index d48715b287e6..dbb4da1cdca8 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
@@ -442,7 +442,7 @@ static char *pd_buf; /* buffer for request in progress */ | |||
442 | 442 | ||
443 | static enum action do_pd_io_start(void) | 443 | static enum action do_pd_io_start(void) |
444 | { | 444 | { |
445 | if (pd_req->cmd_type == REQ_TYPE_SPECIAL) { | 445 | if (pd_req->cmd_type == REQ_TYPE_DRV_PRIV) { |
446 | phase = pd_special; | 446 | phase = pd_special; |
447 | return pd_special(); | 447 | return pd_special(); |
448 | } | 448 | } |
@@ -725,7 +725,7 @@ static int pd_special_command(struct pd_unit *disk, | |||
725 | if (IS_ERR(rq)) | 725 | if (IS_ERR(rq)) |
726 | return PTR_ERR(rq); | 726 | return PTR_ERR(rq); |
727 | 727 | ||
728 | rq->cmd_type = REQ_TYPE_SPECIAL; | 728 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
729 | rq->special = func; | 729 | rq->special = func; |
730 | 730 | ||
731 | err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); | 731 | err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index 5d552857de41..59c91d49b14b 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
@@ -620,7 +620,7 @@ static int carm_array_info (struct carm_host *host, unsigned int array_idx) | |||
620 | spin_unlock_irq(&host->lock); | 620 | spin_unlock_irq(&host->lock); |
621 | 621 | ||
622 | DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); | 622 | DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); |
623 | crq->rq->cmd_type = REQ_TYPE_SPECIAL; | 623 | crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; |
624 | crq->rq->special = crq; | 624 | crq->rq->special = crq; |
625 | blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); | 625 | blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); |
626 | 626 | ||
@@ -661,7 +661,7 @@ static int carm_send_special (struct carm_host *host, carm_sspc_t func) | |||
661 | crq->msg_bucket = (u32) rc; | 661 | crq->msg_bucket = (u32) rc; |
662 | 662 | ||
663 | DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); | 663 | DPRINTK("blk_execute_rq_nowait, tag == %u\n", idx); |
664 | crq->rq->cmd_type = REQ_TYPE_SPECIAL; | 664 | crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; |
665 | crq->rq->special = crq; | 665 | crq->rq->special = crq; |
666 | blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); | 666 | blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); |
667 | 667 | ||
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 5ea2f0bbbc7c..d4d05f064d39 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
@@ -124,7 +124,7 @@ static inline void virtblk_request_done(struct request *req) | |||
124 | req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); | 124 | req->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual); |
125 | req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); | 125 | req->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len); |
126 | req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); | 126 | req->errors = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors); |
127 | } else if (req->cmd_type == REQ_TYPE_SPECIAL) { | 127 | } else if (req->cmd_type == REQ_TYPE_DRV_PRIV) { |
128 | req->errors = (error != 0); | 128 | req->errors = (error != 0); |
129 | } | 129 | } |
130 | 130 | ||
@@ -188,7 +188,7 @@ static int virtio_queue_rq(struct blk_mq_hw_ctx *hctx, | |||
188 | vbr->out_hdr.sector = 0; | 188 | vbr->out_hdr.sector = 0; |
189 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); | 189 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); |
190 | break; | 190 | break; |
191 | case REQ_TYPE_SPECIAL: | 191 | case REQ_TYPE_DRV_PRIV: |
192 | vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); | 192 | vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_GET_ID); |
193 | vbr->out_hdr.sector = 0; | 193 | vbr->out_hdr.sector = 0; |
194 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); | 194 | vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(vbr->req)); |
@@ -251,7 +251,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str) | |||
251 | return PTR_ERR(req); | 251 | return PTR_ERR(req); |
252 | } | 252 | } |
253 | 253 | ||
254 | req->cmd_type = REQ_TYPE_SPECIAL; | 254 | req->cmd_type = REQ_TYPE_DRV_PRIV; |
255 | err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); | 255 | err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); |
256 | blk_put_request(req); | 256 | blk_put_request(req); |
257 | 257 | ||
diff --git a/drivers/ide/ide-atapi.c b/drivers/ide/ide-atapi.c index fac3d9da2e07..1362ad80a76c 100644 --- a/drivers/ide/ide-atapi.c +++ b/drivers/ide/ide-atapi.c | |||
@@ -93,7 +93,7 @@ int ide_queue_pc_tail(ide_drive_t *drive, struct gendisk *disk, | |||
93 | int error; | 93 | int error; |
94 | 94 | ||
95 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 95 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
96 | rq->cmd_type = REQ_TYPE_SPECIAL; | 96 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
97 | rq->special = (char *)pc; | 97 | rq->special = (char *)pc; |
98 | 98 | ||
99 | if (buf && bufflen) { | 99 | if (buf && bufflen) { |
@@ -191,7 +191,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) | |||
191 | 191 | ||
192 | BUG_ON(sense_len > sizeof(*sense)); | 192 | BUG_ON(sense_len > sizeof(*sense)); |
193 | 193 | ||
194 | if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed) | 194 | if (rq->cmd_type == REQ_TYPE_ATA_SENSE || drive->sense_rq_armed) |
195 | return; | 195 | return; |
196 | 196 | ||
197 | memset(sense, 0, sizeof(*sense)); | 197 | memset(sense, 0, sizeof(*sense)); |
@@ -210,7 +210,7 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq) | |||
210 | sense_rq->rq_disk = rq->rq_disk; | 210 | sense_rq->rq_disk = rq->rq_disk; |
211 | sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; | 211 | sense_rq->cmd[0] = GPCMD_REQUEST_SENSE; |
212 | sense_rq->cmd[4] = cmd_len; | 212 | sense_rq->cmd[4] = cmd_len; |
213 | sense_rq->cmd_type = REQ_TYPE_SENSE; | 213 | sense_rq->cmd_type = REQ_TYPE_ATA_SENSE; |
214 | sense_rq->cmd_flags |= REQ_PREEMPT; | 214 | sense_rq->cmd_flags |= REQ_PREEMPT; |
215 | 215 | ||
216 | if (drive->media == ide_tape) | 216 | if (drive->media == ide_tape) |
@@ -310,7 +310,7 @@ int ide_cd_get_xferlen(struct request *rq) | |||
310 | switch (rq->cmd_type) { | 310 | switch (rq->cmd_type) { |
311 | case REQ_TYPE_FS: | 311 | case REQ_TYPE_FS: |
312 | return 32768; | 312 | return 32768; |
313 | case REQ_TYPE_SENSE: | 313 | case REQ_TYPE_ATA_SENSE: |
314 | case REQ_TYPE_BLOCK_PC: | 314 | case REQ_TYPE_BLOCK_PC: |
315 | case REQ_TYPE_ATA_PC: | 315 | case REQ_TYPE_ATA_PC: |
316 | return blk_rq_bytes(rq); | 316 | return blk_rq_bytes(rq); |
@@ -477,7 +477,7 @@ static ide_startstop_t ide_pc_intr(ide_drive_t *drive) | |||
477 | if (uptodate == 0) | 477 | if (uptodate == 0) |
478 | drive->failed_pc = NULL; | 478 | drive->failed_pc = NULL; |
479 | 479 | ||
480 | if (rq->cmd_type == REQ_TYPE_SPECIAL) { | 480 | if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { |
481 | rq->errors = 0; | 481 | rq->errors = 0; |
482 | error = 0; | 482 | error = 0; |
483 | } else { | 483 | } else { |
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c index 0b510bafd90e..64a6b827b3dd 100644 --- a/drivers/ide/ide-cd.c +++ b/drivers/ide/ide-cd.c | |||
@@ -210,7 +210,7 @@ static void cdrom_analyze_sense_data(ide_drive_t *drive, | |||
210 | static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) | 210 | static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) |
211 | { | 211 | { |
212 | /* | 212 | /* |
213 | * For REQ_TYPE_SENSE, "rq->special" points to the original | 213 | * For REQ_TYPE_ATA_SENSE, "rq->special" points to the original |
214 | * failed request. Also, the sense data should be read | 214 | * failed request. Also, the sense data should be read |
215 | * directly from rq which might be different from the original | 215 | * directly from rq which might be different from the original |
216 | * sense buffer if it got copied during mapping. | 216 | * sense buffer if it got copied during mapping. |
@@ -285,7 +285,7 @@ static int cdrom_decode_status(ide_drive_t *drive, u8 stat) | |||
285 | "stat 0x%x", | 285 | "stat 0x%x", |
286 | rq->cmd[0], rq->cmd_type, err, stat); | 286 | rq->cmd[0], rq->cmd_type, err, stat); |
287 | 287 | ||
288 | if (rq->cmd_type == REQ_TYPE_SENSE) { | 288 | if (rq->cmd_type == REQ_TYPE_ATA_SENSE) { |
289 | /* | 289 | /* |
290 | * We got an error trying to get sense info from the drive | 290 | * We got an error trying to get sense info from the drive |
291 | * (probably while trying to recover from a former error). | 291 | * (probably while trying to recover from a former error). |
@@ -526,7 +526,7 @@ static ide_startstop_t cdrom_newpc_intr(ide_drive_t *drive) | |||
526 | ide_expiry_t *expiry = NULL; | 526 | ide_expiry_t *expiry = NULL; |
527 | int dma_error = 0, dma, thislen, uptodate = 0; | 527 | int dma_error = 0, dma, thislen, uptodate = 0; |
528 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; | 528 | int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; |
529 | int sense = (rq->cmd_type == REQ_TYPE_SENSE); | 529 | int sense = (rq->cmd_type == REQ_TYPE_ATA_SENSE); |
530 | unsigned int timeout; | 530 | unsigned int timeout; |
531 | u16 len; | 531 | u16 len; |
532 | u8 ireason, stat; | 532 | u8 ireason, stat; |
@@ -791,7 +791,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
791 | if (cdrom_start_rw(drive, rq) == ide_stopped) | 791 | if (cdrom_start_rw(drive, rq) == ide_stopped) |
792 | goto out_end; | 792 | goto out_end; |
793 | break; | 793 | break; |
794 | case REQ_TYPE_SENSE: | 794 | case REQ_TYPE_ATA_SENSE: |
795 | case REQ_TYPE_BLOCK_PC: | 795 | case REQ_TYPE_BLOCK_PC: |
796 | case REQ_TYPE_ATA_PC: | 796 | case REQ_TYPE_ATA_PC: |
797 | if (!rq->timeout) | 797 | if (!rq->timeout) |
@@ -799,7 +799,7 @@ static ide_startstop_t ide_cd_do_request(ide_drive_t *drive, struct request *rq, | |||
799 | 799 | ||
800 | cdrom_do_block_pc(drive, rq); | 800 | cdrom_do_block_pc(drive, rq); |
801 | break; | 801 | break; |
802 | case REQ_TYPE_SPECIAL: | 802 | case REQ_TYPE_DRV_PRIV: |
803 | /* right now this can only be a reset... */ | 803 | /* right now this can only be a reset... */ |
804 | uptodate = 1; | 804 | uptodate = 1; |
805 | goto out_end; | 805 | goto out_end; |
diff --git a/drivers/ide/ide-cd_ioctl.c b/drivers/ide/ide-cd_ioctl.c index 02caa7dd51c8..066e39036518 100644 --- a/drivers/ide/ide-cd_ioctl.c +++ b/drivers/ide/ide-cd_ioctl.c | |||
@@ -304,7 +304,7 @@ int ide_cdrom_reset(struct cdrom_device_info *cdi) | |||
304 | int ret; | 304 | int ret; |
305 | 305 | ||
306 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 306 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
307 | rq->cmd_type = REQ_TYPE_SPECIAL; | 307 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
308 | rq->cmd_flags = REQ_QUIET; | 308 | rq->cmd_flags = REQ_QUIET; |
309 | ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); | 309 | ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); |
310 | blk_put_request(rq); | 310 | blk_put_request(rq); |
diff --git a/drivers/ide/ide-devsets.c b/drivers/ide/ide-devsets.c index 9e98122f646e..b05a74d78ef5 100644 --- a/drivers/ide/ide-devsets.c +++ b/drivers/ide/ide-devsets.c | |||
@@ -166,7 +166,7 @@ int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, | |||
166 | return setting->set(drive, arg); | 166 | return setting->set(drive, arg); |
167 | 167 | ||
168 | rq = blk_get_request(q, READ, __GFP_WAIT); | 168 | rq = blk_get_request(q, READ, __GFP_WAIT); |
169 | rq->cmd_type = REQ_TYPE_SPECIAL; | 169 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
170 | rq->cmd_len = 5; | 170 | rq->cmd_len = 5; |
171 | rq->cmd[0] = REQ_DEVSET_EXEC; | 171 | rq->cmd[0] = REQ_DEVSET_EXEC; |
172 | *(int *)&rq->cmd[1] = arg; | 172 | *(int *)&rq->cmd[1] = arg; |
diff --git a/drivers/ide/ide-eh.c b/drivers/ide/ide-eh.c index 32970664c275..d6da011299f5 100644 --- a/drivers/ide/ide-eh.c +++ b/drivers/ide/ide-eh.c | |||
@@ -129,7 +129,7 @@ ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) | |||
129 | 129 | ||
130 | if (cmd) | 130 | if (cmd) |
131 | ide_complete_cmd(drive, cmd, stat, err); | 131 | ide_complete_cmd(drive, cmd, stat, err); |
132 | } else if (blk_pm_request(rq)) { | 132 | } else if (ata_pm_request(rq)) { |
133 | rq->errors = 1; | 133 | rq->errors = 1; |
134 | ide_complete_pm_rq(drive, rq); | 134 | ide_complete_pm_rq(drive, rq); |
135 | return ide_stopped; | 135 | return ide_stopped; |
@@ -147,7 +147,7 @@ static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) | |||
147 | { | 147 | { |
148 | struct request *rq = drive->hwif->rq; | 148 | struct request *rq = drive->hwif->rq; |
149 | 149 | ||
150 | if (rq && rq->cmd_type == REQ_TYPE_SPECIAL && | 150 | if (rq && rq->cmd_type == REQ_TYPE_DRV_PRIV && |
151 | rq->cmd[0] == REQ_DRIVE_RESET) { | 151 | rq->cmd[0] == REQ_DRIVE_RESET) { |
152 | if (err <= 0 && rq->errors == 0) | 152 | if (err <= 0 && rq->errors == 0) |
153 | rq->errors = -EIO; | 153 | rq->errors = -EIO; |
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index 8c6363cdd208..2fb5350c5410 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
@@ -97,7 +97,7 @@ static int ide_floppy_callback(ide_drive_t *drive, int dsc) | |||
97 | "Aborting request!\n"); | 97 | "Aborting request!\n"); |
98 | } | 98 | } |
99 | 99 | ||
100 | if (rq->cmd_type == REQ_TYPE_SPECIAL) | 100 | if (rq->cmd_type == REQ_TYPE_DRV_PRIV) |
101 | rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; | 101 | rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; |
102 | 102 | ||
103 | return uptodate; | 103 | return uptodate; |
@@ -246,7 +246,7 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, | |||
246 | } else | 246 | } else |
247 | printk(KERN_ERR PFX "%s: I/O error\n", drive->name); | 247 | printk(KERN_ERR PFX "%s: I/O error\n", drive->name); |
248 | 248 | ||
249 | if (rq->cmd_type == REQ_TYPE_SPECIAL) { | 249 | if (rq->cmd_type == REQ_TYPE_DRV_PRIV) { |
250 | rq->errors = 0; | 250 | rq->errors = 0; |
251 | ide_complete_rq(drive, 0, blk_rq_bytes(rq)); | 251 | ide_complete_rq(drive, 0, blk_rq_bytes(rq)); |
252 | return ide_stopped; | 252 | return ide_stopped; |
@@ -265,8 +265,8 @@ static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, | |||
265 | pc = &floppy->queued_pc; | 265 | pc = &floppy->queued_pc; |
266 | idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); | 266 | idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); |
267 | break; | 267 | break; |
268 | case REQ_TYPE_SPECIAL: | 268 | case REQ_TYPE_DRV_PRIV: |
269 | case REQ_TYPE_SENSE: | 269 | case REQ_TYPE_ATA_SENSE: |
270 | pc = (struct ide_atapi_pc *)rq->special; | 270 | pc = (struct ide_atapi_pc *)rq->special; |
271 | break; | 271 | break; |
272 | case REQ_TYPE_BLOCK_PC: | 272 | case REQ_TYPE_BLOCK_PC: |
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 177db6d5b2f5..669ea1e45795 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c | |||
@@ -135,7 +135,7 @@ EXPORT_SYMBOL(ide_complete_rq); | |||
135 | 135 | ||
136 | void ide_kill_rq(ide_drive_t *drive, struct request *rq) | 136 | void ide_kill_rq(ide_drive_t *drive, struct request *rq) |
137 | { | 137 | { |
138 | u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk; | 138 | u8 drv_req = (rq->cmd_type == REQ_TYPE_DRV_PRIV) && rq->rq_disk; |
139 | u8 media = drive->media; | 139 | u8 media = drive->media; |
140 | 140 | ||
141 | drive->failed_pc = NULL; | 141 | drive->failed_pc = NULL; |
@@ -320,7 +320,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) | |||
320 | goto kill_rq; | 320 | goto kill_rq; |
321 | } | 321 | } |
322 | 322 | ||
323 | if (blk_pm_request(rq)) | 323 | if (ata_pm_request(rq)) |
324 | ide_check_pm_state(drive, rq); | 324 | ide_check_pm_state(drive, rq); |
325 | 325 | ||
326 | drive->hwif->tp_ops->dev_select(drive); | 326 | drive->hwif->tp_ops->dev_select(drive); |
@@ -342,8 +342,8 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) | |||
342 | 342 | ||
343 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) | 343 | if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) |
344 | return execute_drive_cmd(drive, rq); | 344 | return execute_drive_cmd(drive, rq); |
345 | else if (blk_pm_request(rq)) { | 345 | else if (ata_pm_request(rq)) { |
346 | struct request_pm_state *pm = rq->special; | 346 | struct ide_pm_state *pm = rq->special; |
347 | #ifdef DEBUG_PM | 347 | #ifdef DEBUG_PM |
348 | printk("%s: start_power_step(step: %d)\n", | 348 | printk("%s: start_power_step(step: %d)\n", |
349 | drive->name, pm->pm_step); | 349 | drive->name, pm->pm_step); |
@@ -353,7 +353,7 @@ static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq) | |||
353 | pm->pm_step == IDE_PM_COMPLETED) | 353 | pm->pm_step == IDE_PM_COMPLETED) |
354 | ide_complete_pm_rq(drive, rq); | 354 | ide_complete_pm_rq(drive, rq); |
355 | return startstop; | 355 | return startstop; |
356 | } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL) | 356 | } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_DRV_PRIV) |
357 | /* | 357 | /* |
358 | * TODO: Once all ULDs have been modified to | 358 | * TODO: Once all ULDs have been modified to |
359 | * check for specific op codes rather than | 359 | * check for specific op codes rather than |
@@ -538,7 +538,7 @@ repeat: | |||
538 | * state machine. | 538 | * state machine. |
539 | */ | 539 | */ |
540 | if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && | 540 | if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && |
541 | blk_pm_request(rq) == 0 && | 541 | ata_pm_request(rq) == 0 && |
542 | (rq->cmd_flags & REQ_PREEMPT) == 0) { | 542 | (rq->cmd_flags & REQ_PREEMPT) == 0) { |
543 | /* there should be no pending command at this point */ | 543 | /* there should be no pending command at this point */ |
544 | ide_unlock_port(hwif); | 544 | ide_unlock_port(hwif); |
diff --git a/drivers/ide/ide-ioctls.c b/drivers/ide/ide-ioctls.c index 6233fa2cb8a9..aa2e9b77b20d 100644 --- a/drivers/ide/ide-ioctls.c +++ b/drivers/ide/ide-ioctls.c | |||
@@ -222,7 +222,7 @@ static int generic_drive_reset(ide_drive_t *drive) | |||
222 | int ret = 0; | 222 | int ret = 0; |
223 | 223 | ||
224 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 224 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
225 | rq->cmd_type = REQ_TYPE_SPECIAL; | 225 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
226 | rq->cmd_len = 1; | 226 | rq->cmd_len = 1; |
227 | rq->cmd[0] = REQ_DRIVE_RESET; | 227 | rq->cmd[0] = REQ_DRIVE_RESET; |
228 | if (blk_execute_rq(drive->queue, NULL, rq, 1)) | 228 | if (blk_execute_rq(drive->queue, NULL, rq, 1)) |
diff --git a/drivers/ide/ide-park.c b/drivers/ide/ide-park.c index ca958604cda2..c80868520488 100644 --- a/drivers/ide/ide-park.c +++ b/drivers/ide/ide-park.c | |||
@@ -34,7 +34,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) | |||
34 | rq = blk_get_request(q, READ, __GFP_WAIT); | 34 | rq = blk_get_request(q, READ, __GFP_WAIT); |
35 | rq->cmd[0] = REQ_PARK_HEADS; | 35 | rq->cmd[0] = REQ_PARK_HEADS; |
36 | rq->cmd_len = 1; | 36 | rq->cmd_len = 1; |
37 | rq->cmd_type = REQ_TYPE_SPECIAL; | 37 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
38 | rq->special = &timeout; | 38 | rq->special = &timeout; |
39 | rc = blk_execute_rq(q, NULL, rq, 1); | 39 | rc = blk_execute_rq(q, NULL, rq, 1); |
40 | blk_put_request(rq); | 40 | blk_put_request(rq); |
@@ -51,7 +51,7 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout) | |||
51 | 51 | ||
52 | rq->cmd[0] = REQ_UNPARK_HEADS; | 52 | rq->cmd[0] = REQ_UNPARK_HEADS; |
53 | rq->cmd_len = 1; | 53 | rq->cmd_len = 1; |
54 | rq->cmd_type = REQ_TYPE_SPECIAL; | 54 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
55 | elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); | 55 | elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); |
56 | 56 | ||
57 | out: | 57 | out: |
diff --git a/drivers/ide/ide-pm.c b/drivers/ide/ide-pm.c index 8d1e32d7cd97..081e43458d50 100644 --- a/drivers/ide/ide-pm.c +++ b/drivers/ide/ide-pm.c | |||
@@ -8,7 +8,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
8 | ide_drive_t *pair = ide_get_pair_dev(drive); | 8 | ide_drive_t *pair = ide_get_pair_dev(drive); |
9 | ide_hwif_t *hwif = drive->hwif; | 9 | ide_hwif_t *hwif = drive->hwif; |
10 | struct request *rq; | 10 | struct request *rq; |
11 | struct request_pm_state rqpm; | 11 | struct ide_pm_state rqpm; |
12 | int ret; | 12 | int ret; |
13 | 13 | ||
14 | if (ide_port_acpi(hwif)) { | 14 | if (ide_port_acpi(hwif)) { |
@@ -19,7 +19,7 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
19 | 19 | ||
20 | memset(&rqpm, 0, sizeof(rqpm)); | 20 | memset(&rqpm, 0, sizeof(rqpm)); |
21 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 21 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
22 | rq->cmd_type = REQ_TYPE_PM_SUSPEND; | 22 | rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; |
23 | rq->special = &rqpm; | 23 | rq->special = &rqpm; |
24 | rqpm.pm_step = IDE_PM_START_SUSPEND; | 24 | rqpm.pm_step = IDE_PM_START_SUSPEND; |
25 | if (mesg.event == PM_EVENT_PRETHAW) | 25 | if (mesg.event == PM_EVENT_PRETHAW) |
@@ -38,13 +38,43 @@ int generic_ide_suspend(struct device *dev, pm_message_t mesg) | |||
38 | return ret; | 38 | return ret; |
39 | } | 39 | } |
40 | 40 | ||
41 | static void ide_end_sync_rq(struct request *rq, int error) | ||
42 | { | ||
43 | complete(rq->end_io_data); | ||
44 | } | ||
45 | |||
46 | static int ide_pm_execute_rq(struct request *rq) | ||
47 | { | ||
48 | struct request_queue *q = rq->q; | ||
49 | DECLARE_COMPLETION_ONSTACK(wait); | ||
50 | |||
51 | rq->end_io_data = &wait; | ||
52 | rq->end_io = ide_end_sync_rq; | ||
53 | |||
54 | spin_lock_irq(q->queue_lock); | ||
55 | if (unlikely(blk_queue_dying(q))) { | ||
56 | rq->cmd_flags |= REQ_QUIET; | ||
57 | rq->errors = -ENXIO; | ||
58 | __blk_end_request_all(rq, rq->errors); | ||
59 | spin_unlock_irq(q->queue_lock); | ||
60 | return -ENXIO; | ||
61 | } | ||
62 | __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT); | ||
63 | __blk_run_queue_uncond(q); | ||
64 | spin_unlock_irq(q->queue_lock); | ||
65 | |||
66 | wait_for_completion_io(&wait); | ||
67 | |||
68 | return rq->errors ? -EIO : 0; | ||
69 | } | ||
70 | |||
41 | int generic_ide_resume(struct device *dev) | 71 | int generic_ide_resume(struct device *dev) |
42 | { | 72 | { |
43 | ide_drive_t *drive = to_ide_device(dev); | 73 | ide_drive_t *drive = to_ide_device(dev); |
44 | ide_drive_t *pair = ide_get_pair_dev(drive); | 74 | ide_drive_t *pair = ide_get_pair_dev(drive); |
45 | ide_hwif_t *hwif = drive->hwif; | 75 | ide_hwif_t *hwif = drive->hwif; |
46 | struct request *rq; | 76 | struct request *rq; |
47 | struct request_pm_state rqpm; | 77 | struct ide_pm_state rqpm; |
48 | int err; | 78 | int err; |
49 | 79 | ||
50 | if (ide_port_acpi(hwif)) { | 80 | if (ide_port_acpi(hwif)) { |
@@ -59,13 +89,13 @@ int generic_ide_resume(struct device *dev) | |||
59 | 89 | ||
60 | memset(&rqpm, 0, sizeof(rqpm)); | 90 | memset(&rqpm, 0, sizeof(rqpm)); |
61 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 91 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
62 | rq->cmd_type = REQ_TYPE_PM_RESUME; | 92 | rq->cmd_type = REQ_TYPE_ATA_PM_RESUME; |
63 | rq->cmd_flags |= REQ_PREEMPT; | 93 | rq->cmd_flags |= REQ_PREEMPT; |
64 | rq->special = &rqpm; | 94 | rq->special = &rqpm; |
65 | rqpm.pm_step = IDE_PM_START_RESUME; | 95 | rqpm.pm_step = IDE_PM_START_RESUME; |
66 | rqpm.pm_state = PM_EVENT_ON; | 96 | rqpm.pm_state = PM_EVENT_ON; |
67 | 97 | ||
68 | err = blk_execute_rq(drive->queue, NULL, rq, 1); | 98 | err = ide_pm_execute_rq(rq); |
69 | blk_put_request(rq); | 99 | blk_put_request(rq); |
70 | 100 | ||
71 | if (err == 0 && dev->driver) { | 101 | if (err == 0 && dev->driver) { |
@@ -80,7 +110,7 @@ int generic_ide_resume(struct device *dev) | |||
80 | 110 | ||
81 | void ide_complete_power_step(ide_drive_t *drive, struct request *rq) | 111 | void ide_complete_power_step(ide_drive_t *drive, struct request *rq) |
82 | { | 112 | { |
83 | struct request_pm_state *pm = rq->special; | 113 | struct ide_pm_state *pm = rq->special; |
84 | 114 | ||
85 | #ifdef DEBUG_PM | 115 | #ifdef DEBUG_PM |
86 | printk(KERN_INFO "%s: complete_power_step(step: %d)\n", | 116 | printk(KERN_INFO "%s: complete_power_step(step: %d)\n", |
@@ -110,7 +140,7 @@ void ide_complete_power_step(ide_drive_t *drive, struct request *rq) | |||
110 | 140 | ||
111 | ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) | 141 | ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) |
112 | { | 142 | { |
113 | struct request_pm_state *pm = rq->special; | 143 | struct ide_pm_state *pm = rq->special; |
114 | struct ide_cmd cmd = { }; | 144 | struct ide_cmd cmd = { }; |
115 | 145 | ||
116 | switch (pm->pm_step) { | 146 | switch (pm->pm_step) { |
@@ -182,7 +212,7 @@ out_do_tf: | |||
182 | void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) | 212 | void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) |
183 | { | 213 | { |
184 | struct request_queue *q = drive->queue; | 214 | struct request_queue *q = drive->queue; |
185 | struct request_pm_state *pm = rq->special; | 215 | struct ide_pm_state *pm = rq->special; |
186 | unsigned long flags; | 216 | unsigned long flags; |
187 | 217 | ||
188 | ide_complete_power_step(drive, rq); | 218 | ide_complete_power_step(drive, rq); |
@@ -191,10 +221,10 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) | |||
191 | 221 | ||
192 | #ifdef DEBUG_PM | 222 | #ifdef DEBUG_PM |
193 | printk("%s: completing PM request, %s\n", drive->name, | 223 | printk("%s: completing PM request, %s\n", drive->name, |
194 | (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume"); | 224 | (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) ? "suspend" : "resume"); |
195 | #endif | 225 | #endif |
196 | spin_lock_irqsave(q->queue_lock, flags); | 226 | spin_lock_irqsave(q->queue_lock, flags); |
197 | if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) | 227 | if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND) |
198 | blk_stop_queue(q); | 228 | blk_stop_queue(q); |
199 | else | 229 | else |
200 | drive->dev_flags &= ~IDE_DFLAG_BLOCKED; | 230 | drive->dev_flags &= ~IDE_DFLAG_BLOCKED; |
@@ -208,13 +238,13 @@ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) | |||
208 | 238 | ||
209 | void ide_check_pm_state(ide_drive_t *drive, struct request *rq) | 239 | void ide_check_pm_state(ide_drive_t *drive, struct request *rq) |
210 | { | 240 | { |
211 | struct request_pm_state *pm = rq->special; | 241 | struct ide_pm_state *pm = rq->special; |
212 | 242 | ||
213 | if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && | 243 | if (rq->cmd_type == REQ_TYPE_ATA_PM_SUSPEND && |
214 | pm->pm_step == IDE_PM_START_SUSPEND) | 244 | pm->pm_step == IDE_PM_START_SUSPEND) |
215 | /* Mark drive blocked when starting the suspend sequence. */ | 245 | /* Mark drive blocked when starting the suspend sequence. */ |
216 | drive->dev_flags |= IDE_DFLAG_BLOCKED; | 246 | drive->dev_flags |= IDE_DFLAG_BLOCKED; |
217 | else if (rq->cmd_type == REQ_TYPE_PM_RESUME && | 247 | else if (rq->cmd_type == REQ_TYPE_ATA_PM_RESUME && |
218 | pm->pm_step == IDE_PM_START_RESUME) { | 248 | pm->pm_step == IDE_PM_START_RESUME) { |
219 | /* | 249 | /* |
220 | * The first thing we do on wakeup is to wait for BSY bit to | 250 | * The first thing we do on wakeup is to wait for BSY bit to |
diff --git a/drivers/ide/ide-tape.c b/drivers/ide/ide-tape.c index 6eb738ca6d2f..f5d51d1d09ee 100644 --- a/drivers/ide/ide-tape.c +++ b/drivers/ide/ide-tape.c | |||
@@ -576,8 +576,8 @@ static ide_startstop_t idetape_do_request(ide_drive_t *drive, | |||
576 | rq->cmd[0], (unsigned long long)blk_rq_pos(rq), | 576 | rq->cmd[0], (unsigned long long)blk_rq_pos(rq), |
577 | blk_rq_sectors(rq)); | 577 | blk_rq_sectors(rq)); |
578 | 578 | ||
579 | BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL || | 579 | BUG_ON(!(rq->cmd_type == REQ_TYPE_DRV_PRIV || |
580 | rq->cmd_type == REQ_TYPE_SENSE)); | 580 | rq->cmd_type == REQ_TYPE_ATA_SENSE)); |
581 | 581 | ||
582 | /* Retry a failed packet command */ | 582 | /* Retry a failed packet command */ |
583 | if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { | 583 | if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { |
@@ -853,7 +853,7 @@ static int idetape_queue_rw_tail(ide_drive_t *drive, int cmd, int size) | |||
853 | BUG_ON(size < 0 || size % tape->blk_size); | 853 | BUG_ON(size < 0 || size % tape->blk_size); |
854 | 854 | ||
855 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); | 855 | rq = blk_get_request(drive->queue, READ, __GFP_WAIT); |
856 | rq->cmd_type = REQ_TYPE_SPECIAL; | 856 | rq->cmd_type = REQ_TYPE_DRV_PRIV; |
857 | rq->cmd[13] = cmd; | 857 | rq->cmd[13] = cmd; |
858 | rq->rq_disk = tape->disk; | 858 | rq->rq_disk = tape->disk; |
859 | rq->__sector = tape->first_frame; | 859 | rq->__sector = tape->first_frame; |
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c index dabb88b1cbec..0979e126fff1 100644 --- a/drivers/ide/ide-taskfile.c +++ b/drivers/ide/ide-taskfile.c | |||
@@ -186,7 +186,7 @@ static ide_startstop_t task_no_data_intr(ide_drive_t *drive) | |||
186 | tf->command == ATA_CMD_CHK_POWER) { | 186 | tf->command == ATA_CMD_CHK_POWER) { |
187 | struct request *rq = hwif->rq; | 187 | struct request *rq = hwif->rq; |
188 | 188 | ||
189 | if (blk_pm_request(rq)) | 189 | if (ata_pm_request(rq)) |
190 | ide_complete_pm_rq(drive, rq); | 190 | ide_complete_pm_rq(drive, rq); |
191 | else | 191 | else |
192 | ide_finish_cmd(drive, cmd, stat); | 192 | ide_finish_cmd(drive, cmd, stat); |
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c index fa028fa82df4..cb64e64a4789 100644 --- a/drivers/md/bcache/io.c +++ b/drivers/md/bcache/io.c | |||
@@ -55,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl) | |||
55 | 55 | ||
56 | s->bio->bi_end_io = s->bi_end_io; | 56 | s->bio->bi_end_io = s->bi_end_io; |
57 | s->bio->bi_private = s->bi_private; | 57 | s->bio->bi_private = s->bi_private; |
58 | bio_endio_nodec(s->bio, 0); | 58 | bio_endio(s->bio, 0); |
59 | 59 | ||
60 | closure_debug_destroy(&s->cl); | 60 | closure_debug_destroy(&s->cl); |
61 | mempool_free(s, s->p->bio_split_hook); | 61 | mempool_free(s, s->p->bio_split_hook); |
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index ab43faddb447..1616f668a4cb 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c | |||
@@ -619,7 +619,7 @@ static void do_bio_hook(struct search *s, struct bio *orig_bio) | |||
619 | bio->bi_end_io = request_endio; | 619 | bio->bi_end_io = request_endio; |
620 | bio->bi_private = &s->cl; | 620 | bio->bi_private = &s->cl; |
621 | 621 | ||
622 | atomic_set(&bio->bi_cnt, 3); | 622 | bio_cnt_set(bio, 3); |
623 | } | 623 | } |
624 | 624 | ||
625 | static void search_free(struct closure *cl) | 625 | static void search_free(struct closure *cl) |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 7755af351867..41b2594a80c6 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c | |||
@@ -86,12 +86,6 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) | |||
86 | { | 86 | { |
87 | bio->bi_end_io = h->bi_end_io; | 87 | bio->bi_end_io = h->bi_end_io; |
88 | bio->bi_private = h->bi_private; | 88 | bio->bi_private = h->bi_private; |
89 | |||
90 | /* | ||
91 | * Must bump bi_remaining to allow bio to complete with | ||
92 | * restored bi_end_io. | ||
93 | */ | ||
94 | atomic_inc(&bio->bi_remaining); | ||
95 | } | 89 | } |
96 | 90 | ||
97 | /*----------------------------------------------------------------*/ | 91 | /*----------------------------------------------------------------*/ |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 089d62751f7f..743fa9bbae9e 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -1254,8 +1254,6 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error) | |||
1254 | dm_bio_restore(bd, bio); | 1254 | dm_bio_restore(bd, bio); |
1255 | bio_record->details.bi_bdev = NULL; | 1255 | bio_record->details.bi_bdev = NULL; |
1256 | 1256 | ||
1257 | atomic_inc(&bio->bi_remaining); | ||
1258 | |||
1259 | queue_bio(ms, bio, rw); | 1257 | queue_bio(ms, bio, rw); |
1260 | return DM_ENDIO_INCOMPLETE; | 1258 | return DM_ENDIO_INCOMPLETE; |
1261 | } | 1259 | } |
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f83a0f3fc365..7c82d3ccce87 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1478,7 +1478,6 @@ out: | |||
1478 | if (full_bio) { | 1478 | if (full_bio) { |
1479 | full_bio->bi_end_io = pe->full_bio_end_io; | 1479 | full_bio->bi_end_io = pe->full_bio_end_io; |
1480 | full_bio->bi_private = pe->full_bio_private; | 1480 | full_bio->bi_private = pe->full_bio_private; |
1481 | atomic_inc(&full_bio->bi_remaining); | ||
1482 | } | 1481 | } |
1483 | increment_pending_exceptions_done_count(); | 1482 | increment_pending_exceptions_done_count(); |
1484 | 1483 | ||
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 16ba55ad7089..a5f94125ad01 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c | |||
@@ -942,21 +942,28 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * | |||
942 | { | 942 | { |
943 | unsigned type = dm_table_get_type(t); | 943 | unsigned type = dm_table_get_type(t); |
944 | unsigned per_bio_data_size = 0; | 944 | unsigned per_bio_data_size = 0; |
945 | struct dm_target *tgt; | ||
946 | unsigned i; | 945 | unsigned i; |
947 | 946 | ||
948 | if (unlikely(type == DM_TYPE_NONE)) { | 947 | switch (type) { |
948 | case DM_TYPE_BIO_BASED: | ||
949 | for (i = 0; i < t->num_targets; i++) { | ||
950 | struct dm_target *tgt = t->targets + i; | ||
951 | |||
952 | per_bio_data_size = max(per_bio_data_size, | ||
953 | tgt->per_bio_data_size); | ||
954 | } | ||
955 | t->mempools = dm_alloc_bio_mempools(t->integrity_supported, | ||
956 | per_bio_data_size); | ||
957 | break; | ||
958 | case DM_TYPE_REQUEST_BASED: | ||
959 | case DM_TYPE_MQ_REQUEST_BASED: | ||
960 | t->mempools = dm_alloc_rq_mempools(md, type); | ||
961 | break; | ||
962 | default: | ||
949 | DMWARN("no table type is set, can't allocate mempools"); | 963 | DMWARN("no table type is set, can't allocate mempools"); |
950 | return -EINVAL; | 964 | return -EINVAL; |
951 | } | 965 | } |
952 | 966 | ||
953 | if (type == DM_TYPE_BIO_BASED) | ||
954 | for (i = 0; i < t->num_targets; i++) { | ||
955 | tgt = t->targets + i; | ||
956 | per_bio_data_size = max(per_bio_data_size, tgt->per_bio_data_size); | ||
957 | } | ||
958 | |||
959 | t->mempools = dm_alloc_md_mempools(md, type, t->integrity_supported, per_bio_data_size); | ||
960 | if (!t->mempools) | 967 | if (!t->mempools) |
961 | return -ENOMEM; | 968 | return -ENOMEM; |
962 | 969 | ||
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 921aafd12aee..e852602c0091 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -793,10 +793,9 @@ static void inc_remap_and_issue_cell(struct thin_c *tc, | |||
793 | 793 | ||
794 | static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | 794 | static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) |
795 | { | 795 | { |
796 | if (m->bio) { | 796 | if (m->bio) |
797 | m->bio->bi_end_io = m->saved_bi_end_io; | 797 | m->bio->bi_end_io = m->saved_bi_end_io; |
798 | atomic_inc(&m->bio->bi_remaining); | 798 | |
799 | } | ||
800 | cell_error(m->tc->pool, m->cell); | 799 | cell_error(m->tc->pool, m->cell); |
801 | list_del(&m->list); | 800 | list_del(&m->list); |
802 | mempool_free(m, m->tc->pool->mapping_pool); | 801 | mempool_free(m, m->tc->pool->mapping_pool); |
@@ -810,10 +809,8 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
810 | int r; | 809 | int r; |
811 | 810 | ||
812 | bio = m->bio; | 811 | bio = m->bio; |
813 | if (bio) { | 812 | if (bio) |
814 | bio->bi_end_io = m->saved_bi_end_io; | 813 | bio->bi_end_io = m->saved_bi_end_io; |
815 | atomic_inc(&bio->bi_remaining); | ||
816 | } | ||
817 | 814 | ||
818 | if (m->err) { | 815 | if (m->err) { |
819 | cell_error(pool, m->cell); | 816 | cell_error(pool, m->cell); |
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 66616db33e6f..bb9c6a00e4b0 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
@@ -459,7 +459,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error) | |||
459 | bio->bi_end_io = io->orig_bi_end_io; | 459 | bio->bi_end_io = io->orig_bi_end_io; |
460 | bio->bi_private = io->orig_bi_private; | 460 | bio->bi_private = io->orig_bi_private; |
461 | 461 | ||
462 | bio_endio_nodec(bio, error); | 462 | bio_endio(bio, error); |
463 | } | 463 | } |
464 | 464 | ||
465 | static void verity_work(struct work_struct *w) | 465 | static void verity_work(struct work_struct *w) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 2caf492890d6..4d6f089a0e9e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -990,57 +990,6 @@ static void clone_endio(struct bio *bio, int error) | |||
990 | dec_pending(io, error); | 990 | dec_pending(io, error); |
991 | } | 991 | } |
992 | 992 | ||
993 | /* | ||
994 | * Partial completion handling for request-based dm | ||
995 | */ | ||
996 | static void end_clone_bio(struct bio *clone, int error) | ||
997 | { | ||
998 | struct dm_rq_clone_bio_info *info = | ||
999 | container_of(clone, struct dm_rq_clone_bio_info, clone); | ||
1000 | struct dm_rq_target_io *tio = info->tio; | ||
1001 | struct bio *bio = info->orig; | ||
1002 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; | ||
1003 | |||
1004 | bio_put(clone); | ||
1005 | |||
1006 | if (tio->error) | ||
1007 | /* | ||
1008 | * An error has already been detected on the request. | ||
1009 | * Once error occurred, just let clone->end_io() handle | ||
1010 | * the remainder. | ||
1011 | */ | ||
1012 | return; | ||
1013 | else if (error) { | ||
1014 | /* | ||
1015 | * Don't notice the error to the upper layer yet. | ||
1016 | * The error handling decision is made by the target driver, | ||
1017 | * when the request is completed. | ||
1018 | */ | ||
1019 | tio->error = error; | ||
1020 | return; | ||
1021 | } | ||
1022 | |||
1023 | /* | ||
1024 | * I/O for the bio successfully completed. | ||
1025 | * Notice the data completion to the upper layer. | ||
1026 | */ | ||
1027 | |||
1028 | /* | ||
1029 | * bios are processed from the head of the list. | ||
1030 | * So the completing bio should always be rq->bio. | ||
1031 | * If it's not, something wrong is happening. | ||
1032 | */ | ||
1033 | if (tio->orig->bio != bio) | ||
1034 | DMERR("bio completion is going in the middle of the request"); | ||
1035 | |||
1036 | /* | ||
1037 | * Update the original request. | ||
1038 | * Do not use blk_end_request() here, because it may complete | ||
1039 | * the original request before the clone, and break the ordering. | ||
1040 | */ | ||
1041 | blk_update_request(tio->orig, 0, nr_bytes); | ||
1042 | } | ||
1043 | |||
1044 | static struct dm_rq_target_io *tio_from_request(struct request *rq) | 993 | static struct dm_rq_target_io *tio_from_request(struct request *rq) |
1045 | { | 994 | { |
1046 | return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); | 995 | return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); |
@@ -1087,8 +1036,6 @@ static void free_rq_clone(struct request *clone) | |||
1087 | struct dm_rq_target_io *tio = clone->end_io_data; | 1036 | struct dm_rq_target_io *tio = clone->end_io_data; |
1088 | struct mapped_device *md = tio->md; | 1037 | struct mapped_device *md = tio->md; |
1089 | 1038 | ||
1090 | blk_rq_unprep_clone(clone); | ||
1091 | |||
1092 | if (md->type == DM_TYPE_MQ_REQUEST_BASED) | 1039 | if (md->type == DM_TYPE_MQ_REQUEST_BASED) |
1093 | /* stacked on blk-mq queue(s) */ | 1040 | /* stacked on blk-mq queue(s) */ |
1094 | tio->ti->type->release_clone_rq(clone); | 1041 | tio->ti->type->release_clone_rq(clone); |
@@ -1827,39 +1774,13 @@ static void dm_dispatch_clone_request(struct request *clone, struct request *rq) | |||
1827 | dm_complete_request(rq, r); | 1774 | dm_complete_request(rq, r); |
1828 | } | 1775 | } |
1829 | 1776 | ||
1830 | static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig, | 1777 | static void setup_clone(struct request *clone, struct request *rq, |
1831 | void *data) | 1778 | struct dm_rq_target_io *tio) |
1832 | { | 1779 | { |
1833 | struct dm_rq_target_io *tio = data; | 1780 | blk_rq_prep_clone(clone, rq); |
1834 | struct dm_rq_clone_bio_info *info = | ||
1835 | container_of(bio, struct dm_rq_clone_bio_info, clone); | ||
1836 | |||
1837 | info->orig = bio_orig; | ||
1838 | info->tio = tio; | ||
1839 | bio->bi_end_io = end_clone_bio; | ||
1840 | |||
1841 | return 0; | ||
1842 | } | ||
1843 | |||
1844 | static int setup_clone(struct request *clone, struct request *rq, | ||
1845 | struct dm_rq_target_io *tio, gfp_t gfp_mask) | ||
1846 | { | ||
1847 | int r; | ||
1848 | |||
1849 | r = blk_rq_prep_clone(clone, rq, tio->md->bs, gfp_mask, | ||
1850 | dm_rq_bio_constructor, tio); | ||
1851 | if (r) | ||
1852 | return r; | ||
1853 | |||
1854 | clone->cmd = rq->cmd; | ||
1855 | clone->cmd_len = rq->cmd_len; | ||
1856 | clone->sense = rq->sense; | ||
1857 | clone->end_io = end_clone_request; | 1781 | clone->end_io = end_clone_request; |
1858 | clone->end_io_data = tio; | 1782 | clone->end_io_data = tio; |
1859 | |||
1860 | tio->clone = clone; | 1783 | tio->clone = clone; |
1861 | |||
1862 | return 0; | ||
1863 | } | 1784 | } |
1864 | 1785 | ||
1865 | static struct request *clone_rq(struct request *rq, struct mapped_device *md, | 1786 | static struct request *clone_rq(struct request *rq, struct mapped_device *md, |
@@ -1880,12 +1801,7 @@ static struct request *clone_rq(struct request *rq, struct mapped_device *md, | |||
1880 | clone = tio->clone; | 1801 | clone = tio->clone; |
1881 | 1802 | ||
1882 | blk_rq_init(NULL, clone); | 1803 | blk_rq_init(NULL, clone); |
1883 | if (setup_clone(clone, rq, tio, gfp_mask)) { | 1804 | setup_clone(clone, rq, tio); |
1884 | /* -ENOMEM */ | ||
1885 | if (alloc_clone) | ||
1886 | free_clone_request(md, clone); | ||
1887 | return NULL; | ||
1888 | } | ||
1889 | 1805 | ||
1890 | return clone; | 1806 | return clone; |
1891 | } | 1807 | } |
@@ -1979,11 +1895,7 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq, | |||
1979 | } | 1895 | } |
1980 | if (r != DM_MAPIO_REMAPPED) | 1896 | if (r != DM_MAPIO_REMAPPED) |
1981 | return r; | 1897 | return r; |
1982 | if (setup_clone(clone, rq, tio, GFP_ATOMIC)) { | 1898 | setup_clone(clone, rq, tio); |
1983 | /* -ENOMEM */ | ||
1984 | ti->type->release_clone_rq(clone); | ||
1985 | return DM_MAPIO_REQUEUE; | ||
1986 | } | ||
1987 | } | 1899 | } |
1988 | 1900 | ||
1989 | switch (r) { | 1901 | switch (r) { |
@@ -2437,8 +2349,6 @@ static void __bind_mempools(struct mapped_device *md, struct dm_table *t) | |||
2437 | goto out; | 2349 | goto out; |
2438 | } | 2350 | } |
2439 | 2351 | ||
2440 | BUG_ON(!p || md->io_pool || md->rq_pool || md->bs); | ||
2441 | |||
2442 | md->io_pool = p->io_pool; | 2352 | md->io_pool = p->io_pool; |
2443 | p->io_pool = NULL; | 2353 | p->io_pool = NULL; |
2444 | md->rq_pool = p->rq_pool; | 2354 | md->rq_pool = p->rq_pool; |
@@ -3544,48 +3454,23 @@ int dm_noflush_suspending(struct dm_target *ti) | |||
3544 | } | 3454 | } |
3545 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); | 3455 | EXPORT_SYMBOL_GPL(dm_noflush_suspending); |
3546 | 3456 | ||
3547 | struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, | 3457 | struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity, |
3548 | unsigned integrity, unsigned per_bio_data_size) | 3458 | unsigned per_bio_data_size) |
3549 | { | 3459 | { |
3550 | struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL); | 3460 | struct dm_md_mempools *pools; |
3551 | struct kmem_cache *cachep = NULL; | 3461 | unsigned int pool_size = dm_get_reserved_bio_based_ios(); |
3552 | unsigned int pool_size = 0; | ||
3553 | unsigned int front_pad; | 3462 | unsigned int front_pad; |
3554 | 3463 | ||
3464 | pools = kzalloc(sizeof(*pools), GFP_KERNEL); | ||
3555 | if (!pools) | 3465 | if (!pools) |
3556 | return NULL; | 3466 | return NULL; |
3557 | 3467 | ||
3558 | type = filter_md_type(type, md); | 3468 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + |
3469 | offsetof(struct dm_target_io, clone); | ||
3559 | 3470 | ||
3560 | switch (type) { | 3471 | pools->io_pool = mempool_create_slab_pool(pool_size, _io_cache); |
3561 | case DM_TYPE_BIO_BASED: | 3472 | if (!pools->io_pool) |
3562 | cachep = _io_cache; | 3473 | goto out; |
3563 | pool_size = dm_get_reserved_bio_based_ios(); | ||
3564 | front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone); | ||
3565 | break; | ||
3566 | case DM_TYPE_REQUEST_BASED: | ||
3567 | cachep = _rq_tio_cache; | ||
3568 | pool_size = dm_get_reserved_rq_based_ios(); | ||
3569 | pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); | ||
3570 | if (!pools->rq_pool) | ||
3571 | goto out; | ||
3572 | /* fall through to setup remaining rq-based pools */ | ||
3573 | case DM_TYPE_MQ_REQUEST_BASED: | ||
3574 | if (!pool_size) | ||
3575 | pool_size = dm_get_reserved_rq_based_ios(); | ||
3576 | front_pad = offsetof(struct dm_rq_clone_bio_info, clone); | ||
3577 | /* per_bio_data_size is not used. See __bind_mempools(). */ | ||
3578 | WARN_ON(per_bio_data_size != 0); | ||
3579 | break; | ||
3580 | default: | ||
3581 | BUG(); | ||
3582 | } | ||
3583 | |||
3584 | if (cachep) { | ||
3585 | pools->io_pool = mempool_create_slab_pool(pool_size, cachep); | ||
3586 | if (!pools->io_pool) | ||
3587 | goto out; | ||
3588 | } | ||
3589 | 3474 | ||
3590 | pools->bs = bioset_create_nobvec(pool_size, front_pad); | 3475 | pools->bs = bioset_create_nobvec(pool_size, front_pad); |
3591 | if (!pools->bs) | 3476 | if (!pools->bs) |
@@ -3595,10 +3480,34 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned t | |||
3595 | goto out; | 3480 | goto out; |
3596 | 3481 | ||
3597 | return pools; | 3482 | return pools; |
3598 | |||
3599 | out: | 3483 | out: |
3600 | dm_free_md_mempools(pools); | 3484 | dm_free_md_mempools(pools); |
3485 | return NULL; | ||
3486 | } | ||
3487 | |||
3488 | struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, | ||
3489 | unsigned type) | ||
3490 | { | ||
3491 | unsigned int pool_size = dm_get_reserved_rq_based_ios(); | ||
3492 | struct dm_md_mempools *pools; | ||
3493 | |||
3494 | pools = kzalloc(sizeof(*pools), GFP_KERNEL); | ||
3495 | if (!pools) | ||
3496 | return NULL; | ||
3497 | |||
3498 | if (filter_md_type(type, md) == DM_TYPE_REQUEST_BASED) { | ||
3499 | pools->rq_pool = mempool_create_slab_pool(pool_size, _rq_cache); | ||
3500 | if (!pools->rq_pool) | ||
3501 | goto out; | ||
3502 | } | ||
3601 | 3503 | ||
3504 | pools->io_pool = mempool_create_slab_pool(pool_size, _rq_tio_cache); | ||
3505 | if (!pools->io_pool) | ||
3506 | goto out; | ||
3507 | |||
3508 | return pools; | ||
3509 | out: | ||
3510 | dm_free_md_mempools(pools); | ||
3602 | return NULL; | 3511 | return NULL; |
3603 | } | 3512 | } |
3604 | 3513 | ||
diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 6123c2bf9150..e6e66d087b26 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h | |||
@@ -222,8 +222,9 @@ void dm_kcopyd_exit(void); | |||
222 | /* | 222 | /* |
223 | * Mempool operations | 223 | * Mempool operations |
224 | */ | 224 | */ |
225 | struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, unsigned type, | 225 | struct dm_md_mempools *dm_alloc_bio_mempools(unsigned integrity, |
226 | unsigned integrity, unsigned per_bio_data_size); | 226 | unsigned per_bio_data_size); |
227 | struct dm_md_mempools *dm_alloc_rq_mempools(struct mapped_device *md, unsigned type); | ||
227 | void dm_free_md_mempools(struct dm_md_mempools *pools); | 228 | void dm_free_md_mempools(struct dm_md_mempools *pools); |
228 | 229 | ||
229 | /* | 230 | /* |