author     Linus Torvalds <torvalds@linux-foundation.org>   2009-02-18 21:33:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-02-18 21:33:04 -0500
commit     ba95fd47d177d46743ad94055908d22840370e06 (patch)
tree       f29e6921fefba2728c3b7f6854ac7f7729f602b2
parent     59af0a0b5848caf38f1bf7013905c3e9cdba4d1d (diff)
parent     be987fdb55a4726e2fcbab7501f89276bdb57288 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: fix deadlock in blk_abort_queue() for drivers that readd to timeout list
block: fix booting from partitioned md array
block: revert part of 18ce3751ccd488c78d3827e9f6bf54e6322676fb
cciss: PCI power management reset for kexec
paride/pg.c: xs(): &&/|| confusion
fs/bio: bio_alloc_bioset: pass right object ptr to mempool_free
block: fix bad definition of BIO_RW_SYNC
bsg: Fix sense buffer bug in SG_IO
-rw-r--r--  block/blk-timeout.c           |   9
-rw-r--r--  block/blktrace.c              |   2
-rw-r--r--  block/bsg.c                   |  17
-rw-r--r--  block/genhd.c                 |   8
-rw-r--r--  drivers/block/cciss.c         | 215
-rw-r--r--  drivers/block/paride/pg.c     |   2
-rw-r--r--  drivers/md/dm-io.c            |   2
-rw-r--r--  drivers/md/dm-kcopyd.c        |   2
-rw-r--r--  drivers/md/md.c               |   4
-rw-r--r--  fs/bio.c                      |   5
-rw-r--r--  fs/buffer.c                   |   2
-rw-r--r--  include/linux/bio.h           |   2
-rw-r--r--  include/linux/blktrace_api.h  |   1
-rw-r--r--  include/linux/fs.h            |   6
-rw-r--r--  kernel/power/swap.c           |   5
-rw-r--r--  mm/page_io.c                  |   2

16 files changed, 259 insertions(+), 25 deletions(-)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index a09535377a94..bbbdc4b8ccf2 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -209,12 +209,19 @@ void blk_abort_queue(struct request_queue *q)
 {
 	unsigned long flags;
 	struct request *rq, *tmp;
+	LIST_HEAD(list);
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
 	elv_abort_queue(q);
 
-	list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list)
+	/*
+	 * Splice entries to local list, to avoid deadlocking if entries
+	 * get readded to the timeout list by error handling
+	 */
+	list_splice_init(&q->timeout_list, &list);
+
+	list_for_each_entry_safe(rq, tmp, &list, timeout_list)
 		blk_abort_request(rq);
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
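
The comment in the hunk above describes the whole trick: drain the shared timeout list onto a private list head while the lock is held, then walk the private copy, so error handling that re-adds a request grows the (now empty) queue list instead of the list being iterated. A self-contained userspace sketch of that pattern (plain C with a hand-rolled list, not the kernel list API):

/* Minimal sketch of the splice-then-walk pattern used in blk_abort_queue(). */
#include <stdio.h>
#include <stddef.h>

struct node {
	int id;
	struct node *next;
};

static struct node *shared_head;	/* stands in for q->timeout_list */

static void push(struct node **head, struct node *n)
{
	n->next = *head;
	*head = n;
}

/* Handler that re-adds the entry, as some drivers' error handling does. */
static void handle(struct node *n)
{
	printf("aborting %d\n", n->id);
	push(&shared_head, n);		/* re-added for a later retry */
}

int main(void)
{
	struct node a = { 1, NULL }, b = { 2, NULL };
	struct node *local, *n, *next;

	push(&shared_head, &a);
	push(&shared_head, &b);

	/* "splice": detach the whole shared list in one step */
	local = shared_head;
	shared_head = NULL;

	/* Walking 'local' terminates even though handle() re-adds each
	 * node to 'shared_head'; walking 'shared_head' directly would
	 * never reach the end. */
	for (n = local; n; n = next) {
		next = n->next;
		handle(n);
	}
	return 0;
}
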
diff --git a/block/blktrace.c b/block/blktrace.c
index 39cc3bfe56e4..7cf9d1ff45a0 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -142,7 +142,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 
 	what |= ddir_act[rw & WRITE];
 	what |= MASK_TC_BIT(rw, BARRIER);
-	what |= MASK_TC_BIT(rw, SYNC);
+	what |= MASK_TC_BIT(rw, SYNCIO);
 	what |= MASK_TC_BIT(rw, AHEAD);
 	what |= MASK_TC_BIT(rw, META);
 	what |= MASK_TC_BIT(rw, DISCARD);
diff --git a/block/bsg.c b/block/bsg.c
index d414bb5607e8..0ce8806dd0c1 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -244,7 +244,8 @@ bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw)
  * map sg_io_v4 to a request.
  */
 static struct request *
-bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
+bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
+	    u8 *sense)
 {
 	struct request_queue *q = bd->queue;
 	struct request *rq, *next_rq = NULL;
@@ -306,6 +307,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm)
 		if (ret)
 			goto out;
 	}
+
+	rq->sense = sense;
+	rq->sense_len = 0;
+
 	return rq;
 out:
 	if (rq->cmd != rq->__cmd)
@@ -348,9 +353,6 @@ static void bsg_rq_end_io(struct request *rq, int uptodate)
 static void bsg_add_command(struct bsg_device *bd, struct request_queue *q,
 			    struct bsg_command *bc, struct request *rq)
 {
-	rq->sense = bc->sense;
-	rq->sense_len = 0;
-
 	/*
 	 * add bc command to busy queue and submit rq for io
 	 */
@@ -419,7 +421,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
 {
 	int ret = 0;
 
-	dprintk("rq %p bio %p %u\n", rq, bio, rq->errors);
+	dprintk("rq %p bio %p 0x%x\n", rq, bio, rq->errors);
 	/*
 	 * fill in all the output members
 	 */
@@ -635,7 +637,7 @@ static int __bsg_write(struct bsg_device *bd, const char __user *buf,
 	/*
 	 * get a request, fill in the blanks, and add to request queue
 	 */
-	rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm);
+	rq = bsg_map_hdr(bd, &bc->hdr, has_write_perm, bc->sense);
 	if (IS_ERR(rq)) {
 		ret = PTR_ERR(rq);
 		rq = NULL;
@@ -922,11 +924,12 @@ static long bsg_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 	struct request *rq;
 	struct bio *bio, *bidi_bio = NULL;
 	struct sg_io_v4 hdr;
+	u8 sense[SCSI_SENSE_BUFFERSIZE];
 
 	if (copy_from_user(&hdr, uarg, sizeof(hdr)))
 		return -EFAULT;
 
-	rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE);
+	rq = bsg_map_hdr(bd, &hdr, file->f_mode & FMODE_WRITE, sense);
 	if (IS_ERR(rq))
 		return PTR_ERR(rq);
 
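
The bsg change works because the sense buffer is now wired up where the request is built, so both submission paths get one: __bsg_write() passes the per-command bc->sense, and bsg_ioctl()'s SG_IO path passes an on-stack array that previously was never set up at all. A standalone sketch of that shape, with invented names standing in for the kernel structures:

/* Hypothetical stand-ins for the bsg structures; only the shape matches. */
#include <stdio.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96	/* plays the role of SCSI_SENSE_BUFFERSIZE */

struct fake_request {
	unsigned char *sense;
	unsigned int sense_len;
};

/* setup helper: the caller owns the sense storage, the helper wires it in */
static void map_hdr(struct fake_request *rq, unsigned char *sense)
{
	rq->sense = sense;
	rq->sense_len = 0;
}

static void complete(struct fake_request *rq)
{
	/* pretend the device returned 8 bytes of sense data */
	memset(rq->sense, 0x70, 8);
	rq->sense_len = 8;
}

int main(void)
{
	unsigned char sense[SENSE_BUFFERSIZE];	/* on-stack, like bsg_ioctl() */
	struct fake_request rq;

	map_hdr(&rq, sense);	/* every path sets the buffer before I/O */
	complete(&rq);
	printf("sense_len=%u first=0x%02x\n", rq.sense_len, rq.sense[0]);
	return 0;
}
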
diff --git a/block/genhd.c b/block/genhd.c
index 397960cf26af..e1eadcc9546a 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1087,6 +1087,14 @@ dev_t blk_lookup_devt(const char *name, int partno)
 		if (strcmp(dev_name(dev), name))
 			continue;
 
+		if (partno < disk->minors) {
+			/* We need to return the right devno, even
+			 * if the partition doesn't exist yet.
+			 */
+			devt = MKDEV(MAJOR(dev->devt),
+				     MINOR(dev->devt) + partno);
+			break;
+		}
 		part = disk_get_part(disk, partno);
 		if (part) {
 			devt = part_devt(part);
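
The new branch answers a lookup for a partition that has no gendisk entry yet by deriving its device number from the whole-disk one: same major, minor offset by the partition number. A standalone sketch of that arithmetic; the MKDEV/MAJOR/MINOR definitions are copied in (they mirror include/linux/kdev_t.h) so the example compiles on its own:

#include <stdio.h>

#define MINORBITS	20
#define MINORMASK	((1U << MINORBITS) - 1)
#define MAJOR(dev)	((unsigned int) ((dev) >> MINORBITS))
#define MINOR(dev)	((unsigned int) ((dev) & MINORMASK))
#define MKDEV(ma, mi)	(((ma) << MINORBITS) | (mi))

int main(void)
{
	unsigned int disk_devt = MKDEV(9, 0);	/* e.g. md0: major 9, minor 0 */
	int partno = 2;

	/* same computation as the hunk above */
	unsigned int part_devt = MKDEV(MAJOR(disk_devt),
				       MINOR(disk_devt) + partno);

	printf("disk %u:%u  partition %u:%u\n",
	       MAJOR(disk_devt), MINOR(disk_devt),
	       MAJOR(part_devt), MINOR(part_devt));
	return 0;
}
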
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 01e69383d9c0..d2cb67b61176 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -3390,6 +3390,203 @@ static void free_hba(int i)
 	kfree(p);
 }
 
+/* Send a message CDB to the firmware. */
+static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, unsigned char type)
+{
+	typedef struct {
+		CommandListHeader_struct CommandHeader;
+		RequestBlock_struct Request;
+		ErrDescriptor_struct ErrorDescriptor;
+	} Command;
+	static const size_t cmd_sz = sizeof(Command) + sizeof(ErrorInfo_struct);
+	Command *cmd;
+	dma_addr_t paddr64;
+	uint32_t paddr32, tag;
+	void __iomem *vaddr;
+	int i, err;
+
+	vaddr = ioremap_nocache(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+	if (vaddr == NULL)
+		return -ENOMEM;
+
+	/* The Inbound Post Queue only accepts 32-bit physical addresses for the
+	   CCISS commands, so they must be allocated from the lower 4GiB of
+	   memory. */
+	err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
+	if (err) {
+		iounmap(vaddr);
+		return -ENOMEM;
+	}
+
+	cmd = pci_alloc_consistent(pdev, cmd_sz, &paddr64);
+	if (cmd == NULL) {
+		iounmap(vaddr);
+		return -ENOMEM;
+	}
+
+	/* This must fit, because of the 32-bit consistent DMA mask.  Also,
+	   although there's no guarantee, we assume that the address is at
+	   least 4-byte aligned (most likely, it's page-aligned). */
+	paddr32 = paddr64;
+
+	cmd->CommandHeader.ReplyQueue = 0;
+	cmd->CommandHeader.SGList = 0;
+	cmd->CommandHeader.SGTotal = 0;
+	cmd->CommandHeader.Tag.lower = paddr32;
+	cmd->CommandHeader.Tag.upper = 0;
+	memset(&cmd->CommandHeader.LUN.LunAddrBytes, 0, 8);
+
+	cmd->Request.CDBLen = 16;
+	cmd->Request.Type.Type = TYPE_MSG;
+	cmd->Request.Type.Attribute = ATTR_HEADOFQUEUE;
+	cmd->Request.Type.Direction = XFER_NONE;
+	cmd->Request.Timeout = 0; /* Don't time out */
+	cmd->Request.CDB[0] = opcode;
+	cmd->Request.CDB[1] = type;
+	memset(&cmd->Request.CDB[2], 0, 14); /* the rest of the CDB is reserved */
+
+	cmd->ErrorDescriptor.Addr.lower = paddr32 + sizeof(Command);
+	cmd->ErrorDescriptor.Addr.upper = 0;
+	cmd->ErrorDescriptor.Len = sizeof(ErrorInfo_struct);
+
+	writel(paddr32, vaddr + SA5_REQUEST_PORT_OFFSET);
+
+	for (i = 0; i < 10; i++) {
+		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
+		if ((tag & ~3) == paddr32)
+			break;
+		schedule_timeout_uninterruptible(HZ);
+	}
+
+	iounmap(vaddr);
+
+	/* we leak the DMA buffer here ... no choice since the controller could
+	   still complete the command. */
+	if (i == 10) {
+		printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n",
+			opcode, type);
+		return -ETIMEDOUT;
+	}
+
+	pci_free_consistent(pdev, cmd_sz, cmd, paddr64);
+
+	if (tag & 2) {
+		printk(KERN_ERR "cciss: controller message %02x:%02x failed\n",
+			opcode, type);
+		return -EIO;
+	}
+
+	printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n",
+		opcode, type);
+	return 0;
+}
+
+#define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
+#define cciss_noop(p) cciss_message(p, 3, 0)
+
+static __devinit int cciss_reset_msi(struct pci_dev *pdev)
+{
+/* the #defines are stolen from drivers/pci/msi.h. */
+#define msi_control_reg(base)		(base + PCI_MSI_FLAGS)
+#define PCI_MSIX_FLAGS_ENABLE		(1 << 15)
+
+	int pos;
+	u16 control = 0;
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
+	if (pos) {
+		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+		if (control & PCI_MSI_FLAGS_ENABLE) {
+			printk(KERN_INFO "cciss: resetting MSI\n");
+			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
+		}
+	}
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
+	if (pos) {
+		pci_read_config_word(pdev, msi_control_reg(pos), &control);
+		if (control & PCI_MSIX_FLAGS_ENABLE) {
+			printk(KERN_INFO "cciss: resetting MSI-X\n");
+			pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
+		}
+	}
+
+	return 0;
+}
+
+/* This does a hard reset of the controller using PCI power management
+ * states. */
+static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev)
+{
+	u16 pmcsr, saved_config_space[32];
+	int i, pos;
+
+	printk(KERN_INFO "cciss: using PCI PM to reset controller\n");
+
+	/* This is very nearly the same thing as
+
+	   pci_save_state(pci_dev);
+	   pci_set_power_state(pci_dev, PCI_D3hot);
+	   pci_set_power_state(pci_dev, PCI_D0);
+	   pci_restore_state(pci_dev);
+
+	   but we can't use these nice canned kernel routines on
+	   kexec, because they also check the MSI/MSI-X state in PCI
+	   configuration space and do the wrong thing when it is
+	   set/cleared.  Also, the pci_save/restore_state functions
+	   violate the ordering requirements for restoring the
+	   configuration space from the CCISS document (see the
+	   comment below).  So we roll our own .... */
+
+	for (i = 0; i < 32; i++)
+		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+
+	pos = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pos == 0) {
+		printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n");
+		return -ENODEV;
+	}
+
+	/* Quoting from the Open CISS Specification: "The Power
+	 * Management Control/Status Register (CSR) controls the power
+	 * state of the device.  The normal operating state is D0,
+	 * CSR=00h.  The software off state is D3, CSR=03h.  To reset
+	 * the controller, place the interface device in D3 then to
+	 * D0, this causes a secondary PCI reset which will reset the
+	 * controller." */
+
+	/* enter the D3hot power management state */
+	pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr);
+	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+	pmcsr |= PCI_D3hot;
+	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+	schedule_timeout_uninterruptible(HZ >> 1);
+
+	/* enter the D0 power management state */
+	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
+	pmcsr |= PCI_D0;
+	pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
+
+	schedule_timeout_uninterruptible(HZ >> 1);
+
+	/* Restore the PCI configuration space.  The Open CISS
+	 * Specification says, "Restore the PCI Configuration
+	 * Registers, offsets 00h through 60h. It is important to
+	 * restore the command register, 16-bits at offset 04h,
+	 * last. Do not restore the configuration status register,
+	 * 16-bits at offset 06h."  Note that the offset is 2*i. */
+	for (i = 0; i < 32; i++) {
+		if (i == 2 || i == 3)
+			continue;
+		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+	}
+	wmb();
+	pci_write_config_word(pdev, 4, saved_config_space[2]);
+
+	return 0;
+}
+
 /*
  * This is it. Find all the controllers and register them. I really hate
  * stealing all these major device numbers.
@@ -3404,6 +3601,24 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	int dac, return_code;
 	InquiryData_struct *inq_buff = NULL;
 
+	if (reset_devices) {
+		/* Reset the controller with a PCI power-cycle */
+		if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev))
+			return -ENODEV;
+
+		/* Some devices (notably the HP Smart Array 5i Controller)
+		   need a little pause here */
+		schedule_timeout_uninterruptible(30*HZ);
+
+		/* Now try to get the controller to respond to a no-op */
+		for (i=0; i<12; i++) {
+			if (cciss_noop(pdev) == 0)
+				break;
+			else
+				printk("cciss: no-op failed%s\n", (i < 11 ? "; re-trying" : ""));
+		}
+	}
+
 	i = alloc_cciss_hba();
 	if (i < 0)
 		return -1;
diff --git a/drivers/block/paride/pg.c b/drivers/block/paride/pg.c
index 9dfa27163001..c397b3ddba9b 100644
--- a/drivers/block/paride/pg.c
+++ b/drivers/block/paride/pg.c
@@ -422,7 +422,7 @@ static void xs(char *buf, char *targ, int len)
 
 	for (k = 0; k < len; k++) {
 		char c = *buf++;
-		if (c != ' ' || c != l)
+		if (c != ' ' && c != l)
 			l = *targ++ = c;
 	}
 	if (l == ' ')
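
The one-character fix matters because (c != ' ' || c != l) is false only when c and l are both spaces, so the old test merely collapsed runs of blanks instead of squeezing out spaces and repeated characters as intended. A standalone demonstration of the corrected condition (a simplified re-implementation of the squeeze loop, not the driver function itself):

#include <stdio.h>

int main(void)
{
	const char *in = "HP      C1533A  ";
	const char *p;
	char out[32], *targ = out;
	char l = '\0';

	for (p = in; *p; p++) {
		char c = *p;
		/* Old form (c != ' ' || c != l) is true unless c and l are
		 * both spaces, so it only collapsed runs of blanks.  The
		 * corrected form copies c only if it is neither a space nor
		 * a repeat of the last copied character. */
		if (c != ' ' && c != l)
			l = *targ++ = c;
	}
	*targ = '\0';
	printf("\"%s\" -> \"%s\"\n", in, out);	/* -> "HPC153A" */
	return 0;
}
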
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index a34338567a2a..f14813be4eff 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -328,7 +328,7 @@ static void dispatch_io(int rw, unsigned int num_regions,
 	struct dpages old_pages = *dp;
 
 	if (sync)
-		rw |= (1 << BIO_RW_SYNC);
+		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 
 	/*
 	 * For multiple regions we need to be careful to rewind
diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c
index 3073618269ea..0a225da21272 100644
--- a/drivers/md/dm-kcopyd.c
+++ b/drivers/md/dm-kcopyd.c
@@ -344,7 +344,7 @@ static int run_io_job(struct kcopyd_job *job)
 {
 	int r;
 	struct dm_io_request io_req = {
-		.bi_rw = job->rw | (1 << BIO_RW_SYNC),
+		.bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG),
 		.mem.type = DM_IO_PAGE_LIST,
 		.mem.ptr.pl = job->pages,
 		.mem.offset = job->offset,
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4495104f6c9f..03b4cd0a6344 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -474,7 +474,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
 	 * causes ENOTSUPP, we allocate a spare bio...
 	 */
 	struct bio *bio = bio_alloc(GFP_NOIO, 1);
-	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);
+	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
 
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_sector = sector;
@@ -531,7 +531,7 @@ int sync_page_io(struct block_device *bdev, sector_t sector, int size,
 	struct completion event;
 	int ret;
 
-	rw |= (1 << BIO_RW_SYNC);
+	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 
 	bio->bi_bdev = bdev;
 	bio->bi_sector = sector;
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -302,9 +302,10 @@ void bio_init(struct bio *bio)
 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 {
 	struct bio *bio = NULL;
+	void *p;
 
 	if (bs) {
-		void *p = mempool_alloc(bs->bio_pool, gfp_mask);
+		p = mempool_alloc(bs->bio_pool, gfp_mask);
 
 		if (p)
 			bio = p + bs->front_pad;
@@ -329,7 +330,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	}
 	if (unlikely(!bvl)) {
 		if (bs)
-			mempool_free(bio, bs->bio_pool);
+			mempool_free(p, bs->bio_pool);
 		else
 			kfree(bio);
 		bio = NULL;
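
The fs/bio.c fix is about handing the right pointer back to the allocator: the bio given to callers sits front_pad bytes into the mempool object, so the error path must free the original pointer p, not the offset pointer bio. A userspace analogy using malloc/free:

#include <stdio.h>
#include <stdlib.h>

#define FRONT_PAD 64	/* space reserved in front of the object, like bs->front_pad */

struct obj {
	int flags;
};

int main(void)
{
	void *p = malloc(FRONT_PAD + sizeof(struct obj));
	struct obj *o;

	if (!p)
		return 1;

	o = (struct obj *)((char *)p + FRONT_PAD);	/* like bio = p + bs->front_pad */
	o->flags = 0;

	/* error path: hand the buffer back */
	free(p);	/* correct: the pointer the allocator returned */
	/* free(o) would be the pre-fix bug: an interior pointer */

	printf("released padded allocation cleanly\n");
	return 0;
}
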
diff --git a/fs/buffer.c b/fs/buffer.c
index ff4d1cdd779b..9f697419ed8e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3109,7 +3109,7 @@ int sync_dirty_buffer(struct buffer_head *bh)
 	if (test_clear_buffer_dirty(bh)) {
 		get_bh(bh);
 		bh->b_end_io = end_buffer_write_sync;
-		ret = submit_bh(WRITE_SYNC, bh);
+		ret = submit_bh(WRITE, bh);
 		wait_on_buffer(bh);
 		if (buffer_eopnotsupp(bh)) {
 			clear_buffer_eopnotsupp(bh);
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 2aa283ab062b..1b16108a5417 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -171,8 +171,6 @@ struct bio {
 #define BIO_RW_FAILFAST_TRANSPORT	8
 #define BIO_RW_FAILFAST_DRIVER		9
 
-#define BIO_RW_SYNC	(BIO_RW_SYNCIO | BIO_RW_UNPLUG)
-
 #define bio_rw_flagged(bio, flag)	((bio)->bi_rw & (1 << (flag)))
 
 /*
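
Dropping BIO_RW_SYNC closes a trap: BIO_RW_SYNCIO and BIO_RW_UNPLUG are bit numbers, and callers wrote (1 << BIO_RW_SYNC), which shifts by the OR of the two numbers and sets one unrelated bit rather than both flags. That is why every call site in this merge now spells out (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG). A small demonstration, using illustrative bit numbers rather than the kernel's values:

#include <stdio.h>

#define RW_SYNCIO	4	/* example bit numbers, not the real ones */
#define RW_UNPLUG	5
#define RW_SYNC		(RW_SYNCIO | RW_UNPLUG)	/* = 5, i.e. just one bit number */

int main(void)
{
	unsigned int wrong = 1u << RW_SYNC;				/* 0x20 only */
	unsigned int right = (1u << RW_SYNCIO) | (1u << RW_UNPLUG);	/* 0x30, both bits */

	printf("1 << (SYNCIO | UNPLUG)      = 0x%02x\n", wrong);
	printf("(1 << SYNCIO)|(1 << UNPLUG) = 0x%02x\n", right);
	return 0;
}
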
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 25379cba2370..6e915878e88c 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -15,6 +15,7 @@ enum blktrace_cat {
 	BLK_TC_WRITE	= 1 << 1,	/* writes */
 	BLK_TC_BARRIER	= 1 << 2,	/* barrier */
 	BLK_TC_SYNC	= 1 << 3,	/* sync IO */
+	BLK_TC_SYNCIO	= BLK_TC_SYNC,
 	BLK_TC_QUEUE	= 1 << 4,	/* queueing/merging */
 	BLK_TC_REQUEUE	= 1 << 5,	/* requeueing */
 	BLK_TC_ISSUE	= 1 << 6,	/* issue */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5852bd6afbe4..92734c0012e6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -93,10 +93,10 @@ struct inodes_stat_t {
 #define WRITE 1
 #define READA 2		/* read-ahead - don't block if no resources */
 #define SWRITE 3	/* for ll_rw_block() - wait for buffer lock */
-#define READ_SYNC	(READ | (1 << BIO_RW_SYNC))
+#define READ_SYNC	(READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define READ_META	(READ | (1 << BIO_RW_META))
-#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNC))
-#define SWRITE_SYNC	(SWRITE | (1 << BIO_RW_SYNC))
+#define WRITE_SYNC	(WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
+#define SWRITE_SYNC	(SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG))
 #define WRITE_BARRIER	(WRITE | (1 << BIO_RW_BARRIER))
 #define DISCARD_NOBARRIER (1 << BIO_RW_DISCARD)
 #define DISCARD_BARRIER ((1 << BIO_RW_DISCARD) | (1 << BIO_RW_BARRIER))
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 6da14358537c..505f319e489c 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -60,6 +60,7 @@ static struct block_device *resume_bdev;
 static int submit(int rw, pgoff_t page_off, struct page *page,
 		struct bio **bio_chain)
 {
+	const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
@@ -80,7 +81,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 	bio_get(bio);
 
 	if (bio_chain == NULL) {
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 		wait_on_page_locked(page);
 		if (rw == READ)
 			bio_set_pages_dirty(bio);
@@ -90,7 +91,7 @@ static int submit(int rw, pgoff_t page_off, struct page *page,
 		get_page(page);	/* These pages are freed later */
 		bio->bi_private = *bio_chain;
 		*bio_chain = bio;
-		submit_bio(rw | (1 << BIO_RW_SYNC), bio);
+		submit_bio(bio_rw, bio);
 	}
 	return 0;
 }
diff --git a/mm/page_io.c b/mm/page_io.c
index dc6ce0afbded..3023c475e041 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -111,7 +111,7 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		goto out;
 	}
 	if (wbc->sync_mode == WB_SYNC_ALL)
-		rw |= (1 << BIO_RW_SYNC);
+		rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);
 	count_vm_event(PSWPOUT);
 	set_page_writeback(page);
 	unlock_page(page);