Diffstat (limited to 'block/ll_rw_blk.c')

-rw-r--r--  block/ll_rw_blk.c | 30 ++++++++++++++++--------------
1 file changed, 16 insertions(+), 14 deletions(-)
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 7eb36c53f4b7..5813d63c20af 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -10,7 +10,6 @@
 /*
  * This handles all read/write requests to block devices
  */
-#include <linux/config.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/backing-dev.h>
@@ -638,7 +637,7 @@ void blk_queue_bounce_limit(request_queue_t *q, u64 dma_addr)
        /* Assume anything <= 4GB can be handled by IOMMU.
           Actually some IOMMUs can handle everything, but I don't
           know of a way to test this here. */
-       if (bounce_pfn < (0xffffffff>>PAGE_SHIFT))
+       if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
                dma = 1;
        q->bounce_pfn = max_low_pfn;
 #else
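For reference, blk_queue_bounce_limit() is what drivers call with the highest address their hardware can reach by DMA; the hunk above only changes how a 64-bit kernel decides whether to trust the IOMMU for limits at or below 4GB. A minimal sketch of a caller, assuming a hypothetical driver whose controller addresses only the low 4GB:

#include <linux/blkdev.h>

/* Hypothetical driver setup: the controller can DMA only below 4GB. */
static void example_set_bounce_limit(request_queue_t *q)
{
        /* Pages above this address get bounced through lowmem buffers. */
        blk_queue_bounce_limit(q, 0xffffffffULL);

        /* Hardware with no addressing limit would pass BLK_BOUNCE_ANY. */
        /* blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); */
}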
@@ -1663,6 +1662,8 @@ static void blk_unplug_timeout(unsigned long data)
  **/
 void blk_start_queue(request_queue_t *q)
 {
+       WARN_ON(!irqs_disabled());
+
        clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags);

        /*
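The new WARN_ON(!irqs_disabled()) makes the existing calling convention explicit: blk_start_queue() must be called with the queue lock held and interrupts disabled. A sketch of a conforming caller, using a hypothetical driver structure:

#include <linux/blkdev.h>
#include <linux/spinlock.h>

struct example_dev {                    /* hypothetical driver state */
        request_queue_t *queue;
};

static void example_restart_io(struct example_dev *dev)
{
        unsigned long flags;

        /* Take the queue lock with interrupts off before restarting. */
        spin_lock_irqsave(dev->queue->queue_lock, flags);
        blk_start_queue(dev->queue);
        spin_unlock_irqrestore(dev->queue->queue_lock, flags);
}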
@@ -1878,7 +1879,8 @@ EXPORT_SYMBOL(blk_alloc_queue_node);
  * get dealt with eventually.
  *
  * The queue spin lock must be held while manipulating the requests on the
- * request queue.
+ * request queue; this lock will be taken also from interrupt context, so irq
+ * disabling is needed for it.
  *
  * Function returns a pointer to the initialized request queue, or NULL if
  * it didn't succeed.
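The expanded comment documents why the lock handed to blk_init_queue() must be irq-safe: the block layer also acquires it from interrupt context, so a driver taking it from process context has to use the irq-disabling variants. A minimal sketch under those assumptions (all names here are illustrative):

#include <linux/blkdev.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_queue_lock);     /* hypothetical driver lock */

static void example_request_fn(request_queue_t *q)
{
        /* Invoked by the block layer with example_queue_lock already held. */
}

static request_queue_t *example_create_queue(void)
{
        /* The same lock is taken in interrupt context by the block layer,
         * hence the requirement to disable irqs when taking it directly. */
        return blk_init_queue(example_request_fn, &example_queue_lock);
}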
@@ -2742,7 +2744,7 @@ static int attempt_merge(request_queue_t *q, struct request *req,
                return 0;

        /*
-        * not contigious
+        * not contiguous
         */
        if (req->sector + req->nr_sectors != next->sector)
                return 0;
@@ -2824,6 +2826,9 @@ static void init_request_from_bio(struct request *req, struct bio *bio)
        if (unlikely(bio_barrier(bio)))
                req->flags |= (REQ_HARDBARRIER | REQ_NOMERGE);

+       if (bio_sync(bio))
+               req->flags |= REQ_RW_SYNC;
+
        req->errors = 0;
        req->hard_sector = req->sector = bio->bi_sector;
        req->hard_nr_sectors = req->nr_sectors = bio_sectors(bio);
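bio_sync() tests the BIO_RW_SYNC bit in bio->bi_rw, so the added lines simply propagate a submitter's "synchronous" hint onto the request as REQ_RW_SYNC, where the I/O scheduler can see it. A sketch of how a submitter would set that bit (the function name is illustrative):

#include <linux/bio.h>
#include <linux/fs.h>

/* Mark a write bio as synchronous before submission; init_request_from_bio()
 * will then tag the resulting request with REQ_RW_SYNC. */
static void example_submit_sync_write(struct bio *bio)
{
        submit_bio(WRITE | (1 << BIO_RW_SYNC), bio);
}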
@@ -3111,9 +3116,9 @@ void submit_bio(int rw, struct bio *bio)
        BIO_BUG_ON(!bio->bi_io_vec);
        bio->bi_rw |= rw;
        if (rw & WRITE)
-               mod_page_state(pgpgout, count);
+               count_vm_events(PGPGOUT, count);
        else
-               mod_page_state(pgpgin, count);
+               count_vm_events(PGPGIN, count);

        if (unlikely(block_dump)) {
                char b[BDEVNAME_SIZE];
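This hunk is part of the migration away from the old mod_page_state() accounting: PGPGIN/PGPGOUT are now bumped through the VM event counter API. The same pattern in isolation, purely as an illustrative sketch:

#include <linux/vmstat.h>
#include <linux/fs.h>

static void example_account_paging(int rw, unsigned long count)
{
        if (rw & WRITE)
                count_vm_events(PGPGOUT, count);        /* data paged out */
        else
                count_vm_events(PGPGIN, count);         /* data paged in */
}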
@@ -3359,12 +3364,11 @@ EXPORT_SYMBOL(end_that_request_chunk);
  */
 static void blk_done_softirq(struct softirq_action *h)
 {
-       struct list_head *cpu_list;
-       LIST_HEAD(local_list);
+       struct list_head *cpu_list, local_list;

        local_irq_disable();
        cpu_list = &__get_cpu_var(blk_cpu_done);
-       list_splice_init(cpu_list, &local_list);
+       list_replace_init(cpu_list, &local_list);
        local_irq_enable();

        while (!list_empty(&local_list)) {
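list_replace_init() moves the whole per-CPU completion list onto a local head in one step and reinitializes the source, so the local head no longer needs the separate LIST_HEAD() initialization that the list_splice_init() version required. The drain pattern, sketched generically:

#include <linux/list.h>

static void example_drain(struct list_head *source)
{
        struct list_head local;         /* deliberately not initialized */

        /* Take over source's elements and leave source empty. */
        list_replace_init(source, &local);

        while (!list_empty(&local)) {
                struct list_head *entry = local.next;

                list_del(entry);
                /* ... handle the entry, e.g. complete the request ... */
        }
}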
@@ -3398,7 +3402,7 @@ static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
 }


-static struct notifier_block blk_cpu_notifier = {
+static struct notifier_block __devinitdata blk_cpu_notifier = {
        .notifier_call = blk_cpu_notify,
 };

@@ -3410,7 +3414,7 @@ static struct notifier_block blk_cpu_notifier = {
  *
  * Description:
  *     Ends all I/O on a request. It does not handle partial completions,
- *     unless the driver actually implements this in its completionc callback
+ *     unless the driver actually implements this in its completion callback
  *     through requeueing. Theh actual completion happens out-of-order,
  *     through a softirq handler. The user must have registered a completion
  *     callback through blk_queue_softirq_done().
@@ -3536,9 +3540,7 @@ int __init blk_dev_init(void)
                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
-#ifdef CONFIG_HOTPLUG_CPU
-       register_cpu_notifier(&blk_cpu_notifier);
-#endif
+       register_hotcpu_notifier(&blk_cpu_notifier);

        blk_max_low_pfn = max_low_pfn;
        blk_max_pfn = max_pfn;
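register_hotcpu_notifier() hides the CONFIG_HOTPLUG_CPU dependency inside the helper itself, which is why the open-coded #ifdef around register_cpu_notifier() can be dropped here. Roughly, as a simplified sketch of the idea rather than the exact header definition:

#include <linux/cpu.h>

#ifdef CONFIG_HOTPLUG_CPU
#define example_hotcpu_notifier(nb)     register_cpu_notifier(nb)
#else
/* Compiles away to nothing when CPU hotplug is not configured. */
#define example_hotcpu_notifier(nb)     do { (void)(nb); } while (0)
#endif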