author		James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
commit		0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree		dcced72d230d69fd0c5816ac6dd03ab84799a93e /block
parent		e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent		f26b90440cd74c78fe10c9bd5160809704a9627c (diff)
Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'block')
-rw-r--r--	block/cfq-iosched.c	16
-rw-r--r--	block/elevator.c	17
-rw-r--r--	block/ll_rw_blk.c	108
-rw-r--r--	block/scsi_ioctl.c	5
4 files changed, 45 insertions, 101 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d3d76136f53a..1d9c3c70a9a0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -456,6 +456,9 @@ static void cfq_add_rq_rb(struct request *rq)
 	 */
 	while ((__alias = elv_rb_add(&cfqq->sort_list, rq)) != NULL)
 		cfq_dispatch_insert(cfqd->queue, __alias);
+
+	if (!cfq_cfqq_on_rr(cfqq))
+		cfq_add_cfqq_rr(cfqd, cfqq);
 }
 
 static inline void
@@ -1215,11 +1218,12 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 {
 	struct cfq_data *cfqd = cic->key;
 	struct cfq_queue *cfqq;
+	unsigned long flags;
 
 	if (unlikely(!cfqd))
 		return;
 
-	spin_lock(cfqd->queue->queue_lock);
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	cfqq = cic->cfqq[ASYNC];
 	if (cfqq) {
@@ -1236,7 +1240,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic)
 	if (cfqq)
 		cfq_mark_cfqq_prio_changed(cfqq);
 
-	spin_unlock(cfqd->queue->queue_lock);
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 static void cfq_ioc_set_ioprio(struct io_context *ioc)
@@ -1362,6 +1366,7 @@ cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	struct rb_node **p;
 	struct rb_node *parent;
 	struct cfq_io_context *__cic;
+	unsigned long flags;
 	void *k;
 
 	cic->ioc = ioc;
@@ -1391,9 +1396,9 @@ restart:
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 
-	spin_lock_irq(cfqd->queue->queue_lock);
+	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 	list_add(&cic->queue_list, &cfqd->cic_list);
-	spin_unlock_irq(cfqd->queue->queue_lock);
+	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
 }
 
 /*
@@ -1650,9 +1655,6 @@ static void cfq_insert_request(request_queue_t *q, struct request *rq)
 
 	cfq_add_rq_rb(rq);
 
-	if (!cfq_cfqq_on_rr(cfqq))
-		cfq_add_cfqq_rr(cfqd, cfqq);
-
 	list_add_tail(&rq->queuelist, &cfqq->fifo);
 
 	cfq_rq_enqueued(cfqd, cfqq, rq);
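
The cfq-iosched.c hunks above share two themes: the cfq_add_cfqq_rr() call moves from cfq_insert_request() into cfq_add_rq_rb(), so every path that adds a request to the sort tree also puts the queue on the rr list, and changed_ioprio()/cfq_cic_link() switch from spin_lock()/spin_lock_irq() on queue_lock to the irqsave/irqrestore variants, which work regardless of the caller's interrupt state. A minimal sketch of that locking pattern follows; the helper name and the bare spinlock_t are illustration only, not part of the patch.

#include <linux/spinlock.h>

/*
 * Hypothetical illustration of the pattern used above: _irqsave records the
 * caller's interrupt state in 'flags' and _irqrestore puts it back, so the
 * critical section is safe whether or not interrupts were already disabled
 * when the function was entered.
 */
static void update_state_locked(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	/* ... modify data that can also be reached from interrupt context ... */
	spin_unlock_irqrestore(lock, flags);
}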
diff --git a/block/elevator.c b/block/elevator.c
index 487dd3da8853..8ccd163254b8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -93,21 +93,18 @@ static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
 static struct elevator_type *elevator_find(const char *name)
 {
-	struct elevator_type *e = NULL;
+	struct elevator_type *e;
 	struct list_head *entry;
 
 	list_for_each(entry, &elv_list) {
-		struct elevator_type *__e;
 
-		__e = list_entry(entry, struct elevator_type, list);
+		e = list_entry(entry, struct elevator_type, list);
 
-		if (!strcmp(__e->elevator_name, name)) {
-			e = __e;
-			break;
-		}
+		if (!strcmp(e->elevator_name, name))
+			return e;
 	}
 
-	return e;
+	return NULL;
 }
 
 static void elevator_put(struct elevator_type *e)
@@ -1088,7 +1085,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 	struct list_head *entry;
 	int len = 0;
 
-	spin_lock_irq(q->queue_lock);
+	spin_lock_irq(&elv_list_lock);
 	list_for_each(entry, &elv_list) {
 		struct elevator_type *__e;
 
@@ -1098,7 +1095,7 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 		else
 			len += sprintf(name+len, "%s ", __e->elevator_name);
 	}
-	spin_unlock_irq(q->queue_lock);
+	spin_unlock_irq(&elv_list_lock);
 
 	len += sprintf(len+name, "\n");
 	return len;
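
The elevator.c change rewrites elevator_find() to return the matching elevator_type directly from inside the list walk and fall through to NULL, instead of carrying a result pointer plus break; elv_iosched_show() also switches from the per-queue queue_lock to elv_list_lock, the lock that guards the global elv_list it is iterating. Below is a hypothetical lookup in the same early-return style; the struct and function names are made up for illustration and are not part of the patch.

#include <linux/list.h>
#include <linux/string.h>

/* Made-up type and lookup mirroring the new elevator_find() shape:
 * return the match from inside the walk, fall through to NULL otherwise. */
struct named_item {
	struct list_head list;
	const char *name;
};

static struct named_item *find_by_name(struct list_head *head, const char *name)
{
	struct list_head *entry;

	list_for_each(entry, head) {
		struct named_item *item = list_entry(entry, struct named_item, list);

		if (!strcmp(item->name, name))
			return item;
	}

	return NULL;
}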
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index c847e17e5caa..9eaee6640535 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -56,11 +56,6 @@ static kmem_cache_t *requestq_cachep;
  */
 static kmem_cache_t *iocontext_cachep;
 
-static wait_queue_head_t congestion_wqh[2] = {
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[0]),
-		__WAIT_QUEUE_HEAD_INITIALIZER(congestion_wqh[1])
-};
-
 /*
  * Controlling structure to kblockd
  */
@@ -112,35 +107,6 @@ static void blk_queue_congestion_threshold(struct request_queue *q)
 	q->nr_congestion_off = nr;
 }
 
-/*
- * A queue has just exitted congestion. Note this in the global counter of
- * congested queues, and wake up anyone who was waiting for requests to be
- * put back.
- */
-static void clear_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	clear_bit(bit, &q->backing_dev_info.state);
-	smp_mb__after_clear_bit();
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
-/*
- * A queue has just entered congestion. Flag that in the queue's VM-visible
- * state flags and increment the global gounter of congested queues.
- */
-static void set_queue_congested(request_queue_t *q, int rw)
-{
-	enum bdi_state bit;
-
-	bit = (rw == WRITE) ? BDI_write_congested : BDI_read_congested;
-	set_bit(bit, &q->backing_dev_info.state);
-}
-
 /**
  * blk_get_backing_dev_info - get the address of a queue's backing_dev_info
  * @bdev:	device
@@ -159,7 +125,6 @@ struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev)
 		ret = &q->backing_dev_info;
 	return ret;
 }
-
 EXPORT_SYMBOL(blk_get_backing_dev_info);
 
 void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
@@ -167,7 +132,6 @@ void blk_queue_activity_fn(request_queue_t *q, activity_fn *fn, void *data)
 	q->activity_fn = fn;
 	q->activity_data = data;
 }
-
 EXPORT_SYMBOL(blk_queue_activity_fn);
 
 /**
@@ -2067,7 +2031,7 @@ static void __freed_request(request_queue_t *q, int rw)
 	struct request_list *rl = &q->rq;
 
 	if (rl->count[rw] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, rw);
+		blk_clear_queue_congested(q, rw);
 
 	if (rl->count[rw] + 1 <= q->nr_requests) {
 		if (waitqueue_active(&rl->wait[rw]))
@@ -2137,7 +2101,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 				}
 			}
 		}
-		set_queue_congested(q, rw);
+		blk_set_queue_congested(q, rw);
 	}
 
 	/*
@@ -2755,41 +2719,6 @@ void blk_end_sync_rq(struct request *rq, int error)
 }
 EXPORT_SYMBOL(blk_end_sync_rq);
 
-/**
- * blk_congestion_wait - wait for a queue to become uncongested
- * @rw: READ or WRITE
- * @timeout: timeout in jiffies
- *
- * Waits for up to @timeout jiffies for a queue (any queue) to exit congestion.
- * If no queues are congested then just wait for the next request to be
- * returned.
- */
-long blk_congestion_wait(int rw, long timeout)
-{
-	long ret;
-	DEFINE_WAIT(wait);
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
-	ret = io_schedule_timeout(timeout);
-	finish_wait(wqh, &wait);
-	return ret;
-}
-
-EXPORT_SYMBOL(blk_congestion_wait);
-
-/**
- * blk_congestion_end - wake up sleepers on a congestion queue
- * @rw: READ or WRITE
- */
-void blk_congestion_end(int rw)
-{
-	wait_queue_head_t *wqh = &congestion_wqh[rw];
-
-	if (waitqueue_active(wqh))
-		wake_up(wqh);
-}
-
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -3070,6 +2999,7 @@ void generic_make_request(struct bio *bio)
 {
 	request_queue_t *q;
 	sector_t maxsector;
+	sector_t old_sector;
 	int ret, nr_sectors = bio_sectors(bio);
 	dev_t old_dev;
 
@@ -3098,7 +3028,7 @@ void generic_make_request(struct bio *bio)
 	 * NOTE: we don't repeat the blk_size check for each new device.
 	 * Stacking drivers are expected to know what they are doing.
 	 */
-	maxsector = -1;
+	old_sector = -1;
 	old_dev = 0;
 	do {
 		char b[BDEVNAME_SIZE];
@@ -3132,15 +3062,31 @@ end_io:
 		 */
 		blk_partition_remap(bio);
 
-		if (maxsector != -1)
+		if (old_sector != -1)
 			blk_add_trace_remap(q, bio, old_dev, bio->bi_sector,
-					    maxsector);
+					    old_sector);
 
 		blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
 
-		maxsector = bio->bi_sector;
+		old_sector = bio->bi_sector;
 		old_dev = bio->bi_bdev->bd_dev;
 
+		maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+		if (maxsector) {
+			sector_t sector = bio->bi_sector;
+
+			if (maxsector < nr_sectors ||
+			    maxsector - nr_sectors < sector) {
+				/*
+				 * This may well happen - partitions are not
+				 * checked to make sure they are within the size
+				 * of the whole device.
+				 */
+				handle_bad_sector(bio);
+				goto end_io;
+			}
+		}
+
 		ret = q->make_request_fn(q, bio);
 	} while (ret);
 }
@@ -3765,14 +3711,14 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count)
 	blk_queue_congestion_threshold(q);
 
 	if (rl->count[READ] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, READ);
+		blk_set_queue_congested(q, READ);
 	else if (rl->count[READ] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, READ);
+		blk_clear_queue_congested(q, READ);
 
 	if (rl->count[WRITE] >= queue_congestion_on_threshold(q))
-		set_queue_congested(q, WRITE);
+		blk_set_queue_congested(q, WRITE);
 	else if (rl->count[WRITE] < queue_congestion_off_threshold(q))
-		clear_queue_congested(q, WRITE);
+		blk_clear_queue_congested(q, WRITE);
 
 	if (rl->count[READ] >= q->nr_requests) {
 		blk_set_queue_full(q, READ);
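
In ll_rw_blk.c the private set_queue_congested()/clear_queue_congested() helpers and the blk_congestion_wait()/blk_congestion_end() waiters are removed, and callers switch to blk_set_queue_congested()/blk_clear_queue_congested(), which are declared outside this file (the replacements are not shown on this side of the merge). generic_make_request() also repeats the end-of-device check after every remap in the stacking loop, and the test is written so the sector arithmetic cannot overflow. A standalone sketch of that bounds test follows; the sector_t typedef and the function name are assumptions for illustration, since the real code operates on the bio fields directly.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t sector_t;	/* stand-in for the kernel's sector type */

/*
 * Mirrors the check added in generic_make_request(): testing
 * "maxsector - nr_sectors < sector" instead of "sector + nr_sectors > maxsector"
 * avoids overflowing sector_t when sector is near the top of its range; the
 * first disjunct guarantees the subtraction cannot underflow.
 */
static bool bio_past_end_of_device(sector_t sector, unsigned int nr_sectors,
				   sector_t maxsector)
{
	if (!maxsector)
		return false;	/* device size unknown; nothing to check here */
	return maxsector < nr_sectors || maxsector - nr_sectors < sector;
}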
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index ac63964b7242..dcd9c71fe8d3 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -246,10 +246,10 @@ static int sg_io(struct file *file, request_queue_t *q,
 	switch (hdr->dxfer_direction) {
 	default:
 		return -EINVAL;
-	case SG_DXFER_TO_FROM_DEV:
 	case SG_DXFER_TO_DEV:
 		writing = 1;
 		break;
+	case SG_DXFER_TO_FROM_DEV:
 	case SG_DXFER_FROM_DEV:
 		break;
 	}
@@ -286,9 +286,8 @@ static int sg_io(struct file *file, request_queue_t *q,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr->cmd_len;
+	memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
 	memcpy(rq->cmd, cmd, hdr->cmd_len);
-	if (sizeof(rq->cmd) != hdr->cmd_len)
-		memset(rq->cmd + hdr->cmd_len, 0, sizeof(rq->cmd) - hdr->cmd_len);
 
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
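
The scsi_ioctl.c hunks move SG_DXFER_TO_FROM_DEV out of the "writing = 1" group in sg_io() and change how the CDB is prepared: instead of zero-padding the tail of rq->cmd only when the command is shorter than the buffer, the whole buffer is cleared before the copy, so no stale bytes can ever follow the CDB. A hypothetical helper showing that prepare pattern is sketched below; the helper itself is not in the patch, and the BLK_MAX_CDB value of 16 reflects the 2.6-era block layer.

#include <string.h>

#define BLK_MAX_CDB	16	/* CDB buffer size in the 2.6 block layer */

/*
 * Hypothetical helper illustrating the new pattern: clear the whole CDB
 * buffer first, then copy the caller's command, so nothing stale trails
 * the CDB (some ATAPI devices reject commands with garbage after the CDB).
 * The caller must ensure cmd_len <= BLK_MAX_CDB, as sg_io() checks earlier.
 */
static void fill_cdb(unsigned char *dst, const unsigned char *cmd,
		     unsigned int cmd_len)
{
	memset(dst, 0, BLK_MAX_CDB);
	memcpy(dst, cmd, cmd_len);
}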