77 files changed, 704 insertions(+), 903 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-block b/Documentation/ABI/testing/sysfs-block
index d2f90334bb93..4873c759d535 100644
--- a/Documentation/ABI/testing/sysfs-block
+++ b/Documentation/ABI/testing/sysfs-block
@@ -128,3 +128,17 @@ Description:
 		preferred request size for workloads where sustained
 		throughput is desired.  If no optimal I/O size is
 		reported this file contains 0.
+
+What:		/sys/block/<disk>/queue/nomerges
+Date:		January 2010
+Contact:
+Description:
+		Standard I/O elevator operations include attempts to
+		merge contiguous I/Os. For known random I/O loads these
+		attempts will always fail and result in extra cycles
+		being spent in the kernel. This allows one to turn off
+		this behavior in one of two ways: When set to 1, complex
+		merge checks are disabled, but the simple one-shot merges
+		with the previous I/O request are enabled. When set to 2,
+		all merge tries are disabled. The default value is 0,
+		which enables all types of merge tries.
diff --git a/Documentation/block/queue-sysfs.txt b/Documentation/block/queue-sysfs.txt
index e164403f60e1..f65274081c8d 100644
--- a/Documentation/block/queue-sysfs.txt
+++ b/Documentation/block/queue-sysfs.txt
@@ -25,11 +25,11 @@ size allowed by the hardware.
 
 nomerges (RW)
 -------------
-This enables the user to disable the lookup logic involved with IO merging
-requests in the block layer. Merging may still occur through a direct
-1-hit cache, since that comes for (almost) free. The IO scheduler will not
-waste cycles doing tree/hash lookups for merges if nomerges is 1. Defaults
-to 0, enabling all merges.
+This enables the user to disable the lookup logic involved with IO
+merging requests in the block layer. By default (0) all merges are
+enabled. When set to 1 only simple one-hit merges will be tried. When
+set to 2 no merge algorithms will be tried (including one-hit or more
+complex tree/hash lookups).
 
 nr_requests (RW)
 ----------------
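
Both documentation hunks describe the same three-level knob. As a rough illustration of driving it from user space (a sketch, not part of the patch; the device name "sda" and the error handling are assumptions for the example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        const char *path = "/sys/block/sda/queue/nomerges";
        char buf[4];
        int fd = open(path, O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }
        /* 0 = all merges, 1 = one-hit merges only, 2 = no merges at all */
        if (write(fd, "2", 1) != 1)
                perror("write");
        lseek(fd, 0, SEEK_SET);
        if (read(fd, buf, sizeof(buf) - 1) > 0)
                printf("nomerges is now %c\n", buf[0]);
        close(fd);
        return 0;
}
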
diff --git a/arch/um/drivers/ubd_kern.c b/arch/um/drivers/ubd_kern.c
index 5ff554677f40..c1ff6903b622 100644
--- a/arch/um/drivers/ubd_kern.c
+++ b/arch/um/drivers/ubd_kern.c
@@ -747,7 +747,7 @@ static int ubd_open_dev(struct ubd *ubd_dev)
 	ubd_dev->fd = fd;
 
 	if(ubd_dev->cow.file != NULL){
-		blk_queue_max_sectors(ubd_dev->queue, 8 * sizeof(long));
+		blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long));
 
 		err = -ENOMEM;
 		ubd_dev->cow.bitmap = vmalloc(ubd_dev->cow.bitmap_len);
@@ -849,7 +849,7 @@ static int ubd_add(int n, char **error_out)
 	}
 	ubd_dev->queue->queuedata = ubd_dev;
 
-	blk_queue_max_hw_segments(ubd_dev->queue, MAX_SG);
+	blk_queue_max_segments(ubd_dev->queue, MAX_SG);
 	err = ubd_disk_register(UBD_MAJOR, ubd_dev->size, n, &ubd_gendisk[n]);
 	if(err){
 		*error_out = "Failed to register device";
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index e7dbbaf5fb3e..c85d74cae200 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -23,20 +23,6 @@ static LIST_HEAD(blkio_list);
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
 
-bool blkiocg_css_tryget(struct blkio_cgroup *blkcg)
-{
-	if (!css_tryget(&blkcg->css))
-		return false;
-	return true;
-}
-EXPORT_SYMBOL_GPL(blkiocg_css_tryget);
-
-void blkiocg_css_put(struct blkio_cgroup *blkcg)
-{
-	css_put(&blkcg->css);
-}
-EXPORT_SYMBOL_GPL(blkiocg_css_put);
-
 struct blkio_cgroup *cgroup_to_blkio_cgroup(struct cgroup *cgroup)
 {
 	return container_of(cgroup_subsys_state(cgroup, blkio_subsys_id),
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 4d316df863b4..84bf745fa775 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -43,9 +43,6 @@ struct blkio_group {
 	unsigned long sectors;
 };
 
-extern bool blkiocg_css_tryget(struct blkio_cgroup *blkcg);
-extern void blkiocg_css_put(struct blkio_cgroup *blkcg);
-
 typedef void (blkio_unlink_group_fn) (void *key, struct blkio_group *blkg);
 typedef void (blkio_update_group_weight_fn) (struct blkio_group *blkg,
 					unsigned int weight);
diff --git a/block/blk-core.c b/block/blk-core.c
index d1a9a0a64f95..9fe174dc74d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1490,9 +1490,9 @@ end_io:
 	/*
 	 * We only want one ->make_request_fn to be active at a time,
 	 * else stack usage with stacked devices could be a problem.
-	 * So use current->bio_{list,tail} to keep a list of requests
+	 * So use current->bio_list to keep a list of requests
 	 * submitted by a make_request_fn function.
-	 * current->bio_tail is also used as a flag to say if
+	 * current->bio_list is also used as a flag to say if
 	 * generic_make_request is currently active in this task or not.
 	 * If it is NULL, then no make_request is active.  If it is non-NULL,
 	 * then a make_request is active, and new requests should be added
@@ -1500,11 +1500,11 @@ end_io:
  */
 void generic_make_request(struct bio *bio)
 {
-	if (current->bio_tail) {
+	struct bio_list bio_list_on_stack;
+
+	if (current->bio_list) {
 		/* make_request is active */
-		*(current->bio_tail) = bio;
-		bio->bi_next = NULL;
-		current->bio_tail = &bio->bi_next;
+		bio_list_add(current->bio_list, bio);
 		return;
 	}
 	/* following loop may be a bit non-obvious, and so deserves some
@@ -1512,30 +1512,27 @@ void generic_make_request(struct bio *bio)
 	 * Before entering the loop, bio->bi_next is NULL (as all callers
 	 * ensure that) so we have a list with a single bio.
 	 * We pretend that we have just taken it off a longer list, so
-	 * we assign bio_list to the next (which is NULL) and bio_tail
-	 * to &bio_list, thus initialising the bio_list of new bios to be
+	 * we assign bio_list to a pointer to the bio_list_on_stack,
+	 * thus initialising the bio_list of new bios to be
 	 * added.  __generic_make_request may indeed add some more bios
 	 * through a recursive call to generic_make_request.  If it
 	 * did, we find a non-NULL value in bio_list and re-enter the loop
 	 * from the top.  In this case we really did just take the bio
-	 * of the top of the list (no pretending) and so fixup bio_list and
-	 * bio_tail or bi_next, and call into __generic_make_request again.
+	 * of the top of the list (no pretending) and so remove it from
+	 * bio_list, and call into __generic_make_request again.
 	 *
 	 * The loop was structured like this to make only one call to
 	 * __generic_make_request (which is important as it is large and
 	 * inlined) and to keep the structure simple.
 	 */
 	BUG_ON(bio->bi_next);
+	bio_list_init(&bio_list_on_stack);
+	current->bio_list = &bio_list_on_stack;
 	do {
-		current->bio_list = bio->bi_next;
-		if (bio->bi_next == NULL)
-			current->bio_tail = &current->bio_list;
-		else
-			bio->bi_next = NULL;
 		__generic_make_request(bio);
-		bio = current->bio_list;
+		bio = bio_list_pop(current->bio_list);
 	} while (bio);
-	current->bio_tail = NULL; /* deactivate */
+	current->bio_list = NULL; /* deactivate */
 }
 EXPORT_SYMBOL(generic_make_request);
 
@@ -1617,8 +1614,7 @@ int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 	 * limitation.
 	 */
 	blk_recalc_rq_segments(rq);
-	if (rq->nr_phys_segments > queue_max_phys_segments(q) ||
-	    rq->nr_phys_segments > queue_max_hw_segments(q)) {
+	if (rq->nr_phys_segments > queue_max_segments(q)) {
 		printk(KERN_ERR "%s: over max segments limit.\n", __func__);
 		return -EIO;
 	}
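
The generic_make_request() rework above swaps the hand-rolled bio_tail chaining for the bio_list API and an on-stack list, so recursive submissions are queued and drained iteratively. A stand-alone user-space model of that pattern (struct bio is mocked up here; only the head/tail list semantics mirror the kernel's bio_list_add()/bio_list_pop(), everything else is illustrative):

#include <stdio.h>
#include <stddef.h>

struct bio { struct bio *bi_next; int id; };
struct bio_list { struct bio *head, *tail; };

static struct bio_list *current_list;   /* models current->bio_list */

static void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; }

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;
        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;
        bl->tail = bio;
}

static struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bio->bi_next;
                if (!bl->head)
                        bl->tail = NULL;
                bio->bi_next = NULL;
        }
        return bio;
}

static void submit(struct bio *bio);

/* A make_request_fn stand-in that "recursively" submits one child bio. */
static void make_request(struct bio *bio)
{
        static struct bio child = { .id = 2 };

        printf("processing bio %d\n", bio->id);
        if (bio->id == 1)
                submit(&child); /* would have recursed before the change */
}

static void submit(struct bio *bio)
{
        struct bio_list on_stack;

        if (current_list) {                     /* active: just queue it */
                bio_list_add(current_list, bio);
                return;
        }
        bio_list_init(&on_stack);               /* become the active instance */
        current_list = &on_stack;
        do {
                make_request(bio);
                bio = bio_list_pop(current_list);
        } while (bio);
        current_list = NULL;                    /* deactivate */
}

int main(void)
{
        struct bio first = { .id = 1 };

        submit(&first);
        return 0;
}
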
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 98e6bf61b0ac..3f65c8aadb2f 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -91,7 +91,7 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		spin_lock_init(&ret->lock);
 		ret->ioprio_changed = 0;
 		ret->ioprio = 0;
-		ret->last_waited = jiffies; /* doesn't matter... */
+		ret->last_waited = 0; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 		INIT_HLIST_HEAD(&ret->cic_list);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..5e7dc9973458 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -206,8 +206,7 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
 	int nr_phys_segs = bio_phys_segments(q, bio);
 
-	if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) ||
-	    req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) {
+	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
 		req->cmd_flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -300,10 +299,7 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		total_phys_segments--;
 	}
 
-	if (total_phys_segments > queue_max_phys_segments(q))
-		return 0;
-
-	if (total_phys_segments > queue_max_hw_segments(q))
+	if (total_phys_segments > queue_max_segments(q))
 		return 0;
 
 	/* Merge is OK... */
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 5eeb9e0d256e..31e7a9375c13 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,10 +91,9 @@ EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
  */
 void blk_set_default_limits(struct queue_limits *lim)
 {
-	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
-	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->max_segments = BLK_MAX_SEGMENTS;
 	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
-	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
 	lim->max_sectors = BLK_DEF_MAX_SECTORS;
 	lim->max_hw_sectors = INT_MAX;
 	lim->max_discard_sectors = 0;
@@ -154,7 +153,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.data = (unsigned long)q;
 
 	blk_set_default_limits(&q->limits);
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
+	blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
 
 	/*
 	 * If the caller didn't supply a lock, fall back to our embedded
@@ -210,37 +209,32 @@ void blk_queue_bounce_limit(struct request_queue *q, u64 dma_mask)
 EXPORT_SYMBOL(blk_queue_bounce_limit);
 
 /**
- * blk_queue_max_sectors - set max sectors for a request for this queue
+ * blk_queue_max_hw_sectors - set max sectors for a request for this queue
  * @q:  the request queue for the device
- * @max_sectors:  max sectors in the usual 512b unit
+ * @max_hw_sectors:  max hardware sectors in the usual 512b unit
  *
  * Description:
- *    Enables a low level driver to set an upper limit on the size of
- *    received requests.
+ *    Enables a low level driver to set a hard upper limit,
+ *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
+ *    the device driver based upon the combined capabilities of I/O
+ *    controller and storage device.
+ *
+ *    max_sectors is a soft limit imposed by the block layer for
+ *    filesystem type requests.  This value can be overridden on a
+ *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
+ *    The soft limit can not exceed max_hw_sectors.
 **/
-void blk_queue_max_sectors(struct request_queue *q, unsigned int max_sectors)
+void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
 {
-	if ((max_sectors << 9) < PAGE_CACHE_SIZE) {
-		max_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
+	if ((max_hw_sectors << 9) < PAGE_CACHE_SIZE) {
+		max_hw_sectors = 1 << (PAGE_CACHE_SHIFT - 9);
 		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_sectors);
+		       __func__, max_hw_sectors);
 	}
 
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors;
-	else {
-		q->limits.max_sectors = BLK_DEF_MAX_SECTORS;
-		q->limits.max_hw_sectors = max_sectors;
-	}
-}
-EXPORT_SYMBOL(blk_queue_max_sectors);
-
-void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors)
-{
-	if (BLK_DEF_MAX_SECTORS > max_sectors)
-		q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS;
-	else
-		q->limits.max_hw_sectors = max_sectors;
+	q->limits.max_hw_sectors = max_hw_sectors;
+	q->limits.max_sectors = min_t(unsigned int, max_hw_sectors,
+				      BLK_DEF_MAX_SECTORS);
 }
 EXPORT_SYMBOL(blk_queue_max_hw_sectors);
 
@@ -257,17 +251,15 @@ void blk_queue_max_discard_sectors(struct request_queue *q,
 EXPORT_SYMBOL(blk_queue_max_discard_sectors);
 
 /**
- * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+ * blk_queue_max_segments - set max hw segments for a request for this queue
  * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
- *    physical data segments in a request.  This would be the largest sized
- *    scatter list the driver could handle.
+ *    hw data segments in a request.
 **/
-void blk_queue_max_phys_segments(struct request_queue *q,
-				 unsigned short max_segments)
+void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
 {
 	if (!max_segments) {
 		max_segments = 1;
@@ -275,33 +267,9 @@ void blk_queue_max_phys_segments(struct request_queue *q,
 		       __func__, max_segments);
 	}
 
-	q->limits.max_phys_segments = max_segments;
+	q->limits.max_segments = max_segments;
 }
-EXPORT_SYMBOL(blk_queue_max_phys_segments);
-
-/**
- * blk_queue_max_hw_segments - set max hw segments for a request for this queue
- * @q:  the request queue for the device
- * @max_segments:  max number of segments
- *
- * Description:
- *    Enables a low level driver to set an upper limit on the number of
- *    hw data segments in a request.  This would be the largest number of
- *    address/length pairs the host adapter can actually give at once
- *    to the device.
- **/
-void blk_queue_max_hw_segments(struct request_queue *q,
-			       unsigned short max_segments)
-{
-	if (!max_segments) {
-		max_segments = 1;
-		printk(KERN_INFO "%s: set to minimum %d\n",
-		       __func__, max_segments);
-	}
-
-	q->limits.max_hw_segments = max_segments;
-}
-EXPORT_SYMBOL(blk_queue_max_hw_segments);
+EXPORT_SYMBOL(blk_queue_max_segments);
 
 /**
  * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
@@ -507,7 +475,7 @@ static unsigned int lcm(unsigned int a, unsigned int b)
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
- * @offset:  offset to beginning of data within component device
+ * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
@@ -525,10 +493,9 @@ static unsigned int lcm(unsigned int a, unsigned int b)
 *    the alignment_offset is undefined.
 */
 int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
-		     sector_t offset)
+		     sector_t start)
 {
-	sector_t alignment;
-	unsigned int top, bottom, ret = 0;
+	unsigned int top, bottom, alignment, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -537,18 +504,14 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
 					    b->seg_boundary_mask);
 
-	t->max_phys_segments = min_not_zero(t->max_phys_segments,
-					    b->max_phys_segments);
-
-	t->max_hw_segments = min_not_zero(t->max_hw_segments,
-					  b->max_hw_segments);
+	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
 
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
 	t->misaligned |= b->misaligned;
 
-	alignment = queue_limit_alignment_offset(b, offset);
+	alignment = queue_limit_alignment_offset(b, start);
 
 	/* Bottom device has different alignment.  Check that it is
 	 * compatible with the current top alignment.
@@ -611,11 +574,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
-		unsigned int granularity = b->discard_granularity;
-		offset &= granularity - 1;
-
-		alignment = (granularity + b->discard_alignment - offset)
-			& (granularity - 1);
+		alignment = queue_limit_discard_alignment(b, start);
 
 		if (t->discard_granularity != 0 &&
 		    t->discard_alignment != alignment) {
@@ -657,7 +616,7 @@ int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
 
 	start += get_start_sect(bdev);
 
-	return blk_stack_limits(t, &bq->limits, start << 9);
+	return blk_stack_limits(t, &bq->limits, start);
 }
 EXPORT_SYMBOL(bdev_stack_limits);
 
@@ -668,9 +627,8 @@ EXPORT_SYMBOL(bdev_stack_limits);
 * @offset:  offset to beginning of data within component device
 *
 * Description:
- *    Merges the limits for two queues.  Returns 0 if alignment
- *    didn't change.  Returns -1 if adding the bottom device caused
- *    misalignment.
+ *    Merges the limits for a top level gendisk and a bottom level
+ *    block_device.
 */
 void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 		       sector_t offset)
@@ -678,9 +636,7 @@ void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 	struct request_queue *t = disk->queue;
 	struct request_queue *b = bdev_get_queue(bdev);
 
-	offset += get_start_sect(bdev) << 9;
-
-	if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) {
+	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
 		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];
 
 		disk_name(disk, 0, top);
@@ -752,22 +708,19 @@ EXPORT_SYMBOL(blk_queue_update_dma_pad);
 *    does is adjust the queue so that the buf is always appended
 *    silently to the scatterlist.
 *
- * Note: This routine adjusts max_hw_segments to make room for
- * appending the drain buffer.  If you call
- * blk_queue_max_hw_segments() or blk_queue_max_phys_segments() after
- * calling this routine, you must set the limit to one fewer than your
- * device can support otherwise there won't be room for the drain
- * buffer.
+ * Note: This routine adjusts max_hw_segments to make room for appending
+ * the drain buffer.  If you call blk_queue_max_segments() after calling
+ * this routine, you must set the limit to one fewer than your device
+ * can support otherwise there won't be room for the drain buffer.
 */
 int blk_queue_dma_drain(struct request_queue *q,
 			dma_drain_needed_fn *dma_drain_needed,
 			void *buf, unsigned int size)
 {
-	if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2)
+	if (queue_max_segments(q) < 2)
 		return -EINVAL;
 	/* make room for appending the drain */
-	blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1);
-	blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1);
+	blk_queue_max_segments(q, queue_max_segments(q) - 1);
 	q->dma_drain_needed = dma_drain_needed;
 	q->dma_drain_buffer = buf;
 	q->dma_drain_size = size;
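
The key semantic change in blk_queue_max_hw_sectors() above is that a driver now supplies only the hardware cap, and the block layer derives the default soft limit from it. A minimal model of that relationship (BLK_DEF_MAX_SECTORS was 1024 at this point; the function body is a simplification for illustration, not the kernel implementation):

#include <stdio.h>

#define BLK_DEF_MAX_SECTORS 1024

struct queue_limits {
        unsigned int max_sectors;       /* soft, tunable via max_sectors_kb */
        unsigned int max_hw_sectors;    /* hard, set by the driver */
};

static void set_max_hw_sectors(struct queue_limits *lim, unsigned int hw)
{
        lim->max_hw_sectors = hw;
        lim->max_sectors = hw < BLK_DEF_MAX_SECTORS ? hw : BLK_DEF_MAX_SECTORS;
}

int main(void)
{
        struct queue_limits lim;

        set_max_hw_sectors(&lim, 65535);        /* big controller cap */
        printf("hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);

        set_max_hw_sectors(&lim, 256);          /* small device cap */
        printf("hw=%u soft=%u\n", lim.max_hw_sectors, lim.max_sectors);
        return 0;
}
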
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8606c9543fdd..e85442415db3 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -189,7 +189,8 @@ static ssize_t queue_nonrot_store(struct request_queue *q, const char *page,
 
 static ssize_t queue_nomerges_show(struct request_queue *q, char *page)
 {
-	return queue_var_show(blk_queue_nomerges(q), page);
+	return queue_var_show((blk_queue_nomerges(q) << 1) |
+			       blk_queue_noxmerges(q), page);
 }
 
 static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
@@ -199,10 +200,12 @@ static ssize_t queue_nomerges_store(struct request_queue *q, const char *page,
 	ssize_t ret = queue_var_store(&nm, page, count);
 
 	spin_lock_irq(q->queue_lock);
-	if (nm)
+	queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+	queue_flag_clear(QUEUE_FLAG_NOXMERGES, q);
+	if (nm == 2)
 		queue_flag_set(QUEUE_FLAG_NOMERGES, q);
-	else
-		queue_flag_clear(QUEUE_FLAG_NOMERGES, q);
+	else if (nm)
+		queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
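
The show/store pair above packs the two queue flags into the single user-visible value documented earlier: 0, 1 or 2. A small model of the round trip, assuming the flag bits behave as in the patch:

#include <stdio.h>
#include <stdbool.h>

static unsigned int encode(bool nomerges, bool noxmerges)
{
        return ((unsigned int)nomerges << 1) | noxmerges;
}

static void store(unsigned long nm, bool *nomerges, bool *noxmerges)
{
        *nomerges = *noxmerges = false; /* both flags cleared first */
        if (nm == 2)
                *nomerges = true;       /* no merging at all */
        else if (nm)
                *noxmerges = true;      /* one-hit cache only */
}

int main(void)
{
        bool nom, nox;
        unsigned long v;

        for (v = 0; v <= 2; v++) {
                store(v, &nom, &nox);
                printf("stored %lu -> shown %u\n", v, encode(nom, nox));
        }
        return 0;
}
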
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 023f4e69a337..dee9d9378fee 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -19,7 +19,7 @@
  * tunables
  */
 /* max queue in one round of service */
-static const int cfq_quantum = 4;
+static const int cfq_quantum = 8;
 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 };
 /* maximum backwards seek, in KiB */
 static const int cfq_back_max = 16 * 1024;
@@ -46,8 +46,9 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT       12
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+#define CFQQ_SEEK_THR		(sector_t)(8 * 100)
+#define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
+#define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
@@ -77,11 +78,12 @@ struct cfq_rb_root {
 	struct rb_root rb;
 	struct rb_node *left;
 	unsigned count;
+	unsigned total_weight;
 	u64 min_vdisktime;
 	struct rb_node *active;
-	unsigned total_weight;
 };
-#define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, 0, 0, }
+#define CFQ_RB_ROOT	(struct cfq_rb_root) { .rb = RB_ROOT, .left = NULL, \
+			.count = 0, .min_vdisktime = 0, }
 
 /*
  * Per process-grouping structure
@@ -115,11 +117,11 @@ struct cfq_queue {
 	/* time when queue got scheduled in to dispatch first request. */
 	unsigned long dispatch_start;
 	unsigned int allocated_slice;
+	unsigned int slice_dispatch;
 	/* time when first request from queue completed and slice started. */
 	unsigned long slice_start;
 	unsigned long slice_end;
 	long slice_resid;
-	unsigned int slice_dispatch;
 
 	/* pending metadata requests */
 	int meta_pending;
@@ -130,13 +132,11 @@ struct cfq_queue {
 	unsigned short ioprio, org_ioprio;
 	unsigned short ioprio_class, org_ioprio_class;
 
-	unsigned int seek_samples;
-	u64 seek_total;
-	sector_t seek_mean;
-	sector_t last_request_pos;
-
 	pid_t pid;
 
+	u32 seek_history;
+	sector_t last_request_pos;
+
 	struct cfq_rb_root *service_tree;
 	struct cfq_queue *new_cfqq;
 	struct cfq_group *cfqg;
@@ -223,8 +223,8 @@ struct cfq_data {
 
 	unsigned int busy_queues;
 
-	int rq_in_driver[2];
-	int sync_flight;
+	int rq_in_driver;
+	int rq_in_flight[2];
 
 	/*
 	 * queue-depth detection
@@ -417,11 +417,6 @@ static struct cfq_queue *cfq_get_queue(struct cfq_data *, bool,
 static struct cfq_io_context *cfq_cic_lookup(struct cfq_data *,
 					     struct io_context *);
 
-static inline int rq_in_driver(struct cfq_data *cfqd)
-{
-	return cfqd->rq_in_driver[0] + cfqd->rq_in_driver[1];
-}
-
 static inline struct cfq_queue *cic_to_cfqq(struct cfq_io_context *cic,
 					    bool is_sync)
 {
@@ -951,10 +946,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	struct backing_dev_info *bdi = &cfqd->queue->backing_dev_info;
 	unsigned int major, minor;
 
-	/* Do we need to take this reference */
-	if (!blkiocg_css_tryget(blkcg))
-		return NULL;;
-
 	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
 	if (cfqg || !create)
 		goto done;
@@ -985,7 +976,6 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	hlist_add_head(&cfqg->cfqd_node, &cfqd->cfqg_list);
 
 done:
-	blkiocg_css_put(blkcg);
 	return cfqg;
 }
 
@@ -1420,9 +1410,9 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 
-	cfqd->rq_in_driver[rq_is_sync(rq)]++;
+	cfqd->rq_in_driver++;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d",
-						rq_in_driver(cfqd));
+						cfqd->rq_in_driver);
 
 	cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 }
@@ -1430,12 +1420,11 @@ static void cfq_activate_request(struct request_queue *q, struct request *rq)
 static void cfq_deactivate_request(struct request_queue *q, struct request *rq)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
-	const int sync = rq_is_sync(rq);
 
-	WARN_ON(!cfqd->rq_in_driver[sync]);
-	cfqd->rq_in_driver[sync]--;
+	WARN_ON(!cfqd->rq_in_driver);
+	cfqd->rq_in_driver--;
 	cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "deactivate rq, drv=%d",
-						rq_in_driver(cfqd));
+						cfqd->rq_in_driver);
 }
 
 static void cfq_remove_request(struct request *rq)
@@ -1673,16 +1662,7 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
-	sector_t sdist = cfqq->seek_mean;
-
-	if (!sample_valid(cfqq->seek_samples))
-		sdist = CFQQ_SEEK_THR;
-
-	/* if seek_mean is big, using it as close criteria is meaningless */
-	if (sdist > CFQQ_SEEK_THR && !for_preempt)
-		sdist = CFQQ_SEEK_THR;
-
-	return cfq_dist_from_last(cfqd, rq) <= sdist;
+	return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1878,8 +1858,7 @@ static void cfq_dispatch_insert(struct request_queue *q, struct request *rq)
 	cfqq->dispatched++;
 	elv_dispatch_sort(q, rq);
 
-	if (cfq_cfqq_sync(cfqq))
-		cfqd->sync_flight++;
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]++;
 	cfqq->nr_sectors += blk_rq_sectors(rq);
 }
 
@@ -2219,6 +2198,19 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
+	struct cfq_queue *cfqq)
+{
+	/* the queue hasn't finished any request, can't estimate */
+	if (cfq_cfqq_slice_new(cfqq))
+		return 1;
+	if (time_after(jiffies + cfqd->cfq_slice_idle * cfqq->dispatched,
+		cfqq->slice_end))
+		return 1;
+
+	return 0;
+}
+
 static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
 	unsigned int max_dispatch;
@@ -2226,16 +2218,16 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	/*
 	 * Drain async requests before we start sync IO
 	 */
-	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_driver[BLK_RW_ASYNC])
+	if (cfq_should_idle(cfqd, cfqq) && cfqd->rq_in_flight[BLK_RW_ASYNC])
 		return false;
 
 	/*
 	 * If this is an async queue and we have sync IO in flight, let it wait
 	 */
-	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+	if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
 		return false;
 
-	max_dispatch = cfqd->cfq_quantum;
+	max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
 	if (cfq_class_idle(cfqq))
 		max_dispatch = 1;
 
@@ -2252,13 +2244,22 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 		/*
 		 * We have other queues, don't allow more IO from this one
 		 */
-		if (cfqd->busy_queues > 1)
+		if (cfqd->busy_queues > 1 && cfq_slice_used_soon(cfqd, cfqq))
 			return false;
 
 		/*
 		 * Sole queue user, no limit
 		 */
-		max_dispatch = -1;
+		if (cfqd->busy_queues == 1)
+			max_dispatch = -1;
+		else
+			/*
+			 * Normally we start throttling cfqq when cfq_quantum/2
+			 * requests have been dispatched. But we can drive
+			 * deeper queue depths at the beginning of slice
+			 * subjected to upper limit of cfq_quantum.
+			 * */
+			max_dispatch = cfqd->cfq_quantum;
 	}
 
 	/*
@@ -2980,30 +2981,20 @@ static void
 cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		       struct request *rq)
 {
-	sector_t sdist;
-	u64 total;
-
-	if (!cfqq->last_request_pos)
-		sdist = 0;
-	else if (cfqq->last_request_pos < blk_rq_pos(rq))
-		sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
-	else
-		sdist = cfqq->last_request_pos - blk_rq_pos(rq);
-
-	/*
-	 * Don't allow the seek distance to get too large from the
-	 * odd fragment, pagein, etc
-	 */
-	if (cfqq->seek_samples <= 60) /* second&third seek */
-		sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*1024);
-	else
-		sdist = min(sdist, (cfqq->seek_mean * 4) + 2*1024*64);
-
-	cfqq->seek_samples = (7*cfqq->seek_samples + 256) / 8;
-	cfqq->seek_total = (7*cfqq->seek_total + (u64)256*sdist) / 8;
-	total = cfqq->seek_total + (cfqq->seek_samples/2);
-	do_div(total, cfqq->seek_samples);
-	cfqq->seek_mean = (sector_t)total;
+	sector_t sdist = 0;
+	sector_t n_sec = blk_rq_sectors(rq);
+	if (cfqq->last_request_pos) {
+		if (cfqq->last_request_pos < blk_rq_pos(rq))
+			sdist = blk_rq_pos(rq) - cfqq->last_request_pos;
+		else
+			sdist = cfqq->last_request_pos - blk_rq_pos(rq);
+	}
+
+	cfqq->seek_history <<= 1;
+	if (blk_queue_nonrot(cfqd->queue))
+		cfqq->seek_history |= (n_sec < CFQQ_SECT_THR_NONROT);
+	else
+		cfqq->seek_history |= (sdist > CFQQ_SEEK_THR);
 }
 
 /*
@@ -3028,8 +3019,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		cfq_mark_cfqq_deep(cfqq);
 
 	if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle ||
-	    (!cfq_cfqq_deep(cfqq) && sample_valid(cfqq->seek_samples)
-	     && CFQQ_SEEKY(cfqq)))
+	    (!cfq_cfqq_deep(cfqq) && CFQQ_SEEKY(cfqq)))
 		enable_idle = 0;
 	else if (sample_valid(cic->ttime_samples)) {
 		if (cic->ttime_mean > cfqd->cfq_slice_idle)
@@ -3215,14 +3205,14 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 {
 	struct cfq_queue *cfqq = cfqd->active_queue;
 
-	if (rq_in_driver(cfqd) > cfqd->hw_tag_est_depth)
-		cfqd->hw_tag_est_depth = rq_in_driver(cfqd);
+	if (cfqd->rq_in_driver > cfqd->hw_tag_est_depth)
+		cfqd->hw_tag_est_depth = cfqd->rq_in_driver;
 
 	if (cfqd->hw_tag == 1)
 		return;
 
 	if (cfqd->rq_queued <= CFQ_HW_QUEUE_MIN &&
-	    rq_in_driver(cfqd) <= CFQ_HW_QUEUE_MIN)
+	    cfqd->rq_in_driver <= CFQ_HW_QUEUE_MIN)
 		return;
 
 	/*
@@ -3232,7 +3222,7 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
 	 */
 	if (cfqq && cfq_cfqq_idle_window(cfqq) &&
 	    cfqq->dispatched + cfqq->queued[0] + cfqq->queued[1] <
-	    CFQ_HW_QUEUE_MIN && rq_in_driver(cfqd) < CFQ_HW_QUEUE_MIN)
+	    CFQ_HW_QUEUE_MIN && cfqd->rq_in_driver < CFQ_HW_QUEUE_MIN)
 		return;
 
 	if (cfqd->hw_tag_samples++ < 50)
@@ -3285,13 +3275,12 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 
 	cfq_update_hw_tag(cfqd);
 
-	WARN_ON(!cfqd->rq_in_driver[sync]);
+	WARN_ON(!cfqd->rq_in_driver);
 	WARN_ON(!cfqq->dispatched);
-	cfqd->rq_in_driver[sync]--;
+	cfqd->rq_in_driver--;
 	cfqq->dispatched--;
 
-	if (cfq_cfqq_sync(cfqq))
-		cfqd->sync_flight--;
+	cfqd->rq_in_flight[cfq_cfqq_sync(cfqq)]--;
 
 	if (sync) {
 		RQ_CIC(rq)->last_end_request = now;
@@ -3345,7 +3334,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		}
 	}
 
-	if (!rq_in_driver(cfqd))
+	if (!cfqd->rq_in_driver)
 		cfq_schedule_dispatch(cfqd);
 }
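
The CFQ change above replaces the seek_samples/seek_mean running average with a 32-bit shift-register history: each request shifts in one "seeky" bit, and a population count flags the queue as seeky once more than 32/8 of the last 32 requests jumped farther than CFQQ_SEEK_THR. A user-space model of that bookkeeping (synthetic request positions; the GCC builtin stands in for the kernel's hweight32(); illustrative only):

#include <stdio.h>
#include <stdint.h>

#define CFQQ_SEEK_THR (8 * 100) /* sectors, as in the patch */

static int hweight32(uint32_t w)
{
        return __builtin_popcount(w);
}

int main(void)
{
        uint32_t seek_history = 0;
        uint64_t last_pos = 0;
        /* synthetic request positions alternating between distant regions */
        uint64_t reqs[] = { 8, 16, 5000000, 24, 9000000, 32,
                            13000000, 40, 17000000, 48 };
        unsigned int i;

        for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
                uint64_t pos = reqs[i];
                uint64_t sdist = pos > last_pos ? pos - last_pos
                                                : last_pos - pos;

                seek_history <<= 1;
                seek_history |= (last_pos && sdist > CFQQ_SEEK_THR);
                last_pos = pos;
        }
        printf("seeky: %s\n", hweight32(seek_history) > 32 / 8 ? "yes" : "no");
        return 0;
}
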
diff --git a/block/elevator.c b/block/elevator.c
index 9ad5ccc4c5ee..ee3a883840f2 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -474,6 +474,15 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	int ret;
 
 	/*
+	 * Levels of merges:
+	 * 	nomerges:  No merges at all attempted
+	 * 	noxmerges: Only simple one-hit cache try
+	 * 	merges:	   All merge tries attempted
+	 */
+	if (blk_queue_nomerges(q))
+		return ELEVATOR_NO_MERGE;
+
+	/*
 	 * First try one-hit cache.
 	 */
 	if (q->last_merge) {
@@ -484,7 +493,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 		}
 	}
 
-	if (blk_queue_nomerges(q))
+	if (blk_queue_noxmerges(q))
 		return ELEVATOR_NO_MERGE;
 
 	/*
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index d096fbcbc771..bea003a24d27 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1097,7 +1097,7 @@ static int ata_scsi_dev_config(struct scsi_device *sdev,
 		dev->flags |= ATA_DFLAG_NO_UNLOAD;
 
 	/* configure max sectors */
-	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);
+	blk_queue_max_hw_sectors(sdev->request_queue, dev->max_sectors);
 
 	if (dev->class == ATA_DEV_ATAPI) {
 		struct request_queue *q = sdev->request_queue;
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 0c82d335c55d..684fe04dbbb7 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -772,7 +772,7 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
 	}
 
 	blk_queue_segment_boundary(sdev->request_queue, segment_boundary);
-	blk_queue_max_hw_segments(sdev->request_queue, sg_tablesize);
+	blk_queue_max_segments(sdev->request_queue, sg_tablesize);
 	ata_port_printk(ap, KERN_INFO,
 		"DMA mask 0x%llX, segment boundary 0x%lX, hw segs %hu\n",
 		(unsigned long long)*ap->host->dev->dma_mask,
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index ce1fa923c414..459f1bc25a7b 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -2534,8 +2534,7 @@ static bool DAC960_RegisterBlockDevice(DAC960_Controller_T *Controller)
   blk_queue_bounce_limit(RequestQueue, Controller->BounceBufferLimit);
   RequestQueue->queuedata = Controller;
-  blk_queue_max_hw_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-  blk_queue_max_phys_segments(RequestQueue, Controller->DriverScatterGatherLimit);
-  blk_queue_max_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
+  blk_queue_max_segments(RequestQueue, Controller->DriverScatterGatherLimit);
+  blk_queue_max_hw_sectors(RequestQueue, Controller->MaxBlocksPerCommand);
   disk->queue = RequestQueue;
   sprintf(disk->disk_name, "rd/c%dd%d", Controller->ControllerNumber, n);
   disk->major = MajorNumber;
@@ -7134,7 +7134,7 @@ static struct DAC960_privdata DAC960_P_privdata = {
 	.MemoryWindowSize =	DAC960_PD_RegisterWindowSize,
 };
 
-static struct pci_device_id DAC960_id_table[] = {
+static const struct pci_device_id DAC960_id_table[] = {
 	{
 		.vendor		= PCI_VENDOR_ID_MYLEX,
 		.device		= PCI_DEVICE_ID_MYLEX_DAC960_GEM,
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index 4f688434daf1..c6ddeacb77fd 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -434,7 +434,7 @@ static struct brd_device *brd_alloc(int i)
 		goto out_free_dev;
 	blk_queue_make_request(brd->brd_queue, brd_make_request);
 	blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL);
-	blk_queue_max_sectors(brd->brd_queue, 1024);
+	blk_queue_max_hw_sectors(brd->brd_queue, 1024);
 	blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY);
 
 	disk = brd->brd_disk = alloc_disk(1 << part_shift);
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 9291614ac6b7..9e3af307aae1 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c | |||
| @@ -257,6 +257,79 @@ static inline void removeQ(CommandList_struct *c) | |||
| 257 | hlist_del_init(&c->list); | 257 | hlist_del_init(&c->list); |
| 258 | } | 258 | } |
| 259 | 259 | ||
| 260 | static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, | ||
| 261 | int nr_cmds) | ||
| 262 | { | ||
| 263 | int i; | ||
| 264 | |||
| 265 | if (!cmd_sg_list) | ||
| 266 | return; | ||
| 267 | for (i = 0; i < nr_cmds; i++) { | ||
| 268 | kfree(cmd_sg_list[i]); | ||
| 269 | cmd_sg_list[i] = NULL; | ||
| 270 | } | ||
| 271 | kfree(cmd_sg_list); | ||
| 272 | } | ||
| 273 | |||
| 274 | static SGDescriptor_struct **cciss_allocate_sg_chain_blocks( | ||
| 275 | ctlr_info_t *h, int chainsize, int nr_cmds) | ||
| 276 | { | ||
| 277 | int j; | ||
| 278 | SGDescriptor_struct **cmd_sg_list; | ||
| 279 | |||
| 280 | if (chainsize <= 0) | ||
| 281 | return NULL; | ||
| 282 | |||
| 283 | cmd_sg_list = kmalloc(sizeof(*cmd_sg_list) * nr_cmds, GFP_KERNEL); | ||
| 284 | if (!cmd_sg_list) | ||
| 285 | return NULL; | ||
| 286 | |||
| 287 | /* Build up chain blocks for each command */ | ||
| 288 | for (j = 0; j < nr_cmds; j++) { | ||
| 289 | /* Need a block of chainsized s/g elements. */ | ||
| 290 | cmd_sg_list[j] = kmalloc((chainsize * | ||
| 291 | sizeof(*cmd_sg_list[j])), GFP_KERNEL); | ||
| 292 | if (!cmd_sg_list[j]) { | ||
| 293 | dev_err(&h->pdev->dev, "Cannot get memory " | ||
| 294 | "for s/g chains.\n"); | ||
| 295 | goto clean; | ||
| 296 | } | ||
| 297 | } | ||
| 298 | return cmd_sg_list; | ||
| 299 | clean: | ||
| 300 | cciss_free_sg_chain_blocks(cmd_sg_list, nr_cmds); | ||
| 301 | return NULL; | ||
| 302 | } | ||
| 303 | |||
| 304 | static void cciss_unmap_sg_chain_block(ctlr_info_t *h, CommandList_struct *c) | ||
| 305 | { | ||
| 306 | SGDescriptor_struct *chain_sg; | ||
| 307 | u64bit temp64; | ||
| 308 | |||
| 309 | if (c->Header.SGTotal <= h->max_cmd_sgentries) | ||
| 310 | return; | ||
| 311 | |||
| 312 | chain_sg = &c->SG[h->max_cmd_sgentries - 1]; | ||
| 313 | temp64.val32.lower = chain_sg->Addr.lower; | ||
| 314 | temp64.val32.upper = chain_sg->Addr.upper; | ||
| 315 | pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE); | ||
| 316 | } | ||
| 317 | |||
| 318 | static void cciss_map_sg_chain_block(ctlr_info_t *h, CommandList_struct *c, | ||
| 319 | SGDescriptor_struct *chain_block, int len) | ||
| 320 | { | ||
| 321 | SGDescriptor_struct *chain_sg; | ||
| 322 | u64bit temp64; | ||
| 323 | |||
| 324 | chain_sg = &c->SG[h->max_cmd_sgentries - 1]; | ||
| 325 | chain_sg->Ext = CCISS_SG_CHAIN; | ||
| 326 | chain_sg->Len = len; | ||
| 327 | temp64.val = pci_map_single(h->pdev, chain_block, len, | ||
| 328 | PCI_DMA_TODEVICE); | ||
| 329 | chain_sg->Addr.lower = temp64.val32.lower; | ||
| 330 | chain_sg->Addr.upper = temp64.val32.upper; | ||
| 331 | } | ||
| 332 | |||
| 260 | #include "cciss_scsi.c" /* For SCSI tape support */ | 333 | #include "cciss_scsi.c" /* For SCSI tape support */ |
| 261 | 334 | ||
| 262 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 335 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
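
The two helpers above give cciss one heap-allocated chain block of chainsize SG descriptors per command, replacing the old per-command struct Cmd_sg_list. A sketch of how allocation and teardown pair up, mirroring the call sites later in this patch (error handling trimmed):

	h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, h->chainsize,
							h->nr_cmds);
	if (!h->cmd_sg_list && h->chainsize > 0)
		return -ENOMEM;	/* chaining supported, but allocation failed */
	/* ... */
	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);	/* NULL-safe */
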
| @@ -1344,26 +1417,27 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 1344 | kfree(buff); | 1417 | kfree(buff); |
| 1345 | return -ENOMEM; | 1418 | return -ENOMEM; |
| 1346 | } | 1419 | } |
| 1347 | // Fill in the command type | 1420 | /* Fill in the command type */ |
| 1348 | c->cmd_type = CMD_IOCTL_PEND; | 1421 | c->cmd_type = CMD_IOCTL_PEND; |
| 1349 | // Fill in Command Header | 1422 | /* Fill in Command Header */ |
| 1350 | c->Header.ReplyQueue = 0; // unused in simple mode | 1423 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
| 1351 | if (iocommand.buf_size > 0) // buffer to fill | 1424 | if (iocommand.buf_size > 0) /* buffer to fill */ |
| 1352 | { | 1425 | { |
| 1353 | c->Header.SGList = 1; | 1426 | c->Header.SGList = 1; |
| 1354 | c->Header.SGTotal = 1; | 1427 | c->Header.SGTotal = 1; |
| 1355 | } else // no buffers to fill | 1428 | } else /* no buffers to fill */ |
| 1356 | { | 1429 | { |
| 1357 | c->Header.SGList = 0; | 1430 | c->Header.SGList = 0; |
| 1358 | c->Header.SGTotal = 0; | 1431 | c->Header.SGTotal = 0; |
| 1359 | } | 1432 | } |
| 1360 | c->Header.LUN = iocommand.LUN_info; | 1433 | c->Header.LUN = iocommand.LUN_info; |
| 1361 | c->Header.Tag.lower = c->busaddr; // use the kernel address the cmd block for tag | 1434 | /* use the kernel address of the cmd block for tag */ |

| 1435 | c->Header.Tag.lower = c->busaddr; | ||
| 1362 | 1436 | ||
| 1363 | // Fill in Request block | 1437 | /* Fill in Request block */ |
| 1364 | c->Request = iocommand.Request; | 1438 | c->Request = iocommand.Request; |
| 1365 | 1439 | ||
| 1366 | // Fill in the scatter gather information | 1440 | /* Fill in the scatter gather information */ |
| 1367 | if (iocommand.buf_size > 0) { | 1441 | if (iocommand.buf_size > 0) { |
| 1368 | temp64.val = pci_map_single(host->pdev, buff, | 1442 | temp64.val = pci_map_single(host->pdev, buff, |
| 1369 | iocommand.buf_size, | 1443 | iocommand.buf_size, |
| @@ -1371,7 +1445,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
| 1371 | c->SG[0].Addr.lower = temp64.val32.lower; | 1445 | c->SG[0].Addr.lower = temp64.val32.lower; |
| 1372 | c->SG[0].Addr.upper = temp64.val32.upper; | 1446 | c->SG[0].Addr.upper = temp64.val32.upper; |
| 1373 | c->SG[0].Len = iocommand.buf_size; | 1447 | c->SG[0].Len = iocommand.buf_size; |
| 1374 | c->SG[0].Ext = 0; // we are not chaining | 1448 | c->SG[0].Ext = 0; /* we are not chaining */ |
| 1375 | } | 1449 | } |
| 1376 | c->waiting = &wait; | 1450 | c->waiting = &wait; |
| 1377 | 1451 | ||
| @@ -1670,14 +1744,9 @@ static void cciss_softirq_done(struct request *rq) | |||
| 1670 | /* unmap the DMA mapping for all the scatter gather elements */ | 1744 | /* unmap the DMA mapping for all the scatter gather elements */ |
| 1671 | for (i = 0; i < cmd->Header.SGList; i++) { | 1745 | for (i = 0; i < cmd->Header.SGList; i++) { |
| 1672 | if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { | 1746 | if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { |
| 1673 | temp64.val32.lower = cmd->SG[i].Addr.lower; | 1747 | cciss_unmap_sg_chain_block(h, cmd); |
| 1674 | temp64.val32.upper = cmd->SG[i].Addr.upper; | ||
| 1675 | pci_dma_sync_single_for_cpu(h->pdev, temp64.val, | ||
| 1676 | cmd->SG[i].Len, ddir); | ||
| 1677 | pci_unmap_single(h->pdev, temp64.val, | ||
| 1678 | cmd->SG[i].Len, ddir); | ||
| 1679 | /* Point to the next block */ | 1748 | /* Point to the next block */ |
| 1680 | curr_sg = h->cmd_sg_list[cmd->cmdindex]->sgchain; | 1749 | curr_sg = h->cmd_sg_list[cmd->cmdindex]; |
| 1681 | sg_index = 0; | 1750 | sg_index = 0; |
| 1682 | } | 1751 | } |
| 1683 | temp64.val32.lower = curr_sg[sg_index].Addr.lower; | 1752 | temp64.val32.lower = curr_sg[sg_index].Addr.lower; |
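
With the helper in place, the completion path only has to spot the CCISS_SG_CHAIN marker, undo the chain-block mapping, and hop to the per-command chain block. A simplified sketch of the walk, with field names taken from the surrounding hunks:

	curr_sg = cmd->SG;
	sg_index = 0;
	for (i = 0; i < cmd->Header.SGList; i++) {
		if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) {
			cciss_unmap_sg_chain_block(h, cmd);
			curr_sg = h->cmd_sg_list[cmd->cmdindex];
			sg_index = 0;
		}
		/* pci_unmap_single() curr_sg[sg_index] here, then: */
		++sg_index;
	}
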
| @@ -1796,12 +1865,9 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
| 1796 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); | 1865 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); |
| 1797 | 1866 | ||
| 1798 | /* This is a hardware imposed limit. */ | 1867 | /* This is a hardware imposed limit. */ |
| 1799 | blk_queue_max_hw_segments(disk->queue, h->maxsgentries); | 1868 | blk_queue_max_segments(disk->queue, h->maxsgentries); |
| 1800 | |||
| 1801 | /* This is a limit in the driver and could be eliminated. */ | ||
| 1802 | blk_queue_max_phys_segments(disk->queue, h->maxsgentries); | ||
| 1803 | 1869 | ||
| 1804 | blk_queue_max_sectors(disk->queue, h->cciss_max_sectors); | 1870 | blk_queue_max_hw_sectors(disk->queue, h->cciss_max_sectors); |
| 1805 | 1871 | ||
| 1806 | blk_queue_softirq_done(disk->queue, cciss_softirq_done); | 1872 | blk_queue_softirq_done(disk->queue, cciss_softirq_done); |
| 1807 | 1873 | ||
| @@ -2425,7 +2491,7 @@ static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, | |||
| 2425 | c->Request.Type.Direction = XFER_READ; | 2491 | c->Request.Type.Direction = XFER_READ; |
| 2426 | c->Request.Timeout = 0; | 2492 | c->Request.Timeout = 0; |
| 2427 | c->Request.CDB[0] = cmd; | 2493 | c->Request.CDB[0] = cmd; |
| 2428 | c->Request.CDB[6] = (size >> 24) & 0xFF; //MSB | 2494 | c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ |
| 2429 | c->Request.CDB[7] = (size >> 16) & 0xFF; | 2495 | c->Request.CDB[7] = (size >> 16) & 0xFF; |
| 2430 | c->Request.CDB[8] = (size >> 8) & 0xFF; | 2496 | c->Request.CDB[8] = (size >> 8) & 0xFF; |
| 2431 | c->Request.CDB[9] = size & 0xFF; | 2497 | c->Request.CDB[9] = size & 0xFF; |
| @@ -2694,7 +2760,7 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
| 2694 | "cciss: reading geometry failed, volume " | 2760 | "cciss: reading geometry failed, volume " |
| 2695 | "does not support reading geometry\n"); | 2761 | "does not support reading geometry\n"); |
| 2696 | drv->heads = 255; | 2762 | drv->heads = 255; |
| 2697 | drv->sectors = 32; // Sectors per track | 2763 | drv->sectors = 32; /* Sectors per track */ |
| 2698 | drv->cylinders = total_size + 1; | 2764 | drv->cylinders = total_size + 1; |
| 2699 | drv->raid_level = RAID_UNKNOWN; | 2765 | drv->raid_level = RAID_UNKNOWN; |
| 2700 | } else { | 2766 | } else { |
| @@ -3082,7 +3148,6 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3082 | SGDescriptor_struct *curr_sg; | 3148 | SGDescriptor_struct *curr_sg; |
| 3083 | drive_info_struct *drv; | 3149 | drive_info_struct *drv; |
| 3084 | int i, dir; | 3150 | int i, dir; |
| 3085 | int nseg = 0; | ||
| 3086 | int sg_index = 0; | 3151 | int sg_index = 0; |
| 3087 | int chained = 0; | 3152 | int chained = 0; |
| 3088 | 3153 | ||
| @@ -3112,19 +3177,19 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3112 | 3177 | ||
| 3113 | /* fill in the request */ | 3178 | /* fill in the request */ |
| 3114 | drv = creq->rq_disk->private_data; | 3179 | drv = creq->rq_disk->private_data; |
| 3115 | c->Header.ReplyQueue = 0; // unused in simple mode | 3180 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
| 3116 | /* got command from pool, so use the command block index instead */ | 3181 | /* got command from pool, so use the command block index instead */ |
| 3117 | /* for direct lookups. */ | 3182 | /* for direct lookups. */ |
| 3118 | /* The first 2 bits are reserved for controller error reporting. */ | 3183 | /* The first 2 bits are reserved for controller error reporting. */ |
| 3119 | c->Header.Tag.lower = (c->cmdindex << 3); | 3184 | c->Header.Tag.lower = (c->cmdindex << 3); |
| 3120 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ | 3185 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ |
| 3121 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); | 3186 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); |
| 3122 | c->Request.CDBLen = 10; // 12 byte commands not in FW yet; | 3187 | c->Request.CDBLen = 10; /* 12 byte commands not in FW yet */ |
| 3123 | c->Request.Type.Type = TYPE_CMD; // It is a command. | 3188 | c->Request.Type.Type = TYPE_CMD; /* It is a command. */ |
| 3124 | c->Request.Type.Attribute = ATTR_SIMPLE; | 3189 | c->Request.Type.Attribute = ATTR_SIMPLE; |
| 3125 | c->Request.Type.Direction = | 3190 | c->Request.Type.Direction = |
| 3126 | (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; | 3191 | (rq_data_dir(creq) == READ) ? XFER_READ : XFER_WRITE; |
| 3127 | c->Request.Timeout = 0; // Don't time out | 3192 | c->Request.Timeout = 0; /* Don't time out */ |
| 3128 | c->Request.CDB[0] = | 3193 | c->Request.CDB[0] = |
| 3129 | (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; | 3194 | (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; |
| 3130 | start_blk = blk_rq_pos(creq); | 3195 | start_blk = blk_rq_pos(creq); |
| @@ -3149,13 +3214,8 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3149 | for (i = 0; i < seg; i++) { | 3214 | for (i = 0; i < seg; i++) { |
| 3150 | if (((sg_index+1) == (h->max_cmd_sgentries)) && | 3215 | if (((sg_index+1) == (h->max_cmd_sgentries)) && |
| 3151 | !chained && ((seg - i) > 1)) { | 3216 | !chained && ((seg - i) > 1)) { |
| 3152 | nseg = seg - i; | ||
| 3153 | curr_sg[sg_index].Len = (nseg) * | ||
| 3154 | sizeof(SGDescriptor_struct); | ||
| 3155 | curr_sg[sg_index].Ext = CCISS_SG_CHAIN; | ||
| 3156 | |||
| 3157 | /* Point to next chain block. */ | 3217 | /* Point to next chain block. */ |
| 3158 | curr_sg = h->cmd_sg_list[c->cmdindex]->sgchain; | 3218 | curr_sg = h->cmd_sg_list[c->cmdindex]; |
| 3159 | sg_index = 0; | 3219 | sg_index = 0; |
| 3160 | chained = 1; | 3220 | chained = 1; |
| 3161 | } | 3221 | } |
| @@ -3166,31 +3226,12 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3166 | curr_sg[sg_index].Addr.lower = temp64.val32.lower; | 3226 | curr_sg[sg_index].Addr.lower = temp64.val32.lower; |
| 3167 | curr_sg[sg_index].Addr.upper = temp64.val32.upper; | 3227 | curr_sg[sg_index].Addr.upper = temp64.val32.upper; |
| 3168 | curr_sg[sg_index].Ext = 0; /* we are not chaining */ | 3228 | curr_sg[sg_index].Ext = 0; /* we are not chaining */ |
| 3169 | |||
| 3170 | ++sg_index; | 3229 | ++sg_index; |
| 3171 | } | 3230 | } |
| 3172 | 3231 | if (chained) | |
| 3173 | if (chained) { | 3232 | cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex], |
| 3174 | int len; | 3233 | (seg - (h->max_cmd_sgentries - 1)) * |
| 3175 | curr_sg = c->SG; | 3234 | sizeof(SGDescriptor_struct)); |
| 3176 | sg_index = h->max_cmd_sgentries - 1; | ||
| 3177 | len = curr_sg[sg_index].Len; | ||
| 3178 | /* Setup pointer to next chain block. | ||
| 3179 | * Fill out last element in current chain | ||
| 3180 | * block with address of next chain block. | ||
| 3181 | */ | ||
| 3182 | temp64.val = pci_map_single(h->pdev, | ||
| 3183 | h->cmd_sg_list[c->cmdindex]->sgchain, | ||
| 3184 | len, dir); | ||
| 3185 | |||
| 3186 | h->cmd_sg_list[c->cmdindex]->sg_chain_dma = temp64.val; | ||
| 3187 | curr_sg[sg_index].Addr.lower = temp64.val32.lower; | ||
| 3188 | curr_sg[sg_index].Addr.upper = temp64.val32.upper; | ||
| 3189 | |||
| 3190 | pci_dma_sync_single_for_device(h->pdev, | ||
| 3191 | h->cmd_sg_list[c->cmdindex]->sg_chain_dma, | ||
| 3192 | len, dir); | ||
| 3193 | } | ||
| 3194 | 3235 | ||
| 3195 | /* track how many SG entries we are using */ | 3236 | /* track how many SG entries we are using */ |
| 3196 | if (seg > h->maxSG) | 3237 | if (seg > h->maxSG) |
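
The length passed to cciss_map_sg_chain_block() is exactly the number of descriptors that spilled past the inline list, where the last inline slot is reserved for the chain pointer itself. The arithmetic, spelled out with the variable names used in the hunk above:

	int inline_slots = h->max_cmd_sgentries - 1;	/* last slot holds the chain pointer */
	int spilled = seg - inline_slots;		/* descriptors living in the chain block */

	cciss_map_sg_chain_block(h, c, h->cmd_sg_list[c->cmdindex],
				 spilled * sizeof(SGDescriptor_struct));
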
| @@ -3209,11 +3250,11 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3209 | if (likely(blk_fs_request(creq))) { | 3250 | if (likely(blk_fs_request(creq))) { |
| 3210 | if(h->cciss_read == CCISS_READ_10) { | 3251 | if(h->cciss_read == CCISS_READ_10) { |
| 3211 | c->Request.CDB[1] = 0; | 3252 | c->Request.CDB[1] = 0; |
| 3212 | c->Request.CDB[2] = (start_blk >> 24) & 0xff; //MSB | 3253 | c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ |
| 3213 | c->Request.CDB[3] = (start_blk >> 16) & 0xff; | 3254 | c->Request.CDB[3] = (start_blk >> 16) & 0xff; |
| 3214 | c->Request.CDB[4] = (start_blk >> 8) & 0xff; | 3255 | c->Request.CDB[4] = (start_blk >> 8) & 0xff; |
| 3215 | c->Request.CDB[5] = start_blk & 0xff; | 3256 | c->Request.CDB[5] = start_blk & 0xff; |
| 3216 | c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB | 3257 | c->Request.CDB[6] = 0; /* (sect >> 24) & 0xff; MSB */ |
| 3217 | c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; | 3258 | c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; |
| 3218 | c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; | 3259 | c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; |
| 3219 | c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; | 3260 | c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; |
| @@ -3222,7 +3263,7 @@ static void do_cciss_request(struct request_queue *q) | |||
| 3222 | 3263 | ||
| 3223 | c->Request.CDBLen = 16; | 3264 | c->Request.CDBLen = 16; |
| 3224 | c->Request.CDB[1]= 0; | 3265 | c->Request.CDB[1]= 0; |
| 3225 | c->Request.CDB[2]= (upper32 >> 24) & 0xff; //MSB | 3266 | c->Request.CDB[2]= (upper32 >> 24) & 0xff; /* MSB */ |
| 3226 | c->Request.CDB[3]= (upper32 >> 16) & 0xff; | 3267 | c->Request.CDB[3]= (upper32 >> 16) & 0xff; |
| 3227 | c->Request.CDB[4]= (upper32 >> 8) & 0xff; | 3268 | c->Request.CDB[4]= (upper32 >> 8) & 0xff; |
| 3228 | c->Request.CDB[5]= upper32 & 0xff; | 3269 | c->Request.CDB[5]= upper32 & 0xff; |
| @@ -4240,37 +4281,10 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
| 4240 | goto clean4; | 4281 | goto clean4; |
| 4241 | } | 4282 | } |
| 4242 | } | 4283 | } |
| 4243 | hba[i]->cmd_sg_list = kmalloc(sizeof(struct Cmd_sg_list *) * | 4284 | hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i], |
| 4244 | hba[i]->nr_cmds, | 4285 | hba[i]->chainsize, hba[i]->nr_cmds); |
| 4245 | GFP_KERNEL); | 4286 | if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0) |
| 4246 | if (!hba[i]->cmd_sg_list) { | ||
| 4247 | printk(KERN_ERR "cciss%d: Cannot get memory for " | ||
| 4248 | "s/g chaining.\n", i); | ||
| 4249 | goto clean4; | 4287 | goto clean4; |
| 4250 | } | ||
| 4251 | /* Build up chain blocks for each command */ | ||
| 4252 | if (hba[i]->chainsize > 0) { | ||
| 4253 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
| 4254 | hba[i]->cmd_sg_list[j] = | ||
| 4255 | kmalloc(sizeof(struct Cmd_sg_list), | ||
| 4256 | GFP_KERNEL); | ||
| 4257 | if (!hba[i]->cmd_sg_list[j]) { | ||
| 4258 | printk(KERN_ERR "cciss%d: Cannot get memory " | ||
| 4259 | "for chain block.\n", i); | ||
| 4260 | goto clean4; | ||
| 4261 | } | ||
| 4262 | /* Need a block of chainsized s/g elements. */ | ||
| 4263 | hba[i]->cmd_sg_list[j]->sgchain = | ||
| 4264 | kmalloc((hba[i]->chainsize * | ||
| 4265 | sizeof(SGDescriptor_struct)), | ||
| 4266 | GFP_KERNEL); | ||
| 4267 | if (!hba[i]->cmd_sg_list[j]->sgchain) { | ||
| 4268 | printk(KERN_ERR "cciss%d: Cannot get memory " | ||
| 4269 | "for s/g chains\n", i); | ||
| 4270 | goto clean4; | ||
| 4271 | } | ||
| 4272 | } | ||
| 4273 | } | ||
| 4274 | 4288 | ||
| 4275 | spin_lock_init(&hba[i]->lock); | 4289 | spin_lock_init(&hba[i]->lock); |
| 4276 | 4290 | ||
| @@ -4329,16 +4343,7 @@ clean4: | |||
| 4329 | for (k = 0; k < hba[i]->nr_cmds; k++) | 4343 | for (k = 0; k < hba[i]->nr_cmds; k++) |
| 4330 | kfree(hba[i]->scatter_list[k]); | 4344 | kfree(hba[i]->scatter_list[k]); |
| 4331 | kfree(hba[i]->scatter_list); | 4345 | kfree(hba[i]->scatter_list); |
| 4332 | /* Only free up extra s/g lists if controller supports them */ | 4346 | cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); |
| 4333 | if (hba[i]->chainsize > 0) { | ||
| 4334 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
| 4335 | if (hba[i]->cmd_sg_list[j]) { | ||
| 4336 | kfree(hba[i]->cmd_sg_list[j]->sgchain); | ||
| 4337 | kfree(hba[i]->cmd_sg_list[j]); | ||
| 4338 | } | ||
| 4339 | } | ||
| 4340 | kfree(hba[i]->cmd_sg_list); | ||
| 4341 | } | ||
| 4342 | if (hba[i]->cmd_pool) | 4347 | if (hba[i]->cmd_pool) |
| 4343 | pci_free_consistent(hba[i]->pdev, | 4348 | pci_free_consistent(hba[i]->pdev, |
| 4344 | hba[i]->nr_cmds * sizeof(CommandList_struct), | 4349 | hba[i]->nr_cmds * sizeof(CommandList_struct), |
| @@ -4456,16 +4461,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
| 4456 | for (j = 0; j < hba[i]->nr_cmds; j++) | 4461 | for (j = 0; j < hba[i]->nr_cmds; j++) |
| 4457 | kfree(hba[i]->scatter_list[j]); | 4462 | kfree(hba[i]->scatter_list[j]); |
| 4458 | kfree(hba[i]->scatter_list); | 4463 | kfree(hba[i]->scatter_list); |
| 4459 | /* Only free up extra s/g lists if controller supports them */ | 4464 | cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); |
| 4460 | if (hba[i]->chainsize > 0) { | ||
| 4461 | for (j = 0; j < hba[i]->nr_cmds; j++) { | ||
| 4462 | if (hba[i]->cmd_sg_list[j]) { | ||
| 4463 | kfree(hba[i]->cmd_sg_list[j]->sgchain); | ||
| 4464 | kfree(hba[i]->cmd_sg_list[j]); | ||
| 4465 | } | ||
| 4466 | } | ||
| 4467 | kfree(hba[i]->cmd_sg_list); | ||
| 4468 | } | ||
| 4469 | /* | 4465 | /* |
| 4470 | * Deliberately omit pci_disable_device(): it does something nasty to | 4466 | * Deliberately omit pci_disable_device(): it does something nasty to |
| 4471 | * Smart Array controllers that pci_enable_device does not undo | 4467 | * Smart Array controllers that pci_enable_device does not undo |
| @@ -4498,7 +4494,7 @@ static int __init cciss_init(void) | |||
| 4498 | * boundary. Given that we use pci_alloc_consistent() to allocate an | 4494 | * boundary. Given that we use pci_alloc_consistent() to allocate an |
| 4499 | * array of them, the size must be a multiple of 8 bytes. | 4495 | * array of them, the size must be a multiple of 8 bytes. |
| 4500 | */ | 4496 | */ |
| 4501 | BUILD_BUG_ON(sizeof(CommandList_struct) % 8); | 4497 | BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); |
| 4502 | 4498 | ||
| 4503 | printk(KERN_INFO DRIVER_NAME "\n"); | 4499 | printk(KERN_INFO DRIVER_NAME "\n"); |
| 4504 | 4500 | ||
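
BUILD_BUG_ON() fails the compile when its condition is nonzero, so the alignment rule is enforced at build time rather than discovered as DMA corruption at run time. Roughly how such a check can be expressed (a simplified sketch, not necessarily the kernel's exact definition):

	#define EXAMPLE_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

	/* compiles only if the struct size is a COMMANDLIST_ALIGNMENT multiple */
	EXAMPLE_BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT);
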
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 1d95db254069..c5d411174db0 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
| @@ -55,18 +55,12 @@ typedef struct _drive_info_struct | |||
| 55 | char device_initialized; /* indicates whether dev is initialized */ | 55 | char device_initialized; /* indicates whether dev is initialized */ |
| 56 | } drive_info_struct; | 56 | } drive_info_struct; |
| 57 | 57 | ||
| 58 | struct Cmd_sg_list { | ||
| 59 | SGDescriptor_struct *sgchain; | ||
| 60 | dma_addr_t sg_chain_dma; | ||
| 61 | int chain_block_size; | ||
| 62 | }; | ||
| 63 | |||
| 64 | struct ctlr_info | 58 | struct ctlr_info |
| 65 | { | 59 | { |
| 66 | int ctlr; | 60 | int ctlr; |
| 67 | char devname[8]; | 61 | char devname[8]; |
| 68 | char *product_name; | 62 | char *product_name; |
| 69 | char firm_ver[4]; // Firmware version | 63 | char firm_ver[4]; /* Firmware version */ |
| 70 | struct pci_dev *pdev; | 64 | struct pci_dev *pdev; |
| 71 | __u32 board_id; | 65 | __u32 board_id; |
| 72 | void __iomem *vaddr; | 66 | void __iomem *vaddr; |
| @@ -89,7 +83,7 @@ struct ctlr_info | |||
| 89 | int maxsgentries; | 83 | int maxsgentries; |
| 90 | int chainsize; | 84 | int chainsize; |
| 91 | int max_cmd_sgentries; | 85 | int max_cmd_sgentries; |
| 92 | struct Cmd_sg_list **cmd_sg_list; | 86 | SGDescriptor_struct **cmd_sg_list; |
| 93 | 87 | ||
| 94 | # define DOORBELL_INT 0 | 88 | # define DOORBELL_INT 0 |
| 95 | # define PERF_MODE_INT 1 | 89 | # define PERF_MODE_INT 1 |
| @@ -103,7 +97,7 @@ struct ctlr_info | |||
| 103 | BYTE cciss_write; | 97 | BYTE cciss_write; |
| 104 | BYTE cciss_read_capacity; | 98 | BYTE cciss_read_capacity; |
| 105 | 99 | ||
| 106 | // information about each logical volume | 100 | /* information about each logical volume */ |
| 107 | drive_info_struct *drv[CISS_MAX_LUN]; | 101 | drive_info_struct *drv[CISS_MAX_LUN]; |
| 108 | 102 | ||
| 109 | struct access_method access; | 103 | struct access_method access; |
| @@ -116,7 +110,7 @@ struct ctlr_info | |||
| 116 | unsigned int maxSG; | 110 | unsigned int maxSG; |
| 117 | spinlock_t lock; | 111 | spinlock_t lock; |
| 118 | 112 | ||
| 119 | //* pointers to command and error info pool */ | 113 | /* pointers to command and error info pool */ |
| 120 | CommandList_struct *cmd_pool; | 114 | CommandList_struct *cmd_pool; |
| 121 | dma_addr_t cmd_pool_dhandle; | 115 | dma_addr_t cmd_pool_dhandle; |
| 122 | ErrorInfo_struct *errinfo_pool; | 116 | ErrorInfo_struct *errinfo_pool; |
| @@ -134,12 +128,10 @@ struct ctlr_info | |||
| 134 | */ | 128 | */ |
| 135 | int next_to_run; | 129 | int next_to_run; |
| 136 | 130 | ||
| 137 | // Disk structures we need to pass back | 131 | /* Disk structures we need to pass back */ |
| 138 | struct gendisk *gendisk[CISS_MAX_LUN]; | 132 | struct gendisk *gendisk[CISS_MAX_LUN]; |
| 139 | #ifdef CONFIG_CISS_SCSI_TAPE | 133 | #ifdef CONFIG_CISS_SCSI_TAPE |
| 140 | void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ | 134 | struct cciss_scsi_adapter_data_t *scsi_ctlr; |
| 141 | /* list of block side commands the scsi error handling sucked up */ | ||
| 142 | /* and saved for later processing */ | ||
| 143 | #endif | 135 | #endif |
| 144 | unsigned char alive; | 136 | unsigned char alive; |
| 145 | struct list_head scan_list; | 137 | struct list_head scan_list; |
| @@ -315,4 +307,3 @@ struct board_type { | |||
| 315 | #define CCISS_LOCK(i) (&hba[i]->lock) | 307 | #define CCISS_LOCK(i) (&hba[i]->lock) |
| 316 | 308 | ||
| 317 | #endif /* CCISS_H */ | 309 | #endif /* CCISS_H */ |
| 318 | |||
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h index 6afa700890ff..e624ff959cb6 100644 --- a/drivers/block/cciss_cmd.h +++ b/drivers/block/cciss_cmd.h | |||
| @@ -1,31 +1,16 @@ | |||
| 1 | #ifndef CCISS_CMD_H | 1 | #ifndef CCISS_CMD_H |
| 2 | #define CCISS_CMD_H | 2 | #define CCISS_CMD_H |
| 3 | //########################################################################### | 3 | |
| 4 | //DEFINES | 4 | #include <linux/cciss_defs.h> |
| 5 | //########################################################################### | 5 | |
| 6 | /* DEFINES */ | ||
| 6 | #define CISS_VERSION "1.00" | 7 | #define CISS_VERSION "1.00" |
| 7 | 8 | ||
| 8 | //general boundary definitions | 9 | /* general boundary definitions */ |
| 9 | #define SENSEINFOBYTES 32//note that this value may vary between host implementations | ||
| 10 | #define MAXSGENTRIES 32 | 10 | #define MAXSGENTRIES 32 |
| 11 | #define CCISS_SG_CHAIN 0x80000000 | 11 | #define CCISS_SG_CHAIN 0x80000000 |
| 12 | #define MAXREPLYQS 256 | 12 | #define MAXREPLYQS 256 |
| 13 | 13 | ||
| 14 | //Command Status value | ||
| 15 | #define CMD_SUCCESS 0x0000 | ||
| 16 | #define CMD_TARGET_STATUS 0x0001 | ||
| 17 | #define CMD_DATA_UNDERRUN 0x0002 | ||
| 18 | #define CMD_DATA_OVERRUN 0x0003 | ||
| 19 | #define CMD_INVALID 0x0004 | ||
| 20 | #define CMD_PROTOCOL_ERR 0x0005 | ||
| 21 | #define CMD_HARDWARE_ERR 0x0006 | ||
| 22 | #define CMD_CONNECTION_LOST 0x0007 | ||
| 23 | #define CMD_ABORTED 0x0008 | ||
| 24 | #define CMD_ABORT_FAILED 0x0009 | ||
| 25 | #define CMD_UNSOLICITED_ABORT 0x000A | ||
| 26 | #define CMD_TIMEOUT 0x000B | ||
| 27 | #define CMD_UNABORTABLE 0x000C | ||
| 28 | |||
| 29 | /* Unit Attentions ASC's as defined for the MSA2012sa */ | 14 | /* Unit Attentions ASC's as defined for the MSA2012sa */ |
| 30 | #define POWER_OR_RESET 0x29 | 15 | #define POWER_OR_RESET 0x29 |
| 31 | #define STATE_CHANGED 0x2a | 16 | #define STATE_CHANGED 0x2a |
| @@ -49,30 +34,13 @@ | |||
| 49 | #define ASYM_ACCESS_CHANGED 0x06 | 34 | #define ASYM_ACCESS_CHANGED 0x06 |
| 50 | #define LUN_CAPACITY_CHANGED 0x09 | 35 | #define LUN_CAPACITY_CHANGED 0x09 |
| 51 | 36 | ||
| 52 | //transfer direction | 37 | /* config space register offsets */ |
| 53 | #define XFER_NONE 0x00 | ||
| 54 | #define XFER_WRITE 0x01 | ||
| 55 | #define XFER_READ 0x02 | ||
| 56 | #define XFER_RSVD 0x03 | ||
| 57 | |||
| 58 | //task attribute | ||
| 59 | #define ATTR_UNTAGGED 0x00 | ||
| 60 | #define ATTR_SIMPLE 0x04 | ||
| 61 | #define ATTR_HEADOFQUEUE 0x05 | ||
| 62 | #define ATTR_ORDERED 0x06 | ||
| 63 | #define ATTR_ACA 0x07 | ||
| 64 | |||
| 65 | //cdb type | ||
| 66 | #define TYPE_CMD 0x00 | ||
| 67 | #define TYPE_MSG 0x01 | ||
| 68 | |||
| 69 | //config space register offsets | ||
| 70 | #define CFG_VENDORID 0x00 | 38 | #define CFG_VENDORID 0x00 |
| 71 | #define CFG_DEVICEID 0x02 | 39 | #define CFG_DEVICEID 0x02 |
| 72 | #define CFG_I2OBAR 0x10 | 40 | #define CFG_I2OBAR 0x10 |
| 73 | #define CFG_MEM1BAR 0x14 | 41 | #define CFG_MEM1BAR 0x14 |
| 74 | 42 | ||
| 75 | //i2o space register offsets | 43 | /* i2o space register offsets */ |
| 76 | #define I2O_IBDB_SET 0x20 | 44 | #define I2O_IBDB_SET 0x20 |
| 77 | #define I2O_IBDB_CLEAR 0x70 | 45 | #define I2O_IBDB_CLEAR 0x70 |
| 78 | #define I2O_INT_STATUS 0x30 | 46 | #define I2O_INT_STATUS 0x30 |
| @@ -81,7 +49,7 @@ | |||
| 81 | #define I2O_OBPOST_Q 0x44 | 49 | #define I2O_OBPOST_Q 0x44 |
| 82 | #define I2O_DMA1_CFG 0x214 | 50 | #define I2O_DMA1_CFG 0x214 |
| 83 | 51 | ||
| 84 | //Configuration Table | 52 | /* Configuration Table */ |
| 85 | #define CFGTBL_ChangeReq 0x00000001l | 53 | #define CFGTBL_ChangeReq 0x00000001l |
| 86 | #define CFGTBL_AccCmds 0x00000001l | 54 | #define CFGTBL_AccCmds 0x00000001l |
| 87 | 55 | ||
| @@ -103,24 +71,17 @@ typedef union _u64bit | |||
| 103 | __u64 val; | 71 | __u64 val; |
| 104 | } u64bit; | 72 | } u64bit; |
| 105 | 73 | ||
| 106 | // Type defs used in the following structs | 74 | /* Type defs used in the following structs */ |
| 107 | #define BYTE __u8 | ||
| 108 | #define WORD __u16 | ||
| 109 | #define HWORD __u16 | ||
| 110 | #define DWORD __u32 | ||
| 111 | #define QWORD vals32 | 75 | #define QWORD vals32 |
| 112 | 76 | ||
| 113 | //########################################################################### | 77 | /* STRUCTURES */ |
| 114 | //STRUCTURES | ||
| 115 | //########################################################################### | ||
| 116 | #define CISS_MAX_LUN 1024 | ||
| 117 | #define CISS_MAX_PHYS_LUN 1024 | 78 | #define CISS_MAX_PHYS_LUN 1024 |
| 118 | // SCSI-3 Cmmands | 79 | /* SCSI-3 Commands */ |
| 119 | 80 | ||
| 120 | #pragma pack(1) | 81 | #pragma pack(1) |
| 121 | 82 | ||
| 122 | #define CISS_INQUIRY 0x12 | 83 | #define CISS_INQUIRY 0x12 |
| 123 | //Date returned | 84 | /* Data returned */ |
| 124 | typedef struct _InquiryData_struct | 85 | typedef struct _InquiryData_struct |
| 125 | { | 86 | { |
| 126 | BYTE data_byte[36]; | 87 | BYTE data_byte[36]; |
| @@ -128,7 +89,7 @@ typedef struct _InquiryData_struct | |||
| 128 | 89 | ||
| 129 | #define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */ | 90 | #define CISS_REPORT_LOG 0xc2 /* Report Logical LUNs */ |
| 130 | #define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ | 91 | #define CISS_REPORT_PHYS 0xc3 /* Report Physical LUNs */ |
| 131 | // Data returned | 92 | /* Data returned */ |
| 132 | typedef struct _ReportLUNdata_struct | 93 | typedef struct _ReportLUNdata_struct |
| 133 | { | 94 | { |
| 134 | BYTE LUNListLength[4]; | 95 | BYTE LUNListLength[4]; |
| @@ -139,8 +100,8 @@ typedef struct _ReportLUNdata_struct | |||
| 139 | #define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ | 100 | #define CCISS_READ_CAPACITY 0x25 /* Read Capacity */ |
| 140 | typedef struct _ReadCapdata_struct | 101 | typedef struct _ReadCapdata_struct |
| 141 | { | 102 | { |
| 142 | BYTE total_size[4]; // Total size in blocks | 103 | BYTE total_size[4]; /* Total size in blocks */ |
| 143 | BYTE block_size[4]; // Size of blocks in bytes | 104 | BYTE block_size[4]; /* Size of blocks in bytes */ |
| 144 | } ReadCapdata_struct; | 105 | } ReadCapdata_struct; |
| 145 | 106 | ||
| 146 | #define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */ | 107 | #define CCISS_READ_CAPACITY_16 0x9e /* Read Capacity 16 */ |
| @@ -172,52 +133,13 @@ typedef struct _ReadCapdata_struct_16 | |||
| 172 | #define CDB_LEN10 10 | 133 | #define CDB_LEN10 10 |
| 173 | #define CDB_LEN16 16 | 134 | #define CDB_LEN16 16 |
| 174 | 135 | ||
| 175 | // BMIC commands | 136 | /* BMIC commands */ |
| 176 | #define BMIC_READ 0x26 | 137 | #define BMIC_READ 0x26 |
| 177 | #define BMIC_WRITE 0x27 | 138 | #define BMIC_WRITE 0x27 |
| 178 | #define BMIC_CACHE_FLUSH 0xc2 | 139 | #define BMIC_CACHE_FLUSH 0xc2 |
| 179 | #define CCISS_CACHE_FLUSH 0x01 //C2 was already being used by CCISS | 140 | #define CCISS_CACHE_FLUSH 0x01 /* C2 was already being used by CCISS */ |
| 180 | |||
| 181 | //Command List Structure | ||
| 182 | typedef union _SCSI3Addr_struct { | ||
| 183 | struct { | ||
| 184 | BYTE Dev; | ||
| 185 | BYTE Bus:6; | ||
| 186 | BYTE Mode:2; // b00 | ||
| 187 | } PeripDev; | ||
| 188 | struct { | ||
| 189 | BYTE DevLSB; | ||
| 190 | BYTE DevMSB:6; | ||
| 191 | BYTE Mode:2; // b01 | ||
| 192 | } LogDev; | ||
| 193 | struct { | ||
| 194 | BYTE Dev:5; | ||
| 195 | BYTE Bus:3; | ||
| 196 | BYTE Targ:6; | ||
| 197 | BYTE Mode:2; // b10 | ||
| 198 | } LogUnit; | ||
| 199 | } SCSI3Addr_struct; | ||
| 200 | |||
| 201 | typedef struct _PhysDevAddr_struct { | ||
| 202 | DWORD TargetId:24; | ||
| 203 | DWORD Bus:6; | ||
| 204 | DWORD Mode:2; | ||
| 205 | SCSI3Addr_struct Target[2]; //2 level target device addr | ||
| 206 | } PhysDevAddr_struct; | ||
| 207 | |||
| 208 | typedef struct _LogDevAddr_struct { | ||
| 209 | DWORD VolId:30; | ||
| 210 | DWORD Mode:2; | ||
| 211 | BYTE reserved[4]; | ||
| 212 | } LogDevAddr_struct; | ||
| 213 | |||
| 214 | typedef union _LUNAddr_struct { | ||
| 215 | BYTE LunAddrBytes[8]; | ||
| 216 | SCSI3Addr_struct SCSI3Lun[4]; | ||
| 217 | PhysDevAddr_struct PhysDev; | ||
| 218 | LogDevAddr_struct LogDev; | ||
| 219 | } LUNAddr_struct; | ||
| 220 | 141 | ||
| 142 | /* Command List Structure */ | ||
| 221 | #define CTLR_LUNID "\0\0\0\0\0\0\0\0" | 143 | #define CTLR_LUNID "\0\0\0\0\0\0\0\0" |
| 222 | 144 | ||
| 223 | typedef struct _CommandListHeader_struct { | 145 | typedef struct _CommandListHeader_struct { |
| @@ -227,16 +149,6 @@ typedef struct _CommandListHeader_struct { | |||
| 227 | QWORD Tag; | 149 | QWORD Tag; |
| 228 | LUNAddr_struct LUN; | 150 | LUNAddr_struct LUN; |
| 229 | } CommandListHeader_struct; | 151 | } CommandListHeader_struct; |
| 230 | typedef struct _RequestBlock_struct { | ||
| 231 | BYTE CDBLen; | ||
| 232 | struct { | ||
| 233 | BYTE Type:3; | ||
| 234 | BYTE Attribute:3; | ||
| 235 | BYTE Direction:2; | ||
| 236 | } Type; | ||
| 237 | HWORD Timeout; | ||
| 238 | BYTE CDB[16]; | ||
| 239 | } RequestBlock_struct; | ||
| 240 | typedef struct _ErrDescriptor_struct { | 152 | typedef struct _ErrDescriptor_struct { |
| 241 | QWORD Addr; | 153 | QWORD Addr; |
| 242 | DWORD Len; | 154 | DWORD Len; |
| @@ -247,28 +159,6 @@ typedef struct _SGDescriptor_struct { | |||
| 247 | DWORD Ext; | 159 | DWORD Ext; |
| 248 | } SGDescriptor_struct; | 160 | } SGDescriptor_struct; |
| 249 | 161 | ||
| 250 | typedef union _MoreErrInfo_struct{ | ||
| 251 | struct { | ||
| 252 | BYTE Reserved[3]; | ||
| 253 | BYTE Type; | ||
| 254 | DWORD ErrorInfo; | ||
| 255 | }Common_Info; | ||
| 256 | struct{ | ||
| 257 | BYTE Reserved[2]; | ||
| 258 | BYTE offense_size;//size of offending entry | ||
| 259 | BYTE offense_num; //byte # of offense 0-base | ||
| 260 | DWORD offense_value; | ||
| 261 | }Invalid_Cmd; | ||
| 262 | }MoreErrInfo_struct; | ||
| 263 | typedef struct _ErrorInfo_struct { | ||
| 264 | BYTE ScsiStatus; | ||
| 265 | BYTE SenseLen; | ||
| 266 | HWORD CommandStatus; | ||
| 267 | DWORD ResidualCnt; | ||
| 268 | MoreErrInfo_struct MoreErrInfo; | ||
| 269 | BYTE SenseInfo[SENSEINFOBYTES]; | ||
| 270 | } ErrorInfo_struct; | ||
| 271 | |||
| 272 | /* Command types */ | 162 | /* Command types */ |
| 273 | #define CMD_RWREQ 0x00 | 163 | #define CMD_RWREQ 0x00 |
| 274 | #define CMD_IOCTL_PEND 0x01 | 164 | #define CMD_IOCTL_PEND 0x01 |
| @@ -277,10 +167,18 @@ typedef struct _ErrorInfo_struct { | |||
| 277 | #define CMD_MSG_TIMEOUT 0x05 | 167 | #define CMD_MSG_TIMEOUT 0x05 |
| 278 | #define CMD_MSG_STALE 0xff | 168 | #define CMD_MSG_STALE 0xff |
| 279 | 169 | ||
| 280 | /* This structure needs to be divisible by 8 for new | 170 | /* This structure needs to be divisible by COMMANDLIST_ALIGNMENT |
| 281 | * indexing method. | 171 | * because low bits of the address are used to indicate |
| 172 | * whether the tag contains an index or an address. PAD_32 and | ||
| 173 | * PAD_64 can be adjusted independently as needed for 32-bit | ||
| 174 | * and 64-bit systems. | ||
| 282 | */ | 175 | */ |
| 283 | #define PADSIZE (sizeof(long) - 4) | 176 | #define COMMANDLIST_ALIGNMENT (8) |
| 177 | #define IS_64_BIT ((sizeof(long) - 4)/4) | ||
| 178 | #define IS_32_BIT (!IS_64_BIT) | ||
| 179 | #define PAD_32 (0) | ||
| 180 | #define PAD_64 (4) | ||
| 181 | #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) | ||
| 284 | typedef struct _CommandList_struct { | 182 | typedef struct _CommandList_struct { |
| 285 | CommandListHeader_struct Header; | 183 | CommandListHeader_struct Header; |
| 286 | RequestBlock_struct Request; | 184 | RequestBlock_struct Request; |
| @@ -300,7 +198,7 @@ typedef struct _CommandList_struct { | |||
| 300 | char pad[PADSIZE]; | 198 | char pad[PADSIZE]; |
| 301 | } CommandList_struct; | 199 | } CommandList_struct; |
| 302 | 200 | ||
| 303 | //Configuration Table Structure | 201 | /* Configuration Table Structure */ |
| 304 | typedef struct _HostWrite_struct { | 202 | typedef struct _HostWrite_struct { |
| 305 | DWORD TransportRequest; | 203 | DWORD TransportRequest; |
| 306 | DWORD Reserved; | 204 | DWORD Reserved; |
| @@ -326,4 +224,4 @@ typedef struct _CfgTable_struct { | |||
| 326 | DWORD MaxPhysicalDrivesPerLogicalUnit; | 224 | DWORD MaxPhysicalDrivesPerLogicalUnit; |
| 327 | } CfgTable_struct; | 225 | } CfgTable_struct; |
| 328 | #pragma pack() | 226 | #pragma pack() |
| 329 | #endif // CCISS_CMD_H | 227 | #endif /* CCISS_CMD_H */ |
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c index 5d0e46dc3632..e1d0e2cfec72 100644 --- a/drivers/block/cciss_scsi.c +++ b/drivers/block/cciss_scsi.c | |||
| @@ -84,7 +84,6 @@ static struct scsi_host_template cciss_driver_template = { | |||
| 84 | .queuecommand = cciss_scsi_queue_command, | 84 | .queuecommand = cciss_scsi_queue_command, |
| 85 | .can_queue = SCSI_CCISS_CAN_QUEUE, | 85 | .can_queue = SCSI_CCISS_CAN_QUEUE, |
| 86 | .this_id = 7, | 86 | .this_id = 7, |
| 87 | .sg_tablesize = MAXSGENTRIES, | ||
| 88 | .cmd_per_lun = 1, | 87 | .cmd_per_lun = 1, |
| 89 | .use_clustering = DISABLE_CLUSTERING, | 88 | .use_clustering = DISABLE_CLUSTERING, |
| 90 | /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ | 89 | /* Can't have eh_bus_reset_handler or eh_host_reset_handler for cciss */ |
| @@ -93,11 +92,16 @@ static struct scsi_host_template cciss_driver_template = { | |||
| 93 | }; | 92 | }; |
| 94 | 93 | ||
| 95 | #pragma pack(1) | 94 | #pragma pack(1) |
| 95 | |||
| 96 | #define SCSI_PAD_32 0 | ||
| 97 | #define SCSI_PAD_64 0 | ||
| 98 | |||
| 96 | struct cciss_scsi_cmd_stack_elem_t { | 99 | struct cciss_scsi_cmd_stack_elem_t { |
| 97 | CommandList_struct cmd; | 100 | CommandList_struct cmd; |
| 98 | ErrorInfo_struct Err; | 101 | ErrorInfo_struct Err; |
| 99 | __u32 busaddr; | 102 | __u32 busaddr; |
| 100 | __u32 pad; | 103 | int cmdindex; |
| 104 | u8 pad[IS_32_BIT * SCSI_PAD_32 + IS_64_BIT * SCSI_PAD_64]; | ||
| 101 | }; | 105 | }; |
| 102 | 106 | ||
| 103 | #pragma pack() | 107 | #pragma pack() |
| @@ -118,16 +122,15 @@ struct cciss_scsi_cmd_stack_t { | |||
| 118 | struct cciss_scsi_adapter_data_t { | 122 | struct cciss_scsi_adapter_data_t { |
| 119 | struct Scsi_Host *scsi_host; | 123 | struct Scsi_Host *scsi_host; |
| 120 | struct cciss_scsi_cmd_stack_t cmd_stack; | 124 | struct cciss_scsi_cmd_stack_t cmd_stack; |
| 125 | SGDescriptor_struct **cmd_sg_list; | ||
| 121 | int registered; | 126 | int registered; |
| 122 | spinlock_t lock; // to protect ccissscsi[ctlr]; | 127 | spinlock_t lock; // to protect ccissscsi[ctlr]; |
| 123 | }; | 128 | }; |
| 124 | 129 | ||
| 125 | #define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \ | 130 | #define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \ |
| 126 | &(((struct cciss_scsi_adapter_data_t *) \ | 131 | &hba[ctlr]->scsi_ctlr->lock, flags); |
| 127 | hba[ctlr]->scsi_ctlr)->lock), flags); | ||
| 128 | #define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \ | 132 | #define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \ |
| 129 | &(((struct cciss_scsi_adapter_data_t *) \ | 133 | &hba[ctlr]->scsi_ctlr->lock, flags); |
| 130 | hba[ctlr]->scsi_ctlr)->lock), flags); | ||
| 131 | 134 | ||
| 132 | static CommandList_struct * | 135 | static CommandList_struct * |
| 133 | scsi_cmd_alloc(ctlr_info_t *h) | 136 | scsi_cmd_alloc(ctlr_info_t *h) |
| @@ -143,7 +146,7 @@ scsi_cmd_alloc(ctlr_info_t *h) | |||
| 143 | struct cciss_scsi_cmd_stack_t *stk; | 146 | struct cciss_scsi_cmd_stack_t *stk; |
| 144 | u64bit temp64; | 147 | u64bit temp64; |
| 145 | 148 | ||
| 146 | sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr; | 149 | sa = h->scsi_ctlr; |
| 147 | stk = &sa->cmd_stack; | 150 | stk = &sa->cmd_stack; |
| 148 | 151 | ||
| 149 | if (stk->top < 0) | 152 | if (stk->top < 0) |
| @@ -154,6 +157,7 @@ scsi_cmd_alloc(ctlr_info_t *h) | |||
| 154 | memset(&c->Err, 0, sizeof(c->Err)); | 157 | memset(&c->Err, 0, sizeof(c->Err)); |
| 155 | /* set physical addr of cmd and addr of scsi parameters */ | 158 | /* set physical addr of cmd and addr of scsi parameters */ |
| 156 | c->cmd.busaddr = c->busaddr; | 159 | c->cmd.busaddr = c->busaddr; |
| 160 | c->cmd.cmdindex = c->cmdindex; | ||
| 157 | /* (__u32) (stk->cmd_pool_handle + | 161 | /* (__u32) (stk->cmd_pool_handle + |
| 158 | (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */ | 162 | (sizeof(struct cciss_scsi_cmd_stack_elem_t)*stk->top)); */ |
| 159 | 163 | ||
| @@ -182,7 +186,7 @@ scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd) | |||
| 182 | struct cciss_scsi_adapter_data_t *sa; | 186 | struct cciss_scsi_adapter_data_t *sa; |
| 183 | struct cciss_scsi_cmd_stack_t *stk; | 187 | struct cciss_scsi_cmd_stack_t *stk; |
| 184 | 188 | ||
| 185 | sa = (struct cciss_scsi_adapter_data_t *) h->scsi_ctlr; | 189 | sa = h->scsi_ctlr; |
| 186 | stk = &sa->cmd_stack; | 190 | stk = &sa->cmd_stack; |
| 187 | if (stk->top >= CMD_STACK_SIZE) { | 191 | if (stk->top >= CMD_STACK_SIZE) { |
| 188 | printk("cciss: scsi_cmd_free called too many times.\n"); | 192 | printk("cciss: scsi_cmd_free called too many times.\n"); |
| @@ -199,24 +203,31 @@ scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) | |||
| 199 | struct cciss_scsi_cmd_stack_t *stk; | 203 | struct cciss_scsi_cmd_stack_t *stk; |
| 200 | size_t size; | 204 | size_t size; |
| 201 | 205 | ||
| 206 | sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr], | ||
| 207 | hba[ctlr]->chainsize, CMD_STACK_SIZE); | ||
| 208 | if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0) | ||
| 209 | return -ENOMEM; | ||
| 210 | |||
| 202 | stk = &sa->cmd_stack; | 211 | stk = &sa->cmd_stack; |
| 203 | size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; | 212 | size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; |
| 204 | 213 | ||
| 205 | // pci_alloc_consistent guarantees 32-bit DMA address will | 214 | /* Check alignment, see cciss_cmd.h near CommandList_struct def. */ |
| 206 | // be used | 215 | BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); |
| 207 | 216 | /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ | |
| 208 | stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) | 217 | stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) |
| 209 | pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle); | 218 | pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle); |
| 210 | 219 | ||
| 211 | if (stk->pool == NULL) { | 220 | if (stk->pool == NULL) { |
| 212 | printk("stk->pool is null\n"); | 221 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); |
| 213 | return -1; | 222 | sa->cmd_sg_list = NULL; |
| 223 | return -ENOMEM; | ||
| 214 | } | 224 | } |
| 215 | 225 | ||
| 216 | for (i=0; i<CMD_STACK_SIZE; i++) { | 226 | for (i=0; i<CMD_STACK_SIZE; i++) { |
| 217 | stk->elem[i] = &stk->pool[i]; | 227 | stk->elem[i] = &stk->pool[i]; |
| 218 | stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + | 228 | stk->elem[i]->busaddr = (__u32) (stk->cmd_pool_handle + |
| 219 | (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); | 229 | (sizeof(struct cciss_scsi_cmd_stack_elem_t) * i)); |
| 230 | stk->elem[i]->cmdindex = i; | ||
| 220 | } | 231 | } |
| 221 | stk->top = CMD_STACK_SIZE-1; | 232 | stk->top = CMD_STACK_SIZE-1; |
| 222 | return 0; | 233 | return 0; |
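
scsi_cmd_stack_setup() now reports failure as -ENOMEM instead of a bare -1, and frees the chain blocks itself when the pool allocation fails. A sketch of the contract as seen from a caller (the caller shown here is hypothetical; the real call site is not in this hunk):

	int rc;

	rc = scsi_cmd_stack_setup(ctlr, sa);
	if (rc) {
		/* chain blocks and pool already cleaned up on failure */
		return rc;
	}
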
| @@ -229,7 +240,7 @@ scsi_cmd_stack_free(int ctlr) | |||
| 229 | struct cciss_scsi_cmd_stack_t *stk; | 240 | struct cciss_scsi_cmd_stack_t *stk; |
| 230 | size_t size; | 241 | size_t size; |
| 231 | 242 | ||
| 232 | sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; | 243 | sa = hba[ctlr]->scsi_ctlr; |
| 233 | stk = &sa->cmd_stack; | 244 | stk = &sa->cmd_stack; |
| 234 | if (stk->top != CMD_STACK_SIZE-1) { | 245 | if (stk->top != CMD_STACK_SIZE-1) { |
| 235 | printk( "cciss: %d scsi commands are still outstanding.\n", | 246 | printk( "cciss: %d scsi commands are still outstanding.\n", |
| @@ -241,6 +252,7 @@ scsi_cmd_stack_free(int ctlr) | |||
| 241 | 252 | ||
| 242 | pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle); | 253 | pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle); |
| 243 | stk->pool = NULL; | 254 | stk->pool = NULL; |
| 255 | cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); | ||
| 244 | } | 256 | } |
| 245 | 257 | ||
| 246 | #if 0 | 258 | #if 0 |
| @@ -530,8 +542,7 @@ adjust_cciss_scsi_table(int ctlr, int hostno, | |||
| 530 | CPQ_TAPE_LOCK(ctlr, flags); | 542 | CPQ_TAPE_LOCK(ctlr, flags); |
| 531 | 543 | ||
| 532 | if (hostno != -1) /* if it's not the first time... */ | 544 | if (hostno != -1) /* if it's not the first time... */ |
| 533 | sh = ((struct cciss_scsi_adapter_data_t *) | 545 | sh = hba[ctlr]->scsi_ctlr->scsi_host; |
| 534 | hba[ctlr]->scsi_ctlr)->scsi_host; | ||
| 535 | 546 | ||
| 536 | /* find any devices in ccissscsi[] that are not in | 547 | /* find any devices in ccissscsi[] that are not in |
| 537 | sd[] and remove them from ccissscsi[] */ | 548 | sd[] and remove them from ccissscsi[] */ |
| @@ -702,7 +713,7 @@ cciss_scsi_setup(int cntl_num) | |||
| 702 | kfree(shba); | 713 | kfree(shba); |
| 703 | shba = NULL; | 714 | shba = NULL; |
| 704 | } | 715 | } |
| 705 | hba[cntl_num]->scsi_ctlr = (void *) shba; | 716 | hba[cntl_num]->scsi_ctlr = shba; |
| 706 | return; | 717 | return; |
| 707 | } | 718 | } |
| 708 | 719 | ||
| @@ -725,6 +736,8 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) | |||
| 725 | ctlr = hba[cp->ctlr]; | 736 | ctlr = hba[cp->ctlr]; |
| 726 | 737 | ||
| 727 | scsi_dma_unmap(cmd); | 738 | scsi_dma_unmap(cmd); |
| 739 | if (cp->Header.SGTotal > ctlr->max_cmd_sgentries) | ||
| 740 | cciss_unmap_sg_chain_block(ctlr, cp); | ||
| 728 | 741 | ||
| 729 | cmd->result = (DID_OK << 16); /* host byte */ | 742 | cmd->result = (DID_OK << 16); /* host byte */ |
| 730 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ | 743 | cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ |
| @@ -847,9 +860,10 @@ cciss_scsi_detect(int ctlr) | |||
| 847 | sh->io_port = 0; // good enough? FIXME, | 860 | sh->io_port = 0; // good enough? FIXME, |
| 848 | sh->n_io_port = 0; // I don't think we use these two... | 861 | sh->n_io_port = 0; // I don't think we use these two... |
| 849 | sh->this_id = SELF_SCSI_ID; | 862 | sh->this_id = SELF_SCSI_ID; |
| 863 | sh->sg_tablesize = hba[ctlr]->maxsgentries; | ||
| 850 | 864 | ||
| 851 | ((struct cciss_scsi_adapter_data_t *) | 865 | ((struct cciss_scsi_adapter_data_t *) |
| 852 | hba[ctlr]->scsi_ctlr)->scsi_host = (void *) sh; | 866 | hba[ctlr]->scsi_ctlr)->scsi_host = sh; |
| 853 | sh->hostdata[0] = (unsigned long) hba[ctlr]; | 867 | sh->hostdata[0] = (unsigned long) hba[ctlr]; |
| 854 | sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT]; | 868 | sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT]; |
| 855 | sh->unique_id = sh->irq; | 869 | sh->unique_id = sh->irq; |
| @@ -1364,34 +1378,54 @@ cciss_scsi_proc_info(struct Scsi_Host *sh, | |||
| 1364 | dma mapping and fills in the scatter gather entries of the | 1378 | dma mapping and fills in the scatter gather entries of the |
| 1365 | cciss command, cp. */ | 1379 | cciss command, cp. */ |
| 1366 | 1380 | ||
| 1367 | static void | 1381 | static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, |
| 1368 | cciss_scatter_gather(struct pci_dev *pdev, | 1382 | struct scsi_cmnd *cmd) |
| 1369 | CommandList_struct *cp, | ||
| 1370 | struct scsi_cmnd *cmd) | ||
| 1371 | { | 1383 | { |
| 1372 | unsigned int len; | 1384 | unsigned int len; |
| 1373 | struct scatterlist *sg; | 1385 | struct scatterlist *sg; |
| 1374 | __u64 addr64; | 1386 | __u64 addr64; |
| 1375 | int use_sg, i; | 1387 | int request_nsgs, i, chained, sg_index; |
| 1376 | 1388 | struct cciss_scsi_adapter_data_t *sa = h->scsi_ctlr; | |
| 1377 | BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); | 1389 | SGDescriptor_struct *curr_sg; |
| 1378 | 1390 | ||
| 1379 | use_sg = scsi_dma_map(cmd); | 1391 | BUG_ON(scsi_sg_count(cmd) > h->maxsgentries); |
| 1380 | if (use_sg) { /* not too many addrs? */ | 1392 | |
| 1381 | scsi_for_each_sg(cmd, sg, use_sg, i) { | 1393 | chained = 0; |
| 1394 | sg_index = 0; | ||
| 1395 | curr_sg = cp->SG; | ||
| 1396 | request_nsgs = scsi_dma_map(cmd); | ||
| 1397 | if (request_nsgs) { | ||
| 1398 | scsi_for_each_sg(cmd, sg, request_nsgs, i) { | ||
| 1399 | if (sg_index + 1 == h->max_cmd_sgentries && | ||
| 1400 | !chained && request_nsgs - i > 1) { | ||
| 1401 | chained = 1; | ||
| 1402 | sg_index = 0; | ||
| 1403 | curr_sg = sa->cmd_sg_list[cp->cmdindex]; | ||
| 1404 | } | ||
| 1382 | addr64 = (__u64) sg_dma_address(sg); | 1405 | addr64 = (__u64) sg_dma_address(sg); |
| 1383 | len = sg_dma_len(sg); | 1406 | len = sg_dma_len(sg); |
| 1384 | cp->SG[i].Addr.lower = | 1407 | curr_sg[sg_index].Addr.lower = |
| 1385 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 1408 | (__u32) (addr64 & 0x0FFFFFFFFULL); |
| 1386 | cp->SG[i].Addr.upper = | 1409 | curr_sg[sg_index].Addr.upper = |
| 1387 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 1410 | (__u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); |
| 1388 | cp->SG[i].Len = len; | 1411 | curr_sg[sg_index].Len = len; |
| 1389 | cp->SG[i].Ext = 0; // we are not chaining | 1412 | curr_sg[sg_index].Ext = 0; |
| 1413 | ++sg_index; | ||
| 1390 | } | 1414 | } |
| 1415 | if (chained) | ||
| 1416 | cciss_map_sg_chain_block(h, cp, | ||
| 1417 | sa->cmd_sg_list[cp->cmdindex], | ||
| 1418 | (request_nsgs - (h->max_cmd_sgentries - 1)) * | ||
| 1419 | sizeof(SGDescriptor_struct)); | ||
| 1391 | } | 1420 | } |
| 1392 | 1421 | /* track how many SG entries we are using */ | |
| 1393 | cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ | 1422 | if (request_nsgs > h->maxSG) |
| 1394 | cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ | 1423 | h->maxSG = request_nsgs; |
| 1424 | cp->Header.SGTotal = (__u8) request_nsgs + chained; | ||
| 1425 | if (request_nsgs > h->max_cmd_sgentries) | ||
| 1426 | cp->Header.SGList = h->max_cmd_sgentries; | ||
| 1427 | else | ||
| 1428 | cp->Header.SGList = cp->Header.SGTotal; | ||
| 1395 | return; | 1429 | return; |
| 1396 | } | 1430 | } |
| 1397 | 1431 | ||
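
A worked example of the new header bookkeeping (illustrative numbers, not from the patch): with max_cmd_sgentries = 31 and a 40-element request, 30 data descriptors fill the inline list, slot 31 becomes the chain pointer, and the remaining 10 descriptors land in the chain block.

	/* request_nsgs = 40, chained = 1, h->max_cmd_sgentries = 31 */
	cp->Header.SGTotal = 40 + 1;	/* data entries + the chain entry */
	cp->Header.SGList = 31;		/* capped at the inline descriptors */
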
| @@ -1399,7 +1433,7 @@ cciss_scatter_gather(struct pci_dev *pdev, | |||
| 1399 | static int | 1433 | static int |
| 1400 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) | 1434 | cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) |
| 1401 | { | 1435 | { |
| 1402 | ctlr_info_t **c; | 1436 | ctlr_info_t *c; |
| 1403 | int ctlr, rc; | 1437 | int ctlr, rc; |
| 1404 | unsigned char scsi3addr[8]; | 1438 | unsigned char scsi3addr[8]; |
| 1405 | CommandList_struct *cp; | 1439 | CommandList_struct *cp; |
| @@ -1407,8 +1441,8 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
| 1407 | 1441 | ||
| 1408 | // Get the ptr to our adapter structure (hba[i]) out of cmd->host. | 1442 | // Get the ptr to our adapter structure (hba[i]) out of cmd->host. |
| 1409 | // We violate cmd->host privacy here. (Is there another way?) | 1443 | // We violate cmd->host privacy here. (Is there another way?) |
| 1410 | c = (ctlr_info_t **) &cmd->device->host->hostdata[0]; | 1444 | c = (ctlr_info_t *) cmd->device->host->hostdata[0]; |
| 1411 | ctlr = (*c)->ctlr; | 1445 | ctlr = c->ctlr; |
| 1412 | 1446 | ||
| 1413 | rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, | 1447 | rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, |
| 1414 | cmd->device->lun, scsi3addr); | 1448 | cmd->device->lun, scsi3addr); |
| @@ -1431,7 +1465,7 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
| 1431 | see what the device thinks of it. */ | 1465 | see what the device thinks of it. */ |
| 1432 | 1466 | ||
| 1433 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1467 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
| 1434 | cp = scsi_cmd_alloc(*c); | 1468 | cp = scsi_cmd_alloc(c); |
| 1435 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1469 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
| 1436 | if (cp == NULL) { /* trouble... */ | 1470 | if (cp == NULL) { /* trouble... */ |
| 1437 | printk("scsi_cmd_alloc returned NULL!\n"); | 1471 | printk("scsi_cmd_alloc returned NULL!\n"); |
| @@ -1489,15 +1523,14 @@ cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd | |||
| 1489 | BUG(); | 1523 | BUG(); |
| 1490 | break; | 1524 | break; |
| 1491 | } | 1525 | } |
| 1492 | 1526 | cciss_scatter_gather(c, cp, cmd); | |
| 1493 | cciss_scatter_gather((*c)->pdev, cp, cmd); // Fill the SG list | ||
| 1494 | 1527 | ||
| 1495 | /* Put the request on the tail of the request queue */ | 1528 | /* Put the request on the tail of the request queue */ |
| 1496 | 1529 | ||
| 1497 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1530 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
| 1498 | addQ(&(*c)->reqQ, cp); | 1531 | addQ(&c->reqQ, cp); |
| 1499 | (*c)->Qdepth++; | 1532 | c->Qdepth++; |
| 1500 | start_io(*c); | 1533 | start_io(c); |
| 1501 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); | 1534 | spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); |
| 1502 | 1535 | ||
| 1503 | /* the cmd'll come back via intr handler in complete_scsi_command() */ | 1536 | /* the cmd'll come back via intr handler in complete_scsi_command() */ |
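
The queue-command and error-handler paths stop treating hostdata[0] as a pointer-to-pointer: the detect path stores the controller pointer directly, so consumers now cast the stored value back in one step. The pattern, condensed from the hunks above:

	/* at detect time */
	sh->hostdata[0] = (unsigned long) hba[ctlr];

	/* at queue-command / error-handler time */
	ctlr_info_t *c = (ctlr_info_t *) cmd->device->host->hostdata[0];
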
| @@ -1514,7 +1547,7 @@ cciss_unregister_scsi(int ctlr) | |||
| 1514 | /* we are being forcibly unloaded, and may not refuse. */ | 1547 | /* we are being forcibly unloaded, and may not refuse. */ |
| 1515 | 1548 | ||
| 1516 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1549 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
| 1517 | sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; | 1550 | sa = hba[ctlr]->scsi_ctlr; |
| 1518 | stk = &sa->cmd_stack; | 1551 | stk = &sa->cmd_stack; |
| 1519 | 1552 | ||
| 1520 | /* if we weren't ever actually registered, don't unregister */ | 1553 | /* if we weren't ever actually registered, don't unregister */ |
| @@ -1541,7 +1574,7 @@ cciss_engage_scsi(int ctlr) | |||
| 1541 | unsigned long flags; | 1574 | unsigned long flags; |
| 1542 | 1575 | ||
| 1543 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); | 1576 | spin_lock_irqsave(CCISS_LOCK(ctlr), flags); |
| 1544 | sa = (struct cciss_scsi_adapter_data_t *) hba[ctlr]->scsi_ctlr; | 1577 | sa = hba[ctlr]->scsi_ctlr; |
| 1545 | stk = &sa->cmd_stack; | 1578 | stk = &sa->cmd_stack; |
| 1546 | 1579 | ||
| 1547 | if (sa->registered) { | 1580 | if (sa->registered) { |
| @@ -1654,14 +1687,14 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
| 1654 | int rc; | 1687 | int rc; |
| 1655 | CommandList_struct *cmd_in_trouble; | 1688 | CommandList_struct *cmd_in_trouble; |
| 1656 | unsigned char lunaddr[8]; | 1689 | unsigned char lunaddr[8]; |
| 1657 | ctlr_info_t **c; | 1690 | ctlr_info_t *c; |
| 1658 | int ctlr; | 1691 | int ctlr; |
| 1659 | 1692 | ||
| 1660 | /* find the controller to which the command to be aborted was sent */ | 1693 | /* find the controller to which the command to be aborted was sent */ |
| 1661 | c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0]; | 1694 | c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; |
| 1662 | if (c == NULL) /* paranoia */ | 1695 | if (c == NULL) /* paranoia */ |
| 1663 | return FAILED; | 1696 | return FAILED; |
| 1664 | ctlr = (*c)->ctlr; | 1697 | ctlr = c->ctlr; |
| 1665 | printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); | 1698 | printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); |
| 1666 | /* find the command that's giving us trouble */ | 1699 | /* find the command that's giving us trouble */ |
| 1667 | cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; | 1700 | cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; |
| @@ -1671,7 +1704,7 @@ static int cciss_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
| 1671 | /* send a reset to the SCSI LUN which the command was sent to */ | 1704 | /* send a reset to the SCSI LUN which the command was sent to */ |
| 1672 | rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, | 1705 | rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, |
| 1673 | TYPE_MSG); | 1706 | TYPE_MSG); |
| 1674 | if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0) | 1707 | if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0) |
| 1675 | return SUCCESS; | 1708 | return SUCCESS; |
| 1676 | printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); | 1709 | printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); |
| 1677 | return FAILED; | 1710 | return FAILED; |
| @@ -1682,14 +1715,14 @@ static int cciss_eh_abort_handler(struct scsi_cmnd *scsicmd) | |||
| 1682 | int rc; | 1715 | int rc; |
| 1683 | CommandList_struct *cmd_to_abort; | 1716 | CommandList_struct *cmd_to_abort; |
| 1684 | unsigned char lunaddr[8]; | 1717 | unsigned char lunaddr[8]; |
| 1685 | ctlr_info_t **c; | 1718 | ctlr_info_t *c; |
| 1686 | int ctlr; | 1719 | int ctlr; |
| 1687 | 1720 | ||
| 1688 | /* find the controller to which the command to be aborted was sent */ | 1721 | /* find the controller to which the command to be aborted was sent */ |
| 1689 | c = (ctlr_info_t **) &scsicmd->device->host->hostdata[0]; | 1722 | c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; |
| 1690 | if (c == NULL) /* paranoia */ | 1723 | if (c == NULL) /* paranoia */ |
| 1691 | return FAILED; | 1724 | return FAILED; |
| 1692 | ctlr = (*c)->ctlr; | 1725 | ctlr = c->ctlr; |
| 1693 | printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); | 1726 | printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); |
| 1694 | 1727 | ||
| 1695 | /* find the command to be aborted */ | 1728 | /* find the command to be aborted */ |
diff --git a/drivers/block/cciss_scsi.h b/drivers/block/cciss_scsi.h index 7b750245ae76..6d5822fe851a 100644 --- a/drivers/block/cciss_scsi.h +++ b/drivers/block/cciss_scsi.h | |||
| @@ -25,16 +25,16 @@ | |||
| 25 | 25 | ||
| 26 | #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */ | 26 | #include <scsi/scsicam.h> /* possibly irrelevant, since we don't show disks */ |
| 27 | 27 | ||
| 28 | // the scsi id of the adapter... | 28 | /* the scsi id of the adapter... */ |
| 29 | #define SELF_SCSI_ID 15 | 29 | #define SELF_SCSI_ID 15 |
| 30 | // 15 is somewhat arbitrary, since the scsi-2 bus | 30 | /* 15 is somewhat arbitrary, since the scsi-2 bus |
| 31 | // that's presented by the driver to the OS is | 31 | that's presented by the driver to the OS is |
| 32 | // fabricated. The "real" scsi-3 bus the | 32 | fabricated. The "real" scsi-3 bus the |
| 33 | // hardware presents is fabricated too. | 33 | hardware presents is fabricated too. |
| 34 | // The actual, honest-to-goodness physical | 34 | The actual, honest-to-goodness physical |
| 35 | // bus that the devices are attached to is not | 35 | bus that the devices are attached to is not |
| 36 | // addressible natively, and may in fact turn | 36 | addressable natively, and may in fact turn |
| 37 | // out to be not scsi at all. | 37 | out to be not scsi at all. */ |
| 38 | 38 | ||
| 39 | #define SCSI_CCISS_CAN_QUEUE 2 | 39 | #define SCSI_CCISS_CAN_QUEUE 2 |
| 40 | 40 | ||
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index 6422651ec364..91d11631cec9 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
| @@ -448,11 +448,8 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev) | |||
| 448 | blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask); | 448 | blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask); |
| 449 | 449 | ||
| 450 | /* This is a hardware imposed limit. */ | 450 | /* This is a hardware imposed limit. */ |
| 451 | blk_queue_max_hw_segments(q, SG_MAX); | 451 | blk_queue_max_segments(q, SG_MAX); |
| 452 | 452 | ||
| 453 | /* This is a driver limit and could be eliminated. */ | ||
| 454 | blk_queue_max_phys_segments(q, SG_MAX); | ||
| 455 | |||
| 456 | init_timer(&hba[i]->timer); | 453 | init_timer(&hba[i]->timer); |
| 457 | hba[i]->timer.expires = jiffies + IDA_TIMER; | 454 | hba[i]->timer.expires = jiffies + IDA_TIMER; |
| 458 | hba[i]->timer.data = (unsigned long)hba[i]; | 455 | hba[i]->timer.data = (unsigned long)hba[i]; |
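Note: this hunk shows the substitution repeated across the rest of the patch: the separate physical-segment and hardware-segment limits collapse into the single blk_queue_max_segments() setter, so a driver states its one real constraint once. A rough userspace model, assuming a simplified queue_limits layout rather than the kernel's actual struct:

#include <stdio.h>

/* Model of a request queue's segment limit (simplified). */
struct queue_limits {
        unsigned short max_segments;   /* replaces max_phys_segments
                                        * and max_hw_segments */
};

static void blk_queue_max_segments(struct queue_limits *q, unsigned short max)
{
        q->max_segments = max;
}

int main(void)
{
        struct queue_limits q;

        /* Old callers set the same value twice:
         *   blk_queue_max_phys_segments(q, SG_MAX);
         *   blk_queue_max_hw_segments(q, SG_MAX);
         * New callers express the hardware constraint once. */
        blk_queue_max_segments(&q, 32 /* e.g. SG_MAX */);
        printf("max_segments = %u\n", q.max_segments);
        return 0;
}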
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c index 1292e0620663..4df3b40b1057 100644 --- a/drivers/block/drbd/drbd_nl.c +++ b/drivers/block/drbd/drbd_nl.c | |||
| @@ -709,9 +709,8 @@ void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_seg_s) __mu | |||
| 709 | 709 | ||
| 710 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); | 710 | max_seg_s = min(queue_max_sectors(b) * queue_logical_block_size(b), max_seg_s); |
| 711 | 711 | ||
| 712 | blk_queue_max_sectors(q, max_seg_s >> 9); | 712 | blk_queue_max_hw_sectors(q, max_seg_s >> 9); |
| 713 | blk_queue_max_phys_segments(q, max_segments ? max_segments : MAX_PHYS_SEGMENTS); | 713 | blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS); |
| 714 | blk_queue_max_hw_segments(q, max_segments ? max_segments : MAX_HW_SEGMENTS); | ||
| 715 | blk_queue_max_segment_size(q, max_seg_s); | 714 | blk_queue_max_segment_size(q, max_seg_s); |
| 716 | blk_queue_logical_block_size(q, 512); | 715 | blk_queue_logical_block_size(q, 512); |
| 717 | blk_queue_segment_boundary(q, PAGE_SIZE-1); | 716 | blk_queue_segment_boundary(q, PAGE_SIZE-1); |
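Note: the drbd hunk also carries the other recurring rename, blk_queue_max_sectors() to blk_queue_max_hw_sectors(): the caller now names the hardware ceiling, and the block layer keeps the soft per-request limit within it. A simplified model of that relationship; the kernel's real setter applies additional default caps and sanity checks:

#include <stdio.h>

struct queue_limits {
        unsigned int max_hw_sectors; /* what the hardware can do */
        unsigned int max_sectors;    /* what the block layer will issue */
};

static void blk_queue_max_hw_sectors(struct queue_limits *l, unsigned int max)
{
        l->max_hw_sectors = max;
        /* keep the soft limit inside the hardware ceiling */
        if (l->max_sectors > max || l->max_sectors == 0)
                l->max_sectors = max;
}

int main(void)
{
        struct queue_limits l = { 0, 0 };

        blk_queue_max_hw_sectors(&l, 64);   /* e.g. the floppy_queue call below */
        printf("hw=%u soft=%u\n", l.max_hw_sectors, l.max_sectors);
        return 0;
}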
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index 3266b4f65daa..b9b117059b62 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c | |||
| @@ -4234,7 +4234,7 @@ static int __init floppy_init(void) | |||
| 4234 | err = -ENOMEM; | 4234 | err = -ENOMEM; |
| 4235 | goto out_unreg_driver; | 4235 | goto out_unreg_driver; |
| 4236 | } | 4236 | } |
| 4237 | blk_queue_max_sectors(floppy_queue, 64); | 4237 | blk_queue_max_hw_sectors(floppy_queue, 64); |
| 4238 | 4238 | ||
| 4239 | blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, | 4239 | blk_register_region(MKDEV(FLOPPY_MAJOR, 0), 256, THIS_MODULE, |
| 4240 | floppy_find, NULL, NULL); | 4240 | floppy_find, NULL, NULL); |
diff --git a/drivers/block/hd.c b/drivers/block/hd.c index d5cdce08ffd2..5116c65c07cb 100644 --- a/drivers/block/hd.c +++ b/drivers/block/hd.c | |||
| @@ -719,7 +719,7 @@ static int __init hd_init(void) | |||
| 719 | return -ENOMEM; | 719 | return -ENOMEM; |
| 720 | } | 720 | } |
| 721 | 721 | ||
| 722 | blk_queue_max_sectors(hd_queue, 255); | 722 | blk_queue_max_hw_sectors(hd_queue, 255); |
| 723 | init_timer(&device_timer); | 723 | init_timer(&device_timer); |
| 724 | device_timer.function = hd_times_out; | 724 | device_timer.function = hd_times_out; |
| 725 | blk_queue_logical_block_size(hd_queue, 512); | 725 | blk_queue_logical_block_size(hd_queue, 512); |
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c index 02b2583df7fc..5416c9a606e4 100644 --- a/drivers/block/mg_disk.c +++ b/drivers/block/mg_disk.c | |||
| @@ -980,7 +980,7 @@ static int mg_probe(struct platform_device *plat_dev) | |||
| 980 | __func__, __LINE__); | 980 | __func__, __LINE__); |
| 981 | goto probe_err_6; | 981 | goto probe_err_6; |
| 982 | } | 982 | } |
| 983 | blk_queue_max_sectors(host->breq, MG_MAX_SECTS); | 983 | blk_queue_max_hw_sectors(host->breq, MG_MAX_SECTS); |
| 984 | blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); | 984 | blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); |
| 985 | 985 | ||
| 986 | init_timer(&host->timer); | 986 | init_timer(&host->timer); |
diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 569e39e8f114..e712cd51af15 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c | |||
| @@ -906,7 +906,7 @@ static int __init pd_init(void) | |||
| 906 | if (!pd_queue) | 906 | if (!pd_queue) |
| 907 | goto out1; | 907 | goto out1; |
| 908 | 908 | ||
| 909 | blk_queue_max_sectors(pd_queue, cluster); | 909 | blk_queue_max_hw_sectors(pd_queue, cluster); |
| 910 | 910 | ||
| 911 | if (register_blkdev(major, name)) | 911 | if (register_blkdev(major, name)) |
| 912 | goto out2; | 912 | goto out2; |
diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ea54ea393553..ddb4f9abd480 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c | |||
| @@ -956,8 +956,7 @@ static int __init pf_init(void) | |||
| 956 | return -ENOMEM; | 956 | return -ENOMEM; |
| 957 | } | 957 | } |
| 958 | 958 | ||
| 959 | blk_queue_max_phys_segments(pf_queue, cluster); | 959 | blk_queue_max_segments(pf_queue, cluster); |
| 960 | blk_queue_max_hw_segments(pf_queue, cluster); | ||
| 961 | 960 | ||
| 962 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { | 961 | for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { |
| 963 | struct gendisk *disk = pf->disk; | 962 | struct gendisk *disk = pf->disk; |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index 68b5957f107c..b72935b8f203 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
| @@ -569,6 +569,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames) | |||
| 569 | } | 569 | } |
| 570 | 570 | ||
| 571 | spin_lock_init(&pkt->lock); | 571 | spin_lock_init(&pkt->lock); |
| 572 | bio_list_init(&pkt->orig_bios); | ||
| 572 | 573 | ||
| 573 | for (i = 0; i < frames; i++) { | 574 | for (i = 0; i < frames; i++) { |
| 574 | struct bio *bio = pkt_bio_alloc(1); | 575 | struct bio *bio = pkt_bio_alloc(1); |
| @@ -721,43 +722,6 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod | |||
| 721 | } | 722 | } |
| 722 | 723 | ||
| 723 | /* | 724 | /* |
| 724 | * Add a bio to a single linked list defined by its head and tail pointers. | ||
| 725 | */ | ||
| 726 | static void pkt_add_list_last(struct bio *bio, struct bio **list_head, struct bio **list_tail) | ||
| 727 | { | ||
| 728 | bio->bi_next = NULL; | ||
| 729 | if (*list_tail) { | ||
| 730 | BUG_ON((*list_head) == NULL); | ||
| 731 | (*list_tail)->bi_next = bio; | ||
| 732 | (*list_tail) = bio; | ||
| 733 | } else { | ||
| 734 | BUG_ON((*list_head) != NULL); | ||
| 735 | (*list_head) = bio; | ||
| 736 | (*list_tail) = bio; | ||
| 737 | } | ||
| 738 | } | ||
| 739 | |||
| 740 | /* | ||
| 741 | * Remove and return the first bio from a single linked list defined by its | ||
| 742 | * head and tail pointers. | ||
| 743 | */ | ||
| 744 | static inline struct bio *pkt_get_list_first(struct bio **list_head, struct bio **list_tail) | ||
| 745 | { | ||
| 746 | struct bio *bio; | ||
| 747 | |||
| 748 | if (*list_head == NULL) | ||
| 749 | return NULL; | ||
| 750 | |||
| 751 | bio = *list_head; | ||
| 752 | *list_head = bio->bi_next; | ||
| 753 | if (*list_head == NULL) | ||
| 754 | *list_tail = NULL; | ||
| 755 | |||
| 756 | bio->bi_next = NULL; | ||
| 757 | return bio; | ||
| 758 | } | ||
| 759 | |||
| 760 | /* | ||
| 761 | * Send a packet_command to the underlying block device and | 725 | * Send a packet_command to the underlying block device and |
| 762 | * wait for completion. | 726 | * wait for completion. |
| 763 | */ | 727 | */ |
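Note: the two helpers deleted above reimplemented a head/tail-pointer singly linked list that the generic bio_list API already provides; the remaining pktcdvd hunks switch to that API. A standalone sketch of the primitives used (bio_list_init/add/peek/pop/empty and bio_list_for_each), with a stub struct bio; the real definitions live in include/linux/bio.h:

#include <stdio.h>
#include <stddef.h>

struct bio { int sector; struct bio *bi_next; };

/* Singly linked list with a tail pointer for O(1) append. */
struct bio_list { struct bio *head, *tail; };

static void bio_list_init(struct bio_list *bl) { bl->head = bl->tail = NULL; }
static int bio_list_empty(const struct bio_list *bl) { return bl->head == NULL; }

static void bio_list_add(struct bio_list *bl, struct bio *bio)
{
        bio->bi_next = NULL;
        if (bl->tail)
                bl->tail->bi_next = bio;
        else
                bl->head = bio;
        bl->tail = bio;
}

static struct bio *bio_list_peek(struct bio_list *bl) { return bl->head; }

static struct bio *bio_list_pop(struct bio_list *bl)
{
        struct bio *bio = bl->head;

        if (bio) {
                bl->head = bio->bi_next;
                if (!bl->head)
                        bl->tail = NULL;
                bio->bi_next = NULL;
        }
        return bio;
}

#define bio_list_for_each(bio, bl) \
        for (bio = (bl)->head; bio; bio = bio->bi_next)

int main(void)
{
        struct bio a = { 1, NULL }, b = { 2, NULL };
        struct bio_list bl;
        struct bio *bio;

        bio_list_init(&bl);
        bio_list_add(&bl, &a);                  /* e.g. pkt_queue_bio() */
        bio_list_add(&bl, &b);
        printf("head is sector %d\n", bio_list_peek(&bl)->sector);
        bio_list_for_each(bio, &bl)
                printf("queued sector %d\n", bio->sector);
        while ((bio = bio_list_pop(&bl)))       /* e.g. pkt_finish_packet() */
                ;
        printf("empty=%d\n", bio_list_empty(&bl));
        return 0;
}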
| @@ -876,13 +840,10 @@ static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, | |||
| 876 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) | 840 | static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) |
| 877 | { | 841 | { |
| 878 | spin_lock(&pd->iosched.lock); | 842 | spin_lock(&pd->iosched.lock); |
| 879 | if (bio_data_dir(bio) == READ) { | 843 | if (bio_data_dir(bio) == READ) |
| 880 | pkt_add_list_last(bio, &pd->iosched.read_queue, | 844 | bio_list_add(&pd->iosched.read_queue, bio); |
| 881 | &pd->iosched.read_queue_tail); | 845 | else |
| 882 | } else { | 846 | bio_list_add(&pd->iosched.write_queue, bio); |
| 883 | pkt_add_list_last(bio, &pd->iosched.write_queue, | ||
| 884 | &pd->iosched.write_queue_tail); | ||
| 885 | } | ||
| 886 | spin_unlock(&pd->iosched.lock); | 847 | spin_unlock(&pd->iosched.lock); |
| 887 | 848 | ||
| 888 | atomic_set(&pd->iosched.attention, 1); | 849 | atomic_set(&pd->iosched.attention, 1); |
| @@ -917,8 +878,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
| 917 | int reads_queued, writes_queued; | 878 | int reads_queued, writes_queued; |
| 918 | 879 | ||
| 919 | spin_lock(&pd->iosched.lock); | 880 | spin_lock(&pd->iosched.lock); |
| 920 | reads_queued = (pd->iosched.read_queue != NULL); | 881 | reads_queued = !bio_list_empty(&pd->iosched.read_queue); |
| 921 | writes_queued = (pd->iosched.write_queue != NULL); | 882 | writes_queued = !bio_list_empty(&pd->iosched.write_queue); |
| 922 | spin_unlock(&pd->iosched.lock); | 883 | spin_unlock(&pd->iosched.lock); |
| 923 | 884 | ||
| 924 | if (!reads_queued && !writes_queued) | 885 | if (!reads_queued && !writes_queued) |
| @@ -927,7 +888,7 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
| 927 | if (pd->iosched.writing) { | 888 | if (pd->iosched.writing) { |
| 928 | int need_write_seek = 1; | 889 | int need_write_seek = 1; |
| 929 | spin_lock(&pd->iosched.lock); | 890 | spin_lock(&pd->iosched.lock); |
| 930 | bio = pd->iosched.write_queue; | 891 | bio = bio_list_peek(&pd->iosched.write_queue); |
| 931 | spin_unlock(&pd->iosched.lock); | 892 | spin_unlock(&pd->iosched.lock); |
| 932 | if (bio && (bio->bi_sector == pd->iosched.last_write)) | 893 | if (bio && (bio->bi_sector == pd->iosched.last_write)) |
| 933 | need_write_seek = 0; | 894 | need_write_seek = 0; |
| @@ -950,13 +911,10 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
| 950 | } | 911 | } |
| 951 | 912 | ||
| 952 | spin_lock(&pd->iosched.lock); | 913 | spin_lock(&pd->iosched.lock); |
| 953 | if (pd->iosched.writing) { | 914 | if (pd->iosched.writing) |
| 954 | bio = pkt_get_list_first(&pd->iosched.write_queue, | 915 | bio = bio_list_pop(&pd->iosched.write_queue); |
| 955 | &pd->iosched.write_queue_tail); | 916 | else |
| 956 | } else { | 917 | bio = bio_list_pop(&pd->iosched.read_queue); |
| 957 | bio = pkt_get_list_first(&pd->iosched.read_queue, | ||
| 958 | &pd->iosched.read_queue_tail); | ||
| 959 | } | ||
| 960 | spin_unlock(&pd->iosched.lock); | 918 | spin_unlock(&pd->iosched.lock); |
| 961 | 919 | ||
| 962 | if (!bio) | 920 | if (!bio) |
| @@ -992,14 +950,14 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd) | |||
| 992 | static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) | 950 | static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) |
| 993 | { | 951 | { |
| 994 | if ((pd->settings.size << 9) / CD_FRAMESIZE | 952 | if ((pd->settings.size << 9) / CD_FRAMESIZE |
| 995 | <= queue_max_phys_segments(q)) { | 953 | <= queue_max_segments(q)) { |
| 996 | /* | 954 | /* |
| 997 | * The cdrom device can handle one segment/frame | 955 | * The cdrom device can handle one segment/frame |
| 998 | */ | 956 | */ |
| 999 | clear_bit(PACKET_MERGE_SEGS, &pd->flags); | 957 | clear_bit(PACKET_MERGE_SEGS, &pd->flags); |
| 1000 | return 0; | 958 | return 0; |
| 1001 | } else if ((pd->settings.size << 9) / PAGE_SIZE | 959 | } else if ((pd->settings.size << 9) / PAGE_SIZE |
| 1002 | <= queue_max_phys_segments(q)) { | 960 | <= queue_max_segments(q)) { |
| 1003 | /* | 961 | /* |
| 1004 | * We can handle this case at the expense of some extra memory | 962 | * We can handle this case at the expense of some extra memory |
| 1005 | * copies during write operations | 963 | * copies during write operations |
| @@ -1114,7 +1072,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
| 1114 | int f; | 1072 | int f; |
| 1115 | char written[PACKET_MAX_SIZE]; | 1073 | char written[PACKET_MAX_SIZE]; |
| 1116 | 1074 | ||
| 1117 | BUG_ON(!pkt->orig_bios); | 1075 | BUG_ON(bio_list_empty(&pkt->orig_bios)); |
| 1118 | 1076 | ||
| 1119 | atomic_set(&pkt->io_wait, 0); | 1077 | atomic_set(&pkt->io_wait, 0); |
| 1120 | atomic_set(&pkt->io_errors, 0); | 1078 | atomic_set(&pkt->io_errors, 0); |
| @@ -1124,7 +1082,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
| 1124 | */ | 1082 | */ |
| 1125 | memset(written, 0, sizeof(written)); | 1083 | memset(written, 0, sizeof(written)); |
| 1126 | spin_lock(&pkt->lock); | 1084 | spin_lock(&pkt->lock); |
| 1127 | for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { | 1085 | bio_list_for_each(bio, &pkt->orig_bios) { |
| 1128 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); | 1086 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); |
| 1129 | int num_frames = bio->bi_size / CD_FRAMESIZE; | 1087 | int num_frames = bio->bi_size / CD_FRAMESIZE; |
| 1130 | pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); | 1088 | pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); |
| @@ -1363,7 +1321,7 @@ try_next_bio: | |||
| 1363 | break; | 1321 | break; |
| 1364 | pkt_rbtree_erase(pd, node); | 1322 | pkt_rbtree_erase(pd, node); |
| 1365 | spin_lock(&pkt->lock); | 1323 | spin_lock(&pkt->lock); |
| 1366 | pkt_add_list_last(bio, &pkt->orig_bios, &pkt->orig_bios_tail); | 1324 | bio_list_add(&pkt->orig_bios, bio); |
| 1367 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; | 1325 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; |
| 1368 | spin_unlock(&pkt->lock); | 1326 | spin_unlock(&pkt->lock); |
| 1369 | } | 1327 | } |
| @@ -1409,7 +1367,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
| 1409 | */ | 1367 | */ |
| 1410 | frames_write = 0; | 1368 | frames_write = 0; |
| 1411 | spin_lock(&pkt->lock); | 1369 | spin_lock(&pkt->lock); |
| 1412 | for (bio = pkt->orig_bios; bio; bio = bio->bi_next) { | 1370 | bio_list_for_each(bio, &pkt->orig_bios) { |
| 1413 | int segment = bio->bi_idx; | 1371 | int segment = bio->bi_idx; |
| 1414 | int src_offs = 0; | 1372 | int src_offs = 0; |
| 1415 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); | 1373 | int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); |
| @@ -1472,20 +1430,14 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) | |||
| 1472 | 1430 | ||
| 1473 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) | 1431 | static void pkt_finish_packet(struct packet_data *pkt, int uptodate) |
| 1474 | { | 1432 | { |
| 1475 | struct bio *bio, *next; | 1433 | struct bio *bio; |
| 1476 | 1434 | ||
| 1477 | if (!uptodate) | 1435 | if (!uptodate) |
| 1478 | pkt->cache_valid = 0; | 1436 | pkt->cache_valid = 0; |
| 1479 | 1437 | ||
| 1480 | /* Finish all bios corresponding to this packet */ | 1438 | /* Finish all bios corresponding to this packet */ |
| 1481 | bio = pkt->orig_bios; | 1439 | while ((bio = bio_list_pop(&pkt->orig_bios))) |
| 1482 | while (bio) { | ||
| 1483 | next = bio->bi_next; | ||
| 1484 | bio->bi_next = NULL; | ||
| 1485 | bio_endio(bio, uptodate ? 0 : -EIO); | 1440 | bio_endio(bio, uptodate ? 0 : -EIO); |
| 1486 | bio = next; | ||
| 1487 | } | ||
| 1488 | pkt->orig_bios = pkt->orig_bios_tail = NULL; | ||
| 1489 | } | 1441 | } |
| 1490 | 1442 | ||
| 1491 | static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) | 1443 | static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) |
| @@ -2360,7 +2312,7 @@ static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) | |||
| 2360 | * even if the size is a multiple of the packet size. | 2312 | * even if the size is a multiple of the packet size. |
| 2361 | */ | 2313 | */ |
| 2362 | spin_lock_irq(q->queue_lock); | 2314 | spin_lock_irq(q->queue_lock); |
| 2363 | blk_queue_max_sectors(q, pd->settings.size); | 2315 | blk_queue_max_hw_sectors(q, pd->settings.size); |
| 2364 | spin_unlock_irq(q->queue_lock); | 2316 | spin_unlock_irq(q->queue_lock); |
| 2365 | set_bit(PACKET_WRITABLE, &pd->flags); | 2317 | set_bit(PACKET_WRITABLE, &pd->flags); |
| 2366 | } else { | 2318 | } else { |
| @@ -2567,8 +2519,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
| 2567 | spin_lock(&pkt->lock); | 2519 | spin_lock(&pkt->lock); |
| 2568 | if ((pkt->state == PACKET_WAITING_STATE) || | 2520 | if ((pkt->state == PACKET_WAITING_STATE) || |
| 2569 | (pkt->state == PACKET_READ_WAIT_STATE)) { | 2521 | (pkt->state == PACKET_READ_WAIT_STATE)) { |
| 2570 | pkt_add_list_last(bio, &pkt->orig_bios, | 2522 | bio_list_add(&pkt->orig_bios, bio); |
| 2571 | &pkt->orig_bios_tail); | ||
| 2572 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; | 2523 | pkt->write_size += bio->bi_size / CD_FRAMESIZE; |
| 2573 | if ((pkt->write_size >= pkt->frames) && | 2524 | if ((pkt->write_size >= pkt->frames) && |
| 2574 | (pkt->state == PACKET_WAITING_STATE)) { | 2525 | (pkt->state == PACKET_WAITING_STATE)) { |
| @@ -2662,7 +2613,7 @@ static void pkt_init_queue(struct pktcdvd_device *pd) | |||
| 2662 | 2613 | ||
| 2663 | blk_queue_make_request(q, pkt_make_request); | 2614 | blk_queue_make_request(q, pkt_make_request); |
| 2664 | blk_queue_logical_block_size(q, CD_FRAMESIZE); | 2615 | blk_queue_logical_block_size(q, CD_FRAMESIZE); |
| 2665 | blk_queue_max_sectors(q, PACKET_MAX_SECTORS); | 2616 | blk_queue_max_hw_sectors(q, PACKET_MAX_SECTORS); |
| 2666 | blk_queue_merge_bvec(q, pkt_merge_bvec); | 2617 | blk_queue_merge_bvec(q, pkt_merge_bvec); |
| 2667 | q->queuedata = pd; | 2618 | q->queuedata = pd; |
| 2668 | } | 2619 | } |
| @@ -2898,6 +2849,8 @@ static int pkt_setup_dev(dev_t dev, dev_t* pkt_dev) | |||
| 2898 | 2849 | ||
| 2899 | spin_lock_init(&pd->lock); | 2850 | spin_lock_init(&pd->lock); |
| 2900 | spin_lock_init(&pd->iosched.lock); | 2851 | spin_lock_init(&pd->iosched.lock); |
| 2852 | bio_list_init(&pd->iosched.read_queue); | ||
| 2853 | bio_list_init(&pd->iosched.write_queue); | ||
| 2901 | sprintf(pd->name, DRIVER_NAME"%d", idx); | 2854 | sprintf(pd->name, DRIVER_NAME"%d", idx); |
| 2902 | init_waitqueue_head(&pd->wqueue); | 2855 | init_waitqueue_head(&pd->wqueue); |
| 2903 | pd->bio_queue = RB_ROOT; | 2856 | pd->bio_queue = RB_ROOT; |
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c index 03a130dca8ab..bc95469d33c1 100644 --- a/drivers/block/ps3disk.c +++ b/drivers/block/ps3disk.c | |||
| @@ -474,7 +474,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
| 474 | 474 | ||
| 475 | blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); | 475 | blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH); |
| 476 | 476 | ||
| 477 | blk_queue_max_sectors(queue, dev->bounce_size >> 9); | 477 | blk_queue_max_hw_sectors(queue, dev->bounce_size >> 9); |
| 478 | blk_queue_segment_boundary(queue, -1UL); | 478 | blk_queue_segment_boundary(queue, -1UL); |
| 479 | blk_queue_dma_alignment(queue, dev->blk_size-1); | 479 | blk_queue_dma_alignment(queue, dev->blk_size-1); |
| 480 | blk_queue_logical_block_size(queue, dev->blk_size); | 480 | blk_queue_logical_block_size(queue, dev->blk_size); |
| @@ -482,8 +482,7 @@ static int __devinit ps3disk_probe(struct ps3_system_bus_device *_dev) | |||
| 482 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, | 482 | blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, |
| 483 | ps3disk_prepare_flush); | 483 | ps3disk_prepare_flush); |
| 484 | 484 | ||
| 485 | blk_queue_max_phys_segments(queue, -1); | 485 | blk_queue_max_segments(queue, -1); |
| 486 | blk_queue_max_hw_segments(queue, -1); | ||
| 487 | blk_queue_max_segment_size(queue, dev->bounce_size); | 486 | blk_queue_max_segment_size(queue, dev->bounce_size); |
| 488 | 487 | ||
| 489 | gendisk = alloc_disk(PS3DISK_MINORS); | 488 | gendisk = alloc_disk(PS3DISK_MINORS); |
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index 1fb6c3135fc8..e44608229972 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c | |||
| @@ -751,10 +751,9 @@ static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | |||
| 751 | priv->queue = queue; | 751 | priv->queue = queue; |
| 752 | queue->queuedata = dev; | 752 | queue->queuedata = dev; |
| 753 | blk_queue_make_request(queue, ps3vram_make_request); | 753 | blk_queue_make_request(queue, ps3vram_make_request); |
| 754 | blk_queue_max_phys_segments(queue, MAX_PHYS_SEGMENTS); | 754 | blk_queue_max_segments(queue, BLK_MAX_SEGMENTS); |
| 755 | blk_queue_max_hw_segments(queue, MAX_HW_SEGMENTS); | 755 | blk_queue_max_segment_size(queue, BLK_MAX_SEGMENT_SIZE); |
| 756 | blk_queue_max_segment_size(queue, MAX_SEGMENT_SIZE); | 756 | blk_queue_max_hw_sectors(queue, BLK_SAFE_MAX_SECTORS); |
| 757 | blk_queue_max_sectors(queue, SAFE_MAX_SECTORS); | ||
| 758 | 757 | ||
| 759 | gendisk = alloc_disk(1); | 758 | gendisk = alloc_disk(1); |
| 760 | if (!gendisk) { | 759 | if (!gendisk) { |
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index 411f064760b4..48e8fee9f2d4 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c | |||
| @@ -691,9 +691,8 @@ static int probe_disk(struct vdc_port *port) | |||
| 691 | 691 | ||
| 692 | port->disk = g; | 692 | port->disk = g; |
| 693 | 693 | ||
| 694 | blk_queue_max_hw_segments(q, port->ring_cookies); | 694 | blk_queue_max_segments(q, port->ring_cookies); |
| 695 | blk_queue_max_phys_segments(q, port->ring_cookies); | 695 | blk_queue_max_hw_sectors(q, port->max_xfer_size); |
| 696 | blk_queue_max_sectors(q, port->max_xfer_size); | ||
| 697 | g->major = vdc_major; | 696 | g->major = vdc_major; |
| 698 | g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; | 697 | g->first_minor = port->vio.vdev->dev_no << PARTITION_SHIFT; |
| 699 | strcpy(g->disk_name, port->disk_name); | 698 | strcpy(g->disk_name, port->disk_name); |
diff --git a/drivers/block/sx8.c b/drivers/block/sx8.c index a7c4184f4a63..b70f0fca9a42 100644 --- a/drivers/block/sx8.c +++ b/drivers/block/sx8.c | |||
| @@ -409,7 +409,7 @@ static int carm_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) | |||
| 409 | static void carm_remove_one (struct pci_dev *pdev); | 409 | static void carm_remove_one (struct pci_dev *pdev); |
| 410 | static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); | 410 | static int carm_bdev_getgeo(struct block_device *bdev, struct hd_geometry *geo); |
| 411 | 411 | ||
| 412 | static struct pci_device_id carm_pci_tbl[] = { | 412 | static const struct pci_device_id carm_pci_tbl[] = { |
| 413 | { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | 413 | { PCI_VENDOR_ID_PROMISE, 0x8000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, |
| 414 | { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, | 414 | { PCI_VENDOR_ID_PROMISE, 0x8002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, |
| 415 | { } /* terminate list */ | 415 | { } /* terminate list */ |
| @@ -1518,8 +1518,7 @@ static int carm_init_disks(struct carm_host *host) | |||
| 1518 | break; | 1518 | break; |
| 1519 | } | 1519 | } |
| 1520 | disk->queue = q; | 1520 | disk->queue = q; |
| 1521 | blk_queue_max_hw_segments(q, CARM_MAX_REQ_SG); | 1521 | blk_queue_max_segments(q, CARM_MAX_REQ_SG); |
| 1522 | blk_queue_max_phys_segments(q, CARM_MAX_REQ_SG); | ||
| 1523 | blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); | 1522 | blk_queue_segment_boundary(q, CARM_SG_BOUNDARY); |
| 1524 | 1523 | ||
| 1525 | q->queuedata = port; | 1524 | q->queuedata = port; |
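Note: alongside the queue-limit work, carm_pci_tbl above picks up const: a device ID table is never written after build, so marking it const lets the compiler place it in read-only data (tables tagged __devinitdata switch to __devinitconst for the same reason, as in xsysace later in this patch). A userspace model with a stub pci_device_id:

#include <stdio.h>

/* Stub of the ID-table pattern; the real struct is in
 * <linux/mod_devicetable.h>. */
struct pci_device_id { unsigned int vendor, device; };

/* const: immutable table, lives in .rodata, protected from stray writes */
static const struct pci_device_id carm_pci_tbl[] = {
        { 0x105a /* PCI_VENDOR_ID_PROMISE */, 0x8000 },
        { 0x105a, 0x8002 },
        { 0, 0 }                        /* terminator */
};

int main(void)
{
        const struct pci_device_id *id;

        for (id = carm_pci_tbl; id->vendor; id++)
                printf("match %04x:%04x\n", id->vendor, id->device);
        return 0;
}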
diff --git a/drivers/block/ub.c b/drivers/block/ub.c index c739b203fe91..2e889838e819 100644 --- a/drivers/block/ub.c +++ b/drivers/block/ub.c | |||
| @@ -393,7 +393,7 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum); | |||
| 393 | #define ub_usb_ids usb_storage_usb_ids | 393 | #define ub_usb_ids usb_storage_usb_ids |
| 394 | #else | 394 | #else |
| 395 | 395 | ||
| 396 | static struct usb_device_id ub_usb_ids[] = { | 396 | static const struct usb_device_id ub_usb_ids[] = { |
| 397 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, | 397 | { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, US_SC_SCSI, US_PR_BULK) }, |
| 398 | { } | 398 | { } |
| 399 | }; | 399 | }; |
| @@ -2320,10 +2320,9 @@ static int ub_probe_lun(struct ub_dev *sc, int lnum) | |||
| 2320 | disk->queue = q; | 2320 | disk->queue = q; |
| 2321 | 2321 | ||
| 2322 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); | 2322 | blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); |
| 2323 | blk_queue_max_hw_segments(q, UB_MAX_REQ_SG); | 2323 | blk_queue_max_segments(q, UB_MAX_REQ_SG); |
| 2324 | blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); | ||
| 2325 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ | 2324 | blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ |
| 2326 | blk_queue_max_sectors(q, UB_MAX_SECTORS); | 2325 | blk_queue_max_hw_sectors(q, UB_MAX_SECTORS); |
| 2327 | blk_queue_logical_block_size(q, lun->capacity.bsize); | 2326 | blk_queue_logical_block_size(q, lun->capacity.bsize); |
| 2328 | 2327 | ||
| 2329 | lun->disk = disk; | 2328 | lun->disk = disk; |
diff --git a/drivers/block/viodasd.c b/drivers/block/viodasd.c index 1b3def1e8591..788d93882ab9 100644 --- a/drivers/block/viodasd.c +++ b/drivers/block/viodasd.c | |||
| @@ -462,9 +462,8 @@ retry: | |||
| 462 | } | 462 | } |
| 463 | 463 | ||
| 464 | d->disk = g; | 464 | d->disk = g; |
| 465 | blk_queue_max_hw_segments(q, VIOMAXBLOCKDMA); | 465 | blk_queue_max_segments(q, VIOMAXBLOCKDMA); |
| 466 | blk_queue_max_phys_segments(q, VIOMAXBLOCKDMA); | 466 | blk_queue_max_hw_sectors(q, VIODASD_MAXSECTORS); |
| 467 | blk_queue_max_sectors(q, VIODASD_MAXSECTORS); | ||
| 468 | g->major = VIODASD_MAJOR; | 467 | g->major = VIODASD_MAJOR; |
| 469 | g->first_minor = dev_no << PARTITION_SHIFT; | 468 | g->first_minor = dev_no << PARTITION_SHIFT; |
| 470 | if (dev_no >= 26) | 469 | if (dev_no >= 26) |
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7eff828b2117..3c64af05fa82 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c | |||
| @@ -435,7 +435,7 @@ static void __devexit virtblk_remove(struct virtio_device *vdev) | |||
| 435 | kfree(vblk); | 435 | kfree(vblk); |
| 436 | } | 436 | } |
| 437 | 437 | ||
| 438 | static struct virtio_device_id id_table[] = { | 438 | static const struct virtio_device_id id_table[] = { |
| 439 | { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, | 439 | { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, |
| 440 | { 0 }, | 440 | { 0 }, |
| 441 | }; | 441 | }; |
diff --git a/drivers/block/xd.c b/drivers/block/xd.c index d1fd032e7514..1a325fb05c92 100644 --- a/drivers/block/xd.c +++ b/drivers/block/xd.c | |||
| @@ -242,7 +242,7 @@ static int __init xd_init(void) | |||
| 242 | } | 242 | } |
| 243 | 243 | ||
| 244 | /* xd_maxsectors depends on controller - so set after detection */ | 244 | /* xd_maxsectors depends on controller - so set after detection */ |
| 245 | blk_queue_max_sectors(xd_queue, xd_maxsectors); | 245 | blk_queue_max_hw_sectors(xd_queue, xd_maxsectors); |
| 246 | 246 | ||
| 247 | for (i = 0; i < xd_drives; i++) | 247 | for (i = 0; i < xd_drives; i++) |
| 248 | add_disk(xd_gendisk[i]); | 248 | add_disk(xd_gendisk[i]); |
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 05a31e55d278..9c09694b2520 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c | |||
| @@ -346,15 +346,14 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size) | |||
| 346 | 346 | ||
| 347 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ | 347 | /* Hard sector size and max sectors impersonate the equiv. hardware. */ |
| 348 | blk_queue_logical_block_size(rq, sector_size); | 348 | blk_queue_logical_block_size(rq, sector_size); |
| 349 | blk_queue_max_sectors(rq, 512); | 349 | blk_queue_max_hw_sectors(rq, 512); |
| 350 | 350 | ||
| 351 | /* Each segment in a request is up to an aligned page in size. */ | 351 | /* Each segment in a request is up to an aligned page in size. */ |
| 352 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); | 352 | blk_queue_segment_boundary(rq, PAGE_SIZE - 1); |
| 353 | blk_queue_max_segment_size(rq, PAGE_SIZE); | 353 | blk_queue_max_segment_size(rq, PAGE_SIZE); |
| 354 | 354 | ||
| 355 | /* Ensure a merged request will fit in a single I/O ring slot. */ | 355 | /* Ensure a merged request will fit in a single I/O ring slot. */ |
| 356 | blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | 356 | blk_queue_max_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); |
| 357 | blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST); | ||
| 358 | 357 | ||
| 359 | /* Make sure buffer addresses are sector-aligned. */ | 358 | /* Make sure buffer addresses are sector-aligned. */ |
| 360 | blk_queue_dma_alignment(rq, 511); | 359 | blk_queue_dma_alignment(rq, 511); |
| @@ -1050,7 +1049,7 @@ static const struct block_device_operations xlvbd_block_fops = | |||
| 1050 | }; | 1049 | }; |
| 1051 | 1050 | ||
| 1052 | 1051 | ||
| 1053 | static struct xenbus_device_id blkfront_ids[] = { | 1052 | static const struct xenbus_device_id blkfront_ids[] = { |
| 1054 | { "vbd" }, | 1053 | { "vbd" }, |
| 1055 | { "" } | 1054 | { "" } |
| 1056 | }; | 1055 | }; |
diff --git a/drivers/block/xsysace.c b/drivers/block/xsysace.c index e5c5415eb45e..e1c95e208a66 100644 --- a/drivers/block/xsysace.c +++ b/drivers/block/xsysace.c | |||
| @@ -1227,7 +1227,7 @@ static int __devexit ace_of_remove(struct of_device *op) | |||
| 1227 | } | 1227 | } |
| 1228 | 1228 | ||
| 1229 | /* Match table for of_platform binding */ | 1229 | /* Match table for of_platform binding */ |
| 1230 | static struct of_device_id ace_of_match[] __devinitdata = { | 1230 | static const struct of_device_id ace_of_match[] __devinitconst = { |
| 1231 | { .compatible = "xlnx,opb-sysace-1.00.b", }, | 1231 | { .compatible = "xlnx,opb-sysace-1.00.b", }, |
| 1232 | { .compatible = "xlnx,opb-sysace-1.00.c", }, | 1232 | { .compatible = "xlnx,opb-sysace-1.00.c", }, |
| 1233 | { .compatible = "xlnx,xps-sysace-1.00.a", }, | 1233 | { .compatible = "xlnx,xps-sysace-1.00.a", }, |
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c index e789e6c9a422..03c71f7698cb 100644 --- a/drivers/cdrom/gdrom.c +++ b/drivers/cdrom/gdrom.c | |||
| @@ -741,7 +741,7 @@ static int __devinit probe_gdrom_setupqueue(void) | |||
| 741 | { | 741 | { |
| 742 | blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); | 742 | blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); |
| 743 | /* using DMA so memory will need to be contiguous */ | 743 | /* using DMA so memory will need to be contiguous */ |
| 744 | blk_queue_max_hw_segments(gd.gdrom_rq, 1); | 744 | blk_queue_max_segments(gd.gdrom_rq, 1); |
| 745 | /* set a large max size to get most from DMA */ | 745 | /* set a large max size to get most from DMA */ |
| 746 | blk_queue_max_segment_size(gd.gdrom_rq, 0x40000); | 746 | blk_queue_max_segment_size(gd.gdrom_rq, 0x40000); |
| 747 | gd.disk->queue = gd.gdrom_rq; | 747 | gd.disk->queue = gd.gdrom_rq; |
diff --git a/drivers/cdrom/viocd.c b/drivers/cdrom/viocd.c index 57ca69e0ac55..cc435be0bc13 100644 --- a/drivers/cdrom/viocd.c +++ b/drivers/cdrom/viocd.c | |||
| @@ -616,9 +616,8 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 616 | gendisk->first_minor = deviceno; | 616 | gendisk->first_minor = deviceno; |
| 617 | strncpy(gendisk->disk_name, c->name, | 617 | strncpy(gendisk->disk_name, c->name, |
| 618 | sizeof(gendisk->disk_name)); | 618 | sizeof(gendisk->disk_name)); |
| 619 | blk_queue_max_hw_segments(q, 1); | 619 | blk_queue_max_segments(q, 1); |
| 620 | blk_queue_max_phys_segments(q, 1); | 620 | blk_queue_max_hw_sectors(q, 4096 / 512); |
| 621 | blk_queue_max_sectors(q, 4096 / 512); | ||
| 622 | gendisk->queue = q; | 621 | gendisk->queue = q; |
| 623 | gendisk->fops = &viocd_fops; | 622 | gendisk->fops = &viocd_fops; |
| 624 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; | 623 | gendisk->flags = GENHD_FL_CD|GENHD_FL_REMOVABLE; |
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index d485cdd8cbac..70fef40cd22f 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c | |||
| @@ -1571,7 +1571,7 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev) | |||
| 1571 | sdev->start_stop_pwr_cond = 1; | 1571 | sdev->start_stop_pwr_cond = 1; |
| 1572 | 1572 | ||
| 1573 | if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) | 1573 | if (lu->tgt->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) |
| 1574 | blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); | 1574 | blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512); |
| 1575 | 1575 | ||
| 1576 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); | 1576 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); |
| 1577 | 1577 | ||
diff --git a/drivers/ide/ide-disk.c b/drivers/ide/ide-disk.c index 7f878017b736..3b128dce9c3a 100644 --- a/drivers/ide/ide-disk.c +++ b/drivers/ide/ide-disk.c | |||
| @@ -679,7 +679,7 @@ static void ide_disk_setup(ide_drive_t *drive) | |||
| 679 | if (max_s > hwif->rqsize) | 679 | if (max_s > hwif->rqsize) |
| 680 | max_s = hwif->rqsize; | 680 | max_s = hwif->rqsize; |
| 681 | 681 | ||
| 682 | blk_queue_max_sectors(q, max_s); | 682 | blk_queue_max_hw_sectors(q, max_s); |
| 683 | } | 683 | } |
| 684 | 684 | ||
| 685 | printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, | 685 | printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, |
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c index fefbdfc8db06..efd907623469 100644 --- a/drivers/ide/ide-floppy.c +++ b/drivers/ide/ide-floppy.c | |||
| @@ -486,7 +486,7 @@ static void ide_floppy_setup(ide_drive_t *drive) | |||
| 486 | drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE; | 486 | drive->atapi_flags |= IDE_AFLAG_ZIP_DRIVE; |
| 487 | /* This value will be visible in the /proc/ide/hdx/settings */ | 487 | /* This value will be visible in the /proc/ide/hdx/settings */ |
| 488 | drive->pc_delay = IDEFLOPPY_PC_DELAY; | 488 | drive->pc_delay = IDEFLOPPY_PC_DELAY; |
| 489 | blk_queue_max_sectors(drive->queue, 64); | 489 | blk_queue_max_hw_sectors(drive->queue, 64); |
| 490 | } | 490 | } |
| 491 | 491 | ||
| 492 | /* | 492 | /* |
| @@ -494,7 +494,7 @@ static void ide_floppy_setup(ide_drive_t *drive) | |||
| 494 | * nasty clicking noises without it, so please don't remove this. | 494 | * nasty clicking noises without it, so please don't remove this. |
| 495 | */ | 495 | */ |
| 496 | if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) { | 496 | if (strncmp((char *)&id[ATA_ID_PROD], "IOMEGA Clik!", 11) == 0) { |
| 497 | blk_queue_max_sectors(drive->queue, 64); | 497 | blk_queue_max_hw_sectors(drive->queue, 64); |
| 498 | drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE; | 498 | drive->atapi_flags |= IDE_AFLAG_CLIK_DRIVE; |
| 499 | /* IOMEGA Clik! drives do not support lock/unlock commands */ | 499 | /* IOMEGA Clik! drives do not support lock/unlock commands */ |
| 500 | drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; | 500 | drive->dev_flags &= ~IDE_DFLAG_DOORLOCKING; |
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index 4d76ba473097..f8c1ae6ad74c 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c | |||
| @@ -774,7 +774,7 @@ static int ide_init_queue(ide_drive_t *drive) | |||
| 774 | 774 | ||
| 775 | if (hwif->rqsize < max_sectors) | 775 | if (hwif->rqsize < max_sectors) |
| 776 | max_sectors = hwif->rqsize; | 776 | max_sectors = hwif->rqsize; |
| 777 | blk_queue_max_sectors(q, max_sectors); | 777 | blk_queue_max_hw_sectors(q, max_sectors); |
| 778 | 778 | ||
| 779 | #ifdef CONFIG_PCI | 779 | #ifdef CONFIG_PCI |
| 780 | /* When we have an IOMMU, we may have a problem where pci_map_sg() | 780 | /* When we have an IOMMU, we may have a problem where pci_map_sg() |
| @@ -790,8 +790,7 @@ static int ide_init_queue(ide_drive_t *drive) | |||
| 790 | max_sg_entries >>= 1; | 790 | max_sg_entries >>= 1; |
| 791 | #endif /* CONFIG_PCI */ | 791 | #endif /* CONFIG_PCI */ |
| 792 | 792 | ||
| 793 | blk_queue_max_hw_segments(q, max_sg_entries); | 793 | blk_queue_max_segments(q, max_sg_entries); |
| 794 | blk_queue_max_phys_segments(q, max_sg_entries); | ||
| 795 | 794 | ||
| 796 | /* assign drive queue */ | 795 | /* assign drive queue */ |
| 797 | drive->queue = q; | 796 | drive->queue = q; |
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c index f199896c4113..c88696a6cf8a 100644 --- a/drivers/ieee1394/sbp2.c +++ b/drivers/ieee1394/sbp2.c | |||
| @@ -2020,7 +2020,7 @@ static int sbp2scsi_slave_configure(struct scsi_device *sdev) | |||
| 2020 | if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION) | 2020 | if (lu->workarounds & SBP2_WORKAROUND_POWER_CONDITION) |
| 2021 | sdev->start_stop_pwr_cond = 1; | 2021 | sdev->start_stop_pwr_cond = 1; |
| 2022 | if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) | 2022 | if (lu->workarounds & SBP2_WORKAROUND_128K_MAX_TRANS) |
| 2023 | blk_queue_max_sectors(sdev->request_queue, 128 * 1024 / 512); | 2023 | blk_queue_max_hw_sectors(sdev->request_queue, 128 * 1024 / 512); |
| 2024 | 2024 | ||
| 2025 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); | 2025 | blk_queue_max_segment_size(sdev->request_queue, SBP2_MAX_SEG_SIZE); |
| 2026 | return 0; | 2026 | return 0; |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 00435bd20699..af2d39d603c7 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
| @@ -177,7 +177,7 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks) | |||
| 177 | */ | 177 | */ |
| 178 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 178 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 179 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 179 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 180 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 180 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 181 | 181 | ||
| 182 | conf->array_sectors += rdev->sectors; | 182 | conf->array_sectors += rdev->sectors; |
| 183 | cnt++; | 183 | cnt++; |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 32a662fc55c9..4b323f45ad74 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
| @@ -308,7 +308,7 @@ static int multipath_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 308 | */ | 308 | */ |
| 309 | if (q->merge_bvec_fn && | 309 | if (q->merge_bvec_fn && |
| 310 | queue_max_sectors(q) > (PAGE_SIZE>>9)) | 310 | queue_max_sectors(q) > (PAGE_SIZE>>9)) |
| 311 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 311 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 312 | 312 | ||
| 313 | conf->working_disks++; | 313 | conf->working_disks++; |
| 314 | mddev->degraded--; | 314 | mddev->degraded--; |
| @@ -478,7 +478,7 @@ static int multipath_run (mddev_t *mddev) | |||
| 478 | * a merge_bvec_fn to be involved in multipath */ | 478 | * a merge_bvec_fn to be involved in multipath */ |
| 479 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 479 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 480 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 480 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 481 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 481 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 482 | 482 | ||
| 483 | if (!test_bit(Faulty, &rdev->flags)) | 483 | if (!test_bit(Faulty, &rdev->flags)) |
| 484 | conf->working_disks++; | 484 | conf->working_disks++; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 77605cdceaf1..a1f7147b757f 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
| @@ -182,7 +182,7 @@ static int create_strip_zones(mddev_t *mddev) | |||
| 182 | 182 | ||
| 183 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && | 183 | if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && |
| 184 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 184 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 185 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 185 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 186 | 186 | ||
| 187 | if (!smallest || (rdev1->sectors < smallest->sectors)) | 187 | if (!smallest || (rdev1->sectors < smallest->sectors)) |
| 188 | smallest = rdev1; | 188 | smallest = rdev1; |
| @@ -325,7 +325,7 @@ static int raid0_run(mddev_t *mddev) | |||
| 325 | } | 325 | } |
| 326 | if (md_check_no_bitmap(mddev)) | 326 | if (md_check_no_bitmap(mddev)) |
| 327 | return -EINVAL; | 327 | return -EINVAL; |
| 328 | blk_queue_max_sectors(mddev->queue, mddev->chunk_sectors); | 328 | blk_queue_max_hw_sectors(mddev->queue, mddev->chunk_sectors); |
| 329 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; | 329 | mddev->queue->queue_lock = &mddev->queue->__queue_lock; |
| 330 | 330 | ||
| 331 | ret = create_strip_zones(mddev); | 331 | ret = create_strip_zones(mddev); |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 859bd3ffe435..5a06122abd3b 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -1158,7 +1158,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 1158 | */ | 1158 | */ |
| 1159 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 1159 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 1160 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 1160 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 1161 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 1161 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 1162 | 1162 | ||
| 1163 | p->head_position = 0; | 1163 | p->head_position = 0; |
| 1164 | rdev->raid_disk = mirror; | 1164 | rdev->raid_disk = mirror; |
| @@ -2103,7 +2103,7 @@ static int run(mddev_t *mddev) | |||
| 2103 | */ | 2103 | */ |
| 2104 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 2104 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 2105 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 2105 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 2106 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 2106 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 2107 | } | 2107 | } |
| 2108 | 2108 | ||
| 2109 | mddev->degraded = 0; | 2109 | mddev->degraded = 0; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index d119b7b75e71..7584f9ab9bcf 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -1161,7 +1161,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) | |||
| 1161 | */ | 1161 | */ |
| 1162 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 1162 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 1163 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 1163 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 1164 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 1164 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 1165 | 1165 | ||
| 1166 | p->head_position = 0; | 1166 | p->head_position = 0; |
| 1167 | rdev->raid_disk = mirror; | 1167 | rdev->raid_disk = mirror; |
| @@ -2260,7 +2260,7 @@ static int run(mddev_t *mddev) | |||
| 2260 | */ | 2260 | */ |
| 2261 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && | 2261 | if (rdev->bdev->bd_disk->queue->merge_bvec_fn && |
| 2262 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) | 2262 | queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) |
| 2263 | blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); | 2263 | blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9); |
| 2264 | 2264 | ||
| 2265 | disk->head_position = 0; | 2265 | disk->head_position = 0; |
| 2266 | } | 2266 | } |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ceb24afdc147..509c8f3dd9a5 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -3739,7 +3739,7 @@ static int bio_fits_rdev(struct bio *bi) | |||
| 3739 | if ((bi->bi_size>>9) > queue_max_sectors(q)) | 3739 | if ((bi->bi_size>>9) > queue_max_sectors(q)) |
| 3740 | return 0; | 3740 | return 0; |
| 3741 | blk_recount_segments(q, bi); | 3741 | blk_recount_segments(q, bi); |
| 3742 | if (bi->bi_phys_segments > queue_max_phys_segments(q)) | 3742 | if (bi->bi_phys_segments > queue_max_segments(q)) |
| 3743 | return 0; | 3743 | return 0; |
| 3744 | 3744 | ||
| 3745 | if (q->merge_bvec_fn) | 3745 | if (q->merge_bvec_fn) |
diff --git a/drivers/memstick/core/mspro_block.c b/drivers/memstick/core/mspro_block.c index bd83fa0a4970..972b87069d55 100644 --- a/drivers/memstick/core/mspro_block.c +++ b/drivers/memstick/core/mspro_block.c | |||
| @@ -1226,9 +1226,8 @@ static int mspro_block_init_disk(struct memstick_dev *card) | |||
| 1226 | blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); | 1226 | blk_queue_prep_rq(msb->queue, mspro_block_prepare_req); |
| 1227 | 1227 | ||
| 1228 | blk_queue_bounce_limit(msb->queue, limit); | 1228 | blk_queue_bounce_limit(msb->queue, limit); |
| 1229 | blk_queue_max_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); | 1229 | blk_queue_max_hw_sectors(msb->queue, MSPRO_BLOCK_MAX_PAGES); |
| 1230 | blk_queue_max_phys_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); | 1230 | blk_queue_max_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); |
| 1231 | blk_queue_max_hw_segments(msb->queue, MSPRO_BLOCK_MAX_SEGS); | ||
| 1232 | blk_queue_max_segment_size(msb->queue, | 1231 | blk_queue_max_segment_size(msb->queue, |
| 1233 | MSPRO_BLOCK_MAX_PAGES * msb->page_size); | 1232 | MSPRO_BLOCK_MAX_PAGES * msb->page_size); |
| 1234 | 1233 | ||
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c index e39986a78273..2658b1484a2c 100644 --- a/drivers/message/i2o/i2o_block.c +++ b/drivers/message/i2o/i2o_block.c | |||
| @@ -1065,9 +1065,8 @@ static int i2o_block_probe(struct device *dev) | |||
| 1065 | queue = gd->queue; | 1065 | queue = gd->queue; |
| 1066 | queue->queuedata = i2o_blk_dev; | 1066 | queue->queuedata = i2o_blk_dev; |
| 1067 | 1067 | ||
| 1068 | blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS); | 1068 | blk_queue_max_hw_sectors(queue, max_sectors); |
| 1069 | blk_queue_max_sectors(queue, max_sectors); | 1069 | blk_queue_max_segments(queue, i2o_sg_tablesize(c, body_size)); |
| 1070 | blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size)); | ||
| 1071 | 1070 | ||
| 1072 | osm_debug("max sectors = %d\n", queue->max_sectors); | 1071 | osm_debug("max sectors = %d\n", queue->max_sectors); |
| 1073 | osm_debug("phys segments = %d\n", queue->max_phys_segments); | 1072 | osm_debug("phys segments = %d\n", queue->max_phys_segments); |
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index c5a7a855f4b1..381fe032caa1 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c | |||
| @@ -154,9 +154,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
| 154 | 154 | ||
| 155 | if (mq->bounce_buf) { | 155 | if (mq->bounce_buf) { |
| 156 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); | 156 | blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY); |
| 157 | blk_queue_max_sectors(mq->queue, bouncesz / 512); | 157 | blk_queue_max_hw_sectors(mq->queue, bouncesz / 512); |
| 158 | blk_queue_max_phys_segments(mq->queue, bouncesz / 512); | 158 | blk_queue_max_segments(mq->queue, bouncesz / 512); |
| 159 | blk_queue_max_hw_segments(mq->queue, bouncesz / 512); | ||
| 160 | blk_queue_max_segment_size(mq->queue, bouncesz); | 159 | blk_queue_max_segment_size(mq->queue, bouncesz); |
| 161 | 160 | ||
| 162 | mq->sg = kmalloc(sizeof(struct scatterlist), | 161 | mq->sg = kmalloc(sizeof(struct scatterlist), |
| @@ -180,10 +179,9 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock | |||
| 180 | 179 | ||
| 181 | if (!mq->bounce_buf) { | 180 | if (!mq->bounce_buf) { |
| 182 | blk_queue_bounce_limit(mq->queue, limit); | 181 | blk_queue_bounce_limit(mq->queue, limit); |
| 183 | blk_queue_max_sectors(mq->queue, | 182 | blk_queue_max_hw_sectors(mq->queue, |
| 184 | min(host->max_blk_count, host->max_req_size / 512)); | 183 | min(host->max_blk_count, host->max_req_size / 512)); |
| 185 | blk_queue_max_phys_segments(mq->queue, host->max_phys_segs); | 184 | blk_queue_max_segments(mq->queue, host->max_hw_segs); |
| 186 | blk_queue_max_hw_segments(mq->queue, host->max_hw_segs); | ||
| 187 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); | 185 | blk_queue_max_segment_size(mq->queue, host->max_seg_size); |
| 188 | 186 | ||
| 189 | mq->sg = kmalloc(sizeof(struct scatterlist) * | 187 | mq->sg = kmalloc(sizeof(struct scatterlist) * |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index 9ab1ae40565f..4951aa82e9f5 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
| @@ -2139,9 +2139,8 @@ static void dasd_setup_queue(struct dasd_block *block) | |||
| 2139 | 2139 | ||
| 2140 | blk_queue_logical_block_size(block->request_queue, block->bp_block); | 2140 | blk_queue_logical_block_size(block->request_queue, block->bp_block); |
| 2141 | max = block->base->discipline->max_blocks << block->s2b_shift; | 2141 | max = block->base->discipline->max_blocks << block->s2b_shift; |
| 2142 | blk_queue_max_sectors(block->request_queue, max); | 2142 | blk_queue_max_hw_sectors(block->request_queue, max); |
| 2143 | blk_queue_max_phys_segments(block->request_queue, -1L); | 2143 | blk_queue_max_segments(block->request_queue, -1L); |
| 2144 | blk_queue_max_hw_segments(block->request_queue, -1L); | ||
| 2145 | /* with page sized segments we can translate each segment into | 2144 | /* with page sized segments we can translate each segment into |
| 2146 | * one idaw/tidaw | 2145 | * one idaw/tidaw |
| 2147 | */ | 2146 | */ |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c index 8d3d720737da..097da8ce6be6 100644 --- a/drivers/s390/char/tape_block.c +++ b/drivers/s390/char/tape_block.c | |||
| @@ -222,9 +222,8 @@ tapeblock_setup_device(struct tape_device * device) | |||
| 222 | goto cleanup_queue; | 222 | goto cleanup_queue; |
| 223 | 223 | ||
| 224 | blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); | 224 | blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); |
| 225 | blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); | 225 | blk_queue_max_hw_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); |
| 226 | blk_queue_max_phys_segments(blkdat->request_queue, -1L); | 226 | blk_queue_max_segments(blkdat->request_queue, -1L); |
| 227 | blk_queue_max_hw_segments(blkdat->request_queue, -1L); | ||
| 228 | blk_queue_max_segment_size(blkdat->request_queue, -1L); | 227 | blk_queue_max_segment_size(blkdat->request_queue, -1L); |
| 229 | blk_queue_segment_boundary(blkdat->request_queue, -1L); | 228 | blk_queue_segment_boundary(blkdat->request_queue, -1L); |
| 230 | 229 | ||
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 87b536a97cb4..732f6d35b4a8 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c | |||
| @@ -4195,7 +4195,7 @@ static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt) | |||
| 4195 | if (tgt->service_parms.class3_parms[0] & 0x80000000) | 4195 | if (tgt->service_parms.class3_parms[0] & 0x80000000) |
| 4196 | rport->supported_classes |= FC_COS_CLASS3; | 4196 | rport->supported_classes |= FC_COS_CLASS3; |
| 4197 | if (rport->rqst_q) | 4197 | if (rport->rqst_q) |
| 4198 | blk_queue_max_hw_segments(rport->rqst_q, 1); | 4198 | blk_queue_max_segments(rport->rqst_q, 1); |
| 4199 | } else | 4199 | } else |
| 4200 | tgt_dbg(tgt, "rport add failed\n"); | 4200 | tgt_dbg(tgt, "rport add failed\n"); |
| 4201 | spin_unlock_irqrestore(vhost->host->host_lock, flags); | 4201 | spin_unlock_irqrestore(vhost->host->host_lock, flags); |
| @@ -4669,7 +4669,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id) | |||
| 4669 | } | 4669 | } |
| 4670 | 4670 | ||
| 4671 | if (shost_to_fc_host(shost)->rqst_q) | 4671 | if (shost_to_fc_host(shost)->rqst_q) |
| 4672 | blk_queue_max_hw_segments(shost_to_fc_host(shost)->rqst_q, 1); | 4672 | blk_queue_max_segments(shost_to_fc_host(shost)->rqst_q, 1); |
| 4673 | dev_set_drvdata(dev, vhost); | 4673 | dev_set_drvdata(dev, vhost); |
| 4674 | spin_lock(&ibmvfc_driver_lock); | 4674 | spin_lock(&ibmvfc_driver_lock); |
| 4675 | list_add_tail(&vhost->queue, &ibmvfc_head); | 4675 | list_add_tail(&vhost->queue, &ibmvfc_head); |
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 9e52d16c7c39..032f0d0e6cb4 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
| @@ -3674,7 +3674,7 @@ static int ipr_slave_configure(struct scsi_device *sdev) | |||
| 3674 | if (ipr_is_vset_device(res)) { | 3674 | if (ipr_is_vset_device(res)) { |
| 3675 | blk_queue_rq_timeout(sdev->request_queue, | 3675 | blk_queue_rq_timeout(sdev->request_queue, |
| 3676 | IPR_VSET_RW_TIMEOUT); | 3676 | IPR_VSET_RW_TIMEOUT); |
| 3677 | blk_queue_max_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); | 3677 | blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); |
| 3678 | } | 3678 | } |
| 3679 | if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) | 3679 | if (ipr_is_vset_device(res) || ipr_is_scsi_disk(res)) |
| 3680 | sdev->allow_restart = 1; | 3680 | sdev->allow_restart = 1; |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index b6f1ef954af1..9b1c1433c26b 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
| @@ -235,7 +235,7 @@ static int pmcraid_slave_configure(struct scsi_device *scsi_dev) | |||
| 235 | scsi_dev->allow_restart = 1; | 235 | scsi_dev->allow_restart = 1; |
| 236 | blk_queue_rq_timeout(scsi_dev->request_queue, | 236 | blk_queue_rq_timeout(scsi_dev->request_queue, |
| 237 | PMCRAID_VSET_IO_TIMEOUT); | 237 | PMCRAID_VSET_IO_TIMEOUT); |
| 238 | blk_queue_max_sectors(scsi_dev->request_queue, | 238 | blk_queue_max_hw_sectors(scsi_dev->request_queue, |
| 239 | PMCRAID_VSET_MAX_SECTORS); | 239 | PMCRAID_VSET_MAX_SECTORS); |
| 240 | } | 240 | } |
| 241 | 241 | ||
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 56977097de9f..1646fe7cbd4b 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
| @@ -1630,10 +1630,10 @@ struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, | |||
| 1630 | /* | 1630 | /* |
| 1631 | * this limit is imposed by hardware restrictions | 1631 | * this limit is imposed by hardware restrictions |
| 1632 | */ | 1632 | */ |
| 1633 | blk_queue_max_hw_segments(q, shost->sg_tablesize); | 1633 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, |
| 1634 | blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); | 1634 | SCSI_MAX_SG_CHAIN_SEGMENTS)); |
| 1635 | 1635 | ||
| 1636 | blk_queue_max_sectors(q, shost->max_sectors); | 1636 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
| 1637 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | 1637 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); |
| 1638 | blk_queue_segment_boundary(q, shost->dma_boundary); | 1638 | blk_queue_segment_boundary(q, shost->dma_boundary); |
| 1639 | dma_set_seg_boundary(dev, shost->dma_boundary); | 1639 | dma_set_seg_boundary(dev, shost->dma_boundary); |
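Note: with a single segment limit left, __scsi_alloc_queue() above folds the old pair of setters into one call that still caps the host's sg_tablesize at the block layer's scatter-gather chaining ceiling. A model of that capping; the value assumed here for SCSI_MAX_SG_CHAIN_SEGMENTS (2048) is its usual setting when SG chaining is available:

#include <stdio.h>

#define SCSI_MAX_SG_CHAIN_SEGMENTS 2048 /* assumed; arch/config dependent */

/* simplified min_t: evaluates arguments twice, unlike the kernel macro */
#define min_t(type, a, b) \
        ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
        unsigned short sg_tablesize = 4096;     /* hypothetical host value */
        unsigned short max_segments;

        max_segments = min_t(unsigned short, sg_tablesize,
                             SCSI_MAX_SG_CHAIN_SEGMENTS);
        printf("queue max_segments = %u\n", max_segments);
        return 0;
}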
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index f697229ae5a9..4bc8b77a2ef3 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c | |||
| @@ -879,7 +879,7 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, | |||
| 879 | * broken RA4x00 Compaq Disk Array | 879 | * broken RA4x00 Compaq Disk Array |
| 880 | */ | 880 | */ |
| 881 | if (*bflags & BLIST_MAX_512) | 881 | if (*bflags & BLIST_MAX_512) |
| 882 | blk_queue_max_sectors(sdev->request_queue, 512); | 882 | blk_queue_max_hw_sectors(sdev->request_queue, 512); |
| 883 | 883 | ||
| 884 | /* | 884 | /* |
| 885 | * Some devices may not want to have a start command automatically | 885 | * Some devices may not want to have a start command automatically |
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 040f751809ea..c996d98636f3 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c | |||
| @@ -287,8 +287,7 @@ sg_open(struct inode *inode, struct file *filp) | |||
| 287 | if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ | 287 | if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ |
| 288 | sdp->sgdebug = 0; | 288 | sdp->sgdebug = 0; |
| 289 | q = sdp->device->request_queue; | 289 | q = sdp->device->request_queue; |
| 290 | sdp->sg_tablesize = min(queue_max_hw_segments(q), | 290 | sdp->sg_tablesize = queue_max_segments(q); |
| 291 | queue_max_phys_segments(q)); | ||
| 292 | } | 291 | } |
| 293 | if ((sfp = sg_add_sfp(sdp, dev))) | 292 | if ((sfp = sg_add_sfp(sdp, dev))) |
| 294 | filp->private_data = sfp; | 293 | filp->private_data = sfp; |
| @@ -1376,8 +1375,7 @@ static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) | |||
| 1376 | sdp->device = scsidp; | 1375 | sdp->device = scsidp; |
| 1377 | INIT_LIST_HEAD(&sdp->sfds); | 1376 | INIT_LIST_HEAD(&sdp->sfds); |
| 1378 | init_waitqueue_head(&sdp->o_excl_wait); | 1377 | init_waitqueue_head(&sdp->o_excl_wait); |
| 1379 | sdp->sg_tablesize = min(queue_max_hw_segments(q), | 1378 | sdp->sg_tablesize = queue_max_segments(q); |
| 1380 | queue_max_phys_segments(q)); | ||
| 1381 | sdp->index = k; | 1379 | sdp->index = k; |
| 1382 | kref_init(&sdp->d_ref); | 1380 | kref_init(&sdp->d_ref); |
| 1383 | 1381 | ||
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c index d04ea9a6f673..f67d1a159aad 100644 --- a/drivers/scsi/st.c +++ b/drivers/scsi/st.c | |||
| @@ -3983,8 +3983,7 @@ static int st_probe(struct device *dev) | |||
| 3983 | return -ENODEV; | 3983 | return -ENODEV; |
| 3984 | } | 3984 | } |
| 3985 | 3985 | ||
| 3986 | i = min(queue_max_hw_segments(SDp->request_queue), | 3986 | i = queue_max_segments(SDp->request_queue); |
| 3987 | queue_max_phys_segments(SDp->request_queue)); | ||
| 3988 | if (st_max_sg_segs < i) | 3987 | if (st_max_sg_segs < i) |
| 3989 | i = st_max_sg_segs; | 3988 | i = st_max_sg_segs; |
| 3990 | buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); | 3989 | buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i); |
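The sg and st hunks are the reader side of the same consolidation: the min() over the two old getters reduces to a single call. Sketched:

/* Reader side, sketched: the old two-limit min() collapses to one call. */
static unsigned short example_sg_tablesize(struct request_queue *q)
{
	return queue_max_segments(q);	/* was min(hw, phys) */
}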
diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c index 62b282844a53..45d908114d11 100644 --- a/drivers/staging/hv/blkvsc_drv.c +++ b/drivers/staging/hv/blkvsc_drv.c | |||
| @@ -363,10 +363,7 @@ static int blkvsc_probe(struct device *device) | |||
| 363 | blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock); | 363 | blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock); |
| 364 | 364 | ||
| 365 | blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE); | 365 | blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE); |
| 366 | blk_queue_max_phys_segments(blkdev->gd->queue, | 366 | blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT); |
| 367 | MAX_MULTIPAGE_BUFFER_COUNT); | ||
| 368 | blk_queue_max_hw_segments(blkdev->gd->queue, | ||
| 369 | MAX_MULTIPAGE_BUFFER_COUNT); | ||
| 370 | blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1); | 367 | blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1); |
| 371 | blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY); | 368 | blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY); |
| 372 | blk_queue_dma_alignment(blkdev->gd->queue, 511); | 369 | blk_queue_dma_alignment(blkdev->gd->queue, 511); |
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index e5e6df39e737..aadc16b5eed7 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c | |||
| @@ -134,14 +134,14 @@ static int slave_configure(struct scsi_device *sdev) | |||
| 134 | if (us->fflags & US_FL_MAX_SECTORS_MIN) | 134 | if (us->fflags & US_FL_MAX_SECTORS_MIN) |
| 135 | max_sectors = PAGE_CACHE_SIZE >> 9; | 135 | max_sectors = PAGE_CACHE_SIZE >> 9; |
| 136 | if (queue_max_sectors(sdev->request_queue) > max_sectors) | 136 | if (queue_max_sectors(sdev->request_queue) > max_sectors) |
| 137 | blk_queue_max_sectors(sdev->request_queue, | 137 | blk_queue_max_hw_sectors(sdev->request_queue, |
| 138 | max_sectors); | 138 | max_sectors); |
| 139 | } else if (sdev->type == TYPE_TAPE) { | 139 | } else if (sdev->type == TYPE_TAPE) { |
| 140 | /* Tapes need much higher max_sector limits, so just | 140 | /* Tapes need much higher max_sector limits, so just |
| 141 | * raise it to the maximum possible (4 GB / 512) and | 141 | * raise it to the maximum possible (4 GB / 512) and |
| 142 | * let the queue segment size sort out the real limit. | 142 | * let the queue segment size sort out the real limit. |
| 143 | */ | 143 | */ |
| 144 | blk_queue_max_sectors(sdev->request_queue, 0x7FFFFF); | 144 | blk_queue_max_hw_sectors(sdev->request_queue, 0x7FFFFF); |
| 145 | } | 145 | } |
| 146 | 146 | ||
| 147 | /* Some USB host controllers can't do DMA; they have to use PIO. | 147 | /* Some USB host controllers can't do DMA; they have to use PIO. |
| @@ -495,7 +495,7 @@ static ssize_t store_max_sectors(struct device *dev, struct device_attribute *at | |||
| 495 | unsigned short ms; | 495 | unsigned short ms; |
| 496 | 496 | ||
| 497 | if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { | 497 | if (sscanf(buf, "%hu", &ms) > 0 && ms <= SCSI_DEFAULT_MAX_SECTORS) { |
| 498 | blk_queue_max_sectors(sdev->request_queue, ms); | 498 | blk_queue_max_hw_sectors(sdev->request_queue, ms); |
| 499 | return strlen(buf); | 499 | return strlen(buf); |
| 500 | } | 500 | } |
| 501 | return -EINVAL; | 501 | return -EINVAL; |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
| @@ -507,10 +507,8 @@ int bio_get_nr_vecs(struct block_device *bdev) | |||
| 507 | int nr_pages; | 507 | int nr_pages; |
| 508 | 508 | ||
| 509 | nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; | 509 | nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; |
| 510 | if (nr_pages > queue_max_phys_segments(q)) | 510 | if (nr_pages > queue_max_segments(q)) |
| 511 | nr_pages = queue_max_phys_segments(q); | 511 | nr_pages = queue_max_segments(q); |
| 512 | if (nr_pages > queue_max_hw_segments(q)) | ||
| 513 | nr_pages = queue_max_hw_segments(q); | ||
| 514 | 512 | ||
| 515 | return nr_pages; | 513 | return nr_pages; |
| 516 | } | 514 | } |
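With a single segment limit, the double clamp in bio_get_nr_vecs() above could equivalently be written as one min — a sketch, not code from this patch:

/* Equivalent one-liner for the clamp performed above (sketch): */
nr_pages = min_t(int, nr_pages, queue_max_segments(q));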
| @@ -557,7 +555,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
| 557 | .bi_rw = bio->bi_rw, | 555 | .bi_rw = bio->bi_rw, |
| 558 | }; | 556 | }; |
| 559 | 557 | ||
| 560 | if (q->merge_bvec_fn(q, &bvm, prev) < len) { | 558 | if (q->merge_bvec_fn(q, &bvm, prev) != prev->bv_len) { |
| 561 | prev->bv_len -= len; | 559 | prev->bv_len -= len; |
| 562 | return 0; | 560 | return 0; |
| 563 | } | 561 | } |
| @@ -575,8 +573,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
| 575 | * make this too complex. | 573 | * make this too complex. |
| 576 | */ | 574 | */ |
| 577 | 575 | ||
| 578 | while (bio->bi_phys_segments >= queue_max_phys_segments(q) | 576 | while (bio->bi_phys_segments >= queue_max_segments(q)) { |
| 579 | || bio->bi_phys_segments >= queue_max_hw_segments(q)) { | ||
| 580 | 577 | ||
| 581 | if (retried_segments) | 578 | if (retried_segments) |
| 582 | return 0; | 579 | return 0; |
| @@ -611,7 +608,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | |||
| 611 | * merge_bvec_fn() returns number of bytes it can accept | 608 | * merge_bvec_fn() returns number of bytes it can accept |
| 612 | * at this offset | 609 | * at this offset |
| 613 | */ | 610 | */ |
| 614 | if (q->merge_bvec_fn(q, &bvm, bvec) < len) { | 611 | if (q->merge_bvec_fn(q, &bvm, bvec) != bvec->bv_len) { |
| 615 | bvec->bv_page = NULL; | 612 | bvec->bv_page = NULL; |
| 616 | bvec->bv_len = 0; | 613 | bvec->bv_len = 0; |
| 617 | bvec->bv_offset = 0; | 614 | bvec->bv_offset = 0; |
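The two __bio_add_page() hunks tighten the merge_bvec_fn contract: any return value other than the full bio_vec length is now treated as a rejection, where the old comparison accepted anything >= len. A conforming hook should therefore answer all-or-nothing. A hedged sketch, with example_fits() standing in for a real device limit check:

#define EXAMPLE_MAX_BYTES	(512 * 1024)	/* assumed device cap */

/* Hypothetical boundary check, not from this patch. */
static bool example_fits(struct bvec_merge_data *bvm, struct bio_vec *bv)
{
	return bvm->bi_size + bv->bv_len <= EXAMPLE_MAX_BYTES;
}

static int example_merge_bvec(struct request_queue *q,
			      struct bvec_merge_data *bvm,
			      struct bio_vec *biovec)
{
	if (example_fits(bvm, biovec))
		return biovec->bv_len;	/* accept the whole segment */
	return 0;			/* anything short of bv_len now means "no" */
}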
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 64bc8998ac9a..e8865c11777f 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
| @@ -412,9 +412,10 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, | |||
| 412 | pdev = part_to_dev(p); | 412 | pdev = part_to_dev(p); |
| 413 | 413 | ||
| 414 | p->start_sect = start; | 414 | p->start_sect = start; |
| 415 | p->alignment_offset = queue_sector_alignment_offset(disk->queue, start); | 415 | p->alignment_offset = |
| 416 | p->discard_alignment = queue_sector_discard_alignment(disk->queue, | 416 | queue_limit_alignment_offset(&disk->queue->limits, start); |
| 417 | start); | 417 | p->discard_alignment = |
| 418 | queue_limit_discard_alignment(&disk->queue->limits, start); | ||
| 418 | p->nr_sects = len; | 419 | p->nr_sects = len; |
| 419 | p->partno = partno; | 420 | p->partno = partno; |
| 420 | p->policy = get_disk_ro(disk); | 421 | p->policy = get_disk_ro(disk); |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 756f831cbdd5..91be0d896322 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
| @@ -43,6 +43,7 @@ header-y += blkpg.h | |||
| 43 | header-y += bpqether.h | 43 | header-y += bpqether.h |
| 44 | header-y += bsg.h | 44 | header-y += bsg.h |
| 45 | header-y += can.h | 45 | header-y += can.h |
| 46 | header-y += cciss_defs.h | ||
| 46 | header-y += cdk.h | 47 | header-y += cdk.h |
| 47 | header-y += chio.h | 48 | header-y += chio.h |
| 48 | header-y += coda_psdev.h | 49 | header-y += coda_psdev.h |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 1896e868854f..ebd22dbed861 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -316,8 +316,7 @@ struct queue_limits { | |||
| 316 | unsigned int discard_alignment; | 316 | unsigned int discard_alignment; |
| 317 | 317 | ||
| 318 | unsigned short logical_block_size; | 318 | unsigned short logical_block_size; |
| 319 | unsigned short max_hw_segments; | 319 | unsigned short max_segments; |
| 320 | unsigned short max_phys_segments; | ||
| 321 | 320 | ||
| 322 | unsigned char misaligned; | 321 | unsigned char misaligned; |
| 323 | unsigned char discard_misaligned; | 322 | unsigned char discard_misaligned; |
| @@ -462,6 +461,7 @@ struct request_queue | |||
| 462 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 461 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 463 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 462 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
| 464 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ | 463 | #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ |
| 464 | #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ | ||
| 465 | 465 | ||
| 466 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 466 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 467 | (1 << QUEUE_FLAG_CLUSTER) | \ | 467 | (1 << QUEUE_FLAG_CLUSTER) | \ |
| @@ -587,6 +587,8 @@ enum { | |||
| 587 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 587 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
| 588 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 588 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
| 589 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) | 589 | #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) |
| 590 | #define blk_queue_noxmerges(q) \ | ||
| 591 | test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) | ||
| 590 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 592 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
| 591 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 593 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
| 592 | #define blk_queue_flushing(q) ((q)->ordseq) | 594 | #define blk_queue_flushing(q) ((q)->ordseq) |
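QUEUE_FLAG_NOXMERGES is the flag behind the new nomerges=1 sysfs value. A hypothetical helper (not from this patch) showing how the two flags map to the documented settings — 0 = all merges, 1 = one-hit merges only, 2 = no merges at all:

static bool example_may_merge(struct request_queue *q, bool extended)
{
	if (blk_queue_nomerges(q))
		return false;		/* nomerges == 2: no merging at all */
	if (extended && blk_queue_noxmerges(q))
		return false;		/* nomerges == 1: skip tree/hash lookups */
	return true;			/* one-hit merge with the last request stays allowed */
}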
| @@ -918,10 +920,27 @@ extern struct request_queue *blk_init_queue(request_fn_proc *, spinlock_t *); | |||
| 918 | extern void blk_cleanup_queue(struct request_queue *); | 920 | extern void blk_cleanup_queue(struct request_queue *); |
| 919 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); | 921 | extern void blk_queue_make_request(struct request_queue *, make_request_fn *); |
| 920 | extern void blk_queue_bounce_limit(struct request_queue *, u64); | 922 | extern void blk_queue_bounce_limit(struct request_queue *, u64); |
| 921 | extern void blk_queue_max_sectors(struct request_queue *, unsigned int); | ||
| 922 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | 923 | extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); |
| 923 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 924 | |
| 924 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | 925 | /* Temporary compatibility wrapper */ |
| 926 | static inline void blk_queue_max_sectors(struct request_queue *q, unsigned int max) | ||
| 927 | { | ||
| 928 | blk_queue_max_hw_sectors(q, max); | ||
| 929 | } | ||
| 930 | |||
| 931 | extern void blk_queue_max_segments(struct request_queue *, unsigned short); | ||
| 932 | |||
| 933 | static inline void blk_queue_max_phys_segments(struct request_queue *q, unsigned short max) | ||
| 934 | { | ||
| 935 | blk_queue_max_segments(q, max); | ||
| 936 | } | ||
| 937 | |||
| 938 | static inline void blk_queue_max_hw_segments(struct request_queue *q, unsigned short max) | ||
| 939 | { | ||
| 940 | blk_queue_max_segments(q, max); | ||
| 941 | } | ||
| 942 | |||
| 943 | |||
| 925 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 944 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
| 926 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 945 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
| 927 | unsigned int max_discard_sectors); | 946 | unsigned int max_discard_sectors); |
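The static-inline wrappers keep unconverted drivers building during the transition; every legacy setter funnels into the corresponding new one. An illustrative call-through with assumed values:

static void example_legacy_calls(struct request_queue *q)
{
	blk_queue_max_phys_segments(q, 128);	/* -> blk_queue_max_segments(q, 128) */
	blk_queue_max_hw_segments(q, 128);	/* -> blk_queue_max_segments(q, 128) */
	blk_queue_max_sectors(q, 1024);		/* -> blk_queue_max_hw_sectors(q, 1024) */
}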
| @@ -1014,11 +1033,15 @@ extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | |||
| 1014 | #define MAX_PHYS_SEGMENTS 128 | 1033 | #define MAX_PHYS_SEGMENTS 128 |
| 1015 | #define MAX_HW_SEGMENTS 128 | 1034 | #define MAX_HW_SEGMENTS 128 |
| 1016 | #define SAFE_MAX_SECTORS 255 | 1035 | #define SAFE_MAX_SECTORS 255 |
| 1017 | #define BLK_DEF_MAX_SECTORS 1024 | ||
| 1018 | |||
| 1019 | #define MAX_SEGMENT_SIZE 65536 | 1036 | #define MAX_SEGMENT_SIZE 65536 |
| 1020 | 1037 | ||
| 1021 | #define BLK_SEG_BOUNDARY_MASK 0xFFFFFFFFUL | 1038 | enum blk_default_limits { |
| 1039 | BLK_MAX_SEGMENTS = 128, | ||
| 1040 | BLK_SAFE_MAX_SECTORS = 255, | ||
| 1041 | BLK_DEF_MAX_SECTORS = 1024, | ||
| 1042 | BLK_MAX_SEGMENT_SIZE = 65536, | ||
| 1043 | BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL, | ||
| 1044 | }; | ||
| 1022 | 1045 | ||
| 1023 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) | 1046 | #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) |
| 1024 | 1047 | ||
| @@ -1042,14 +1065,9 @@ static inline unsigned int queue_max_hw_sectors(struct request_queue *q) | |||
| 1042 | return q->limits.max_hw_sectors; | 1065 | return q->limits.max_hw_sectors; |
| 1043 | } | 1066 | } |
| 1044 | 1067 | ||
| 1045 | static inline unsigned short queue_max_hw_segments(struct request_queue *q) | 1068 | static inline unsigned short queue_max_segments(struct request_queue *q) |
| 1046 | { | ||
| 1047 | return q->limits.max_hw_segments; | ||
| 1048 | } | ||
| 1049 | |||
| 1050 | static inline unsigned short queue_max_phys_segments(struct request_queue *q) | ||
| 1051 | { | 1069 | { |
| 1052 | return q->limits.max_phys_segments; | 1070 | return q->limits.max_segments; |
| 1053 | } | 1071 | } |
| 1054 | 1072 | ||
| 1055 | static inline unsigned int queue_max_segment_size(struct request_queue *q) | 1073 | static inline unsigned int queue_max_segment_size(struct request_queue *q) |
| @@ -1110,18 +1128,13 @@ static inline int queue_alignment_offset(struct request_queue *q) | |||
| 1110 | return q->limits.alignment_offset; | 1128 | return q->limits.alignment_offset; |
| 1111 | } | 1129 | } |
| 1112 | 1130 | ||
| 1113 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t offset) | 1131 | static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector) |
| 1114 | { | 1132 | { |
| 1115 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); | 1133 | unsigned int granularity = max(lim->physical_block_size, lim->io_min); |
| 1134 | unsigned int alignment = (sector << 9) & (granularity - 1); | ||
| 1116 | 1135 | ||
| 1117 | offset &= granularity - 1; | 1136 | return (granularity + lim->alignment_offset - alignment) |
| 1118 | return (granularity + lim->alignment_offset - offset) & (granularity - 1); | 1137 | & (granularity - 1); |
| 1119 | } | ||
| 1120 | |||
| 1121 | static inline int queue_sector_alignment_offset(struct request_queue *q, | ||
| 1122 | sector_t sector) | ||
| 1123 | { | ||
| 1124 | return queue_limit_alignment_offset(&q->limits, sector << 9); | ||
| 1125 | } | 1138 | } |
| 1126 | 1139 | ||
| 1127 | static inline int bdev_alignment_offset(struct block_device *bdev) | 1140 | static inline int bdev_alignment_offset(struct block_device *bdev) |
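A worked example of the rewritten queue_limit_alignment_offset(), with assumed numbers: 4096-byte physical blocks, io_min of 0, no device-level alignment offset, and the classic DOS partition start at LBA 63:

	granularity = max(4096, 0)               = 4096
	alignment   = (63 << 9) & (4096 - 1)     = 3584
	result      = (4096 + 0 - 3584) & 4095   = 512

So the add_partition() hunk earlier records a 512-byte alignment_offset for such a partition, while a start at LBA 64 yields 0 (aligned).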
| @@ -1145,10 +1158,8 @@ static inline int queue_discard_alignment(struct request_queue *q) | |||
| 1145 | return q->limits.discard_alignment; | 1158 | return q->limits.discard_alignment; |
| 1146 | } | 1159 | } |
| 1147 | 1160 | ||
| 1148 | static inline int queue_sector_discard_alignment(struct request_queue *q, | 1161 | static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) |
| 1149 | sector_t sector) | ||
| 1150 | { | 1162 | { |
| 1151 | struct queue_limits *lim = &q->limits; | ||
| 1152 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); | 1163 | unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); |
| 1153 | 1164 | ||
| 1154 | return (lim->discard_granularity + lim->discard_alignment - alignment) | 1165 | return (lim->discard_granularity + lim->discard_alignment - alignment) |
diff --git a/include/linux/cciss_defs.h b/include/linux/cciss_defs.h new file mode 100644 index 000000000000..316b670d4e33 --- /dev/null +++ b/include/linux/cciss_defs.h | |||
| @@ -0,0 +1,130 @@ | |||
| 1 | #ifndef CCISS_DEFS_H | ||
| 2 | #define CCISS_DEFS_H | ||
| 3 | |||
| 4 | #include <linux/types.h> | ||
| 5 | |||
| 6 | /* general boundary definitions */ | ||
| 7 | #define SENSEINFOBYTES 32 /* note that this value may vary | ||
| 8 | between host implementations */ | ||
| 9 | |||
| 10 | /* Command Status value */ | ||
| 11 | #define CMD_SUCCESS 0x0000 | ||
| 12 | #define CMD_TARGET_STATUS 0x0001 | ||
| 13 | #define CMD_DATA_UNDERRUN 0x0002 | ||
| 14 | #define CMD_DATA_OVERRUN 0x0003 | ||
| 15 | #define CMD_INVALID 0x0004 | ||
| 16 | #define CMD_PROTOCOL_ERR 0x0005 | ||
| 17 | #define CMD_HARDWARE_ERR 0x0006 | ||
| 18 | #define CMD_CONNECTION_LOST 0x0007 | ||
| 19 | #define CMD_ABORTED 0x0008 | ||
| 20 | #define CMD_ABORT_FAILED 0x0009 | ||
| 21 | #define CMD_UNSOLICITED_ABORT 0x000A | ||
| 22 | #define CMD_TIMEOUT 0x000B | ||
| 23 | #define CMD_UNABORTABLE 0x000C | ||
| 24 | |||
| 25 | /* transfer direction */ | ||
| 26 | #define XFER_NONE 0x00 | ||
| 27 | #define XFER_WRITE 0x01 | ||
| 28 | #define XFER_READ 0x02 | ||
| 29 | #define XFER_RSVD 0x03 | ||
| 30 | |||
| 31 | /* task attribute */ | ||
| 32 | #define ATTR_UNTAGGED 0x00 | ||
| 33 | #define ATTR_SIMPLE 0x04 | ||
| 34 | #define ATTR_HEADOFQUEUE 0x05 | ||
| 35 | #define ATTR_ORDERED 0x06 | ||
| 36 | #define ATTR_ACA 0x07 | ||
| 37 | |||
| 38 | /* cdb type */ | ||
| 39 | #define TYPE_CMD 0x00 | ||
| 40 | #define TYPE_MSG 0x01 | ||
| 41 | |||
| 42 | /* Type defs used in the following structs */ | ||
| 43 | #define BYTE __u8 | ||
| 44 | #define WORD __u16 | ||
| 45 | #define HWORD __u16 | ||
| 46 | #define DWORD __u32 | ||
| 47 | |||
| 48 | #define CISS_MAX_LUN 1024 | ||
| 49 | |||
| 50 | #define LEVEL2LUN 1 /* index into Target(x) structure, due to byte swapping */ | ||
| 51 | #define LEVEL3LUN 0 | ||
| 52 | |||
| 53 | #pragma pack(1) | ||
| 54 | |||
| 55 | /* Command List Structure */ | ||
| 56 | typedef union _SCSI3Addr_struct { | ||
| 57 | struct { | ||
| 58 | BYTE Dev; | ||
| 59 | BYTE Bus:6; | ||
| 60 | BYTE Mode:2; /* b00 */ | ||
| 61 | } PeripDev; | ||
| 62 | struct { | ||
| 63 | BYTE DevLSB; | ||
| 64 | BYTE DevMSB:6; | ||
| 65 | BYTE Mode:2; /* b01 */ | ||
| 66 | } LogDev; | ||
| 67 | struct { | ||
| 68 | BYTE Dev:5; | ||
| 69 | BYTE Bus:3; | ||
| 70 | BYTE Targ:6; | ||
| 71 | BYTE Mode:2; /* b10 */ | ||
| 72 | } LogUnit; | ||
| 73 | } SCSI3Addr_struct; | ||
| 74 | |||
| 75 | typedef struct _PhysDevAddr_struct { | ||
| 76 | DWORD TargetId:24; | ||
| 77 | DWORD Bus:6; | ||
| 78 | DWORD Mode:2; | ||
| 79 | SCSI3Addr_struct Target[2]; /* 2 level target device addr */ | ||
| 80 | } PhysDevAddr_struct; | ||
| 81 | |||
| 82 | typedef struct _LogDevAddr_struct { | ||
| 83 | DWORD VolId:30; | ||
| 84 | DWORD Mode:2; | ||
| 85 | BYTE reserved[4]; | ||
| 86 | } LogDevAddr_struct; | ||
| 87 | |||
| 88 | typedef union _LUNAddr_struct { | ||
| 89 | BYTE LunAddrBytes[8]; | ||
| 90 | SCSI3Addr_struct SCSI3Lun[4]; | ||
| 91 | PhysDevAddr_struct PhysDev; | ||
| 92 | LogDevAddr_struct LogDev; | ||
| 93 | } LUNAddr_struct; | ||
| 94 | |||
| 95 | typedef struct _RequestBlock_struct { | ||
| 96 | BYTE CDBLen; | ||
| 97 | struct { | ||
| 98 | BYTE Type:3; | ||
| 99 | BYTE Attribute:3; | ||
| 100 | BYTE Direction:2; | ||
| 101 | } Type; | ||
| 102 | HWORD Timeout; | ||
| 103 | BYTE CDB[16]; | ||
| 104 | } RequestBlock_struct; | ||
| 105 | |||
| 106 | typedef union _MoreErrInfo_struct{ | ||
| 107 | struct { | ||
| 108 | BYTE Reserved[3]; | ||
| 109 | BYTE Type; | ||
| 110 | DWORD ErrorInfo; | ||
| 111 | } Common_Info; | ||
| 112 | struct{ | ||
| 113 | BYTE Reserved[2]; | ||
| 114 | BYTE offense_size; /* size of offending entry */ | ||
| 115 | BYTE offense_num; /* byte # of offense 0-base */ | ||
| 116 | DWORD offense_value; | ||
| 117 | } Invalid_Cmd; | ||
| 118 | } MoreErrInfo_struct; | ||
| 119 | typedef struct _ErrorInfo_struct { | ||
| 120 | BYTE ScsiStatus; | ||
| 121 | BYTE SenseLen; | ||
| 122 | HWORD CommandStatus; | ||
| 123 | DWORD ResidualCnt; | ||
| 124 | MoreErrInfo_struct MoreErrInfo; | ||
| 125 | BYTE SenseInfo[SENSEINFOBYTES]; | ||
| 126 | } ErrorInfo_struct; | ||
| 127 | |||
| 128 | #pragma pack() | ||
| 129 | |||
| 130 | #endif /* CCISS_DEFS_H */ | ||
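The new header carries the packed wire structures shared between the cciss driver and its ioctl interface (and the Kbuild hunk exports it to userspace). A hedged sketch of how a userspace caller might fill the request block for a 6-byte SCSI INQUIRY — assumed usage, not mandated by this patch:

#include <string.h>
#include <linux/cciss_defs.h>

static void example_fill_inquiry(RequestBlock_struct *rb)
{
	memset(rb, 0, sizeof(*rb));
	rb->CDBLen = 6;			/* 6-byte CDB */
	rb->Type.Type = TYPE_CMD;
	rb->Type.Attribute = ATTR_SIMPLE;
	rb->Type.Direction = XFER_READ;
	rb->Timeout = 0;		/* no timeout */
	rb->CDB[0] = 0x12;		/* INQUIRY opcode */
	rb->CDB[4] = 96;		/* allocation length */
}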
diff --git a/include/linux/cciss_ioctl.h b/include/linux/cciss_ioctl.h index eb130b4d8e72..986493f5b92b 100644 --- a/include/linux/cciss_ioctl.h +++ b/include/linux/cciss_ioctl.h | |||
| @@ -3,6 +3,7 @@ | |||
| 3 | 3 | ||
| 4 | #include <linux/types.h> | 4 | #include <linux/types.h> |
| 5 | #include <linux/ioctl.h> | 5 | #include <linux/ioctl.h> |
| 6 | #include <linux/cciss_defs.h> | ||
| 6 | 7 | ||
| 7 | #define CCISS_IOC_MAGIC 'B' | 8 | #define CCISS_IOC_MAGIC 'B' |
| 8 | 9 | ||
| @@ -36,133 +37,6 @@ typedef __u32 DriverVer_type; | |||
| 36 | 37 | ||
| 37 | #define MAX_KMALLOC_SIZE 128000 | 38 | #define MAX_KMALLOC_SIZE 128000 |
| 38 | 39 | ||
| 39 | #ifndef CCISS_CMD_H | ||
| 40 | // This defines are duplicated in cciss_cmd.h in the driver directory | ||
| 41 | |||
| 42 | //general boundary definitions | ||
| 43 | #define SENSEINFOBYTES 32//note that this value may vary between host implementations | ||
| 44 | |||
| 45 | //Command Status value | ||
| 46 | #define CMD_SUCCESS 0x0000 | ||
| 47 | #define CMD_TARGET_STATUS 0x0001 | ||
| 48 | #define CMD_DATA_UNDERRUN 0x0002 | ||
| 49 | #define CMD_DATA_OVERRUN 0x0003 | ||
| 50 | #define CMD_INVALID 0x0004 | ||
| 51 | #define CMD_PROTOCOL_ERR 0x0005 | ||
| 52 | #define CMD_HARDWARE_ERR 0x0006 | ||
| 53 | #define CMD_CONNECTION_LOST 0x0007 | ||
| 54 | #define CMD_ABORTED 0x0008 | ||
| 55 | #define CMD_ABORT_FAILED 0x0009 | ||
| 56 | #define CMD_UNSOLICITED_ABORT 0x000A | ||
| 57 | #define CMD_TIMEOUT 0x000B | ||
| 58 | #define CMD_UNABORTABLE 0x000C | ||
| 59 | |||
| 60 | //transfer direction | ||
| 61 | #define XFER_NONE 0x00 | ||
| 62 | #define XFER_WRITE 0x01 | ||
| 63 | #define XFER_READ 0x02 | ||
| 64 | #define XFER_RSVD 0x03 | ||
| 65 | |||
| 66 | //task attribute | ||
| 67 | #define ATTR_UNTAGGED 0x00 | ||
| 68 | #define ATTR_SIMPLE 0x04 | ||
| 69 | #define ATTR_HEADOFQUEUE 0x05 | ||
| 70 | #define ATTR_ORDERED 0x06 | ||
| 71 | #define ATTR_ACA 0x07 | ||
| 72 | |||
| 73 | //cdb type | ||
| 74 | #define TYPE_CMD 0x00 | ||
| 75 | #define TYPE_MSG 0x01 | ||
| 76 | |||
| 77 | // Type defs used in the following structs | ||
| 78 | #define BYTE __u8 | ||
| 79 | #define WORD __u16 | ||
| 80 | #define HWORD __u16 | ||
| 81 | #define DWORD __u32 | ||
| 82 | |||
| 83 | #define CISS_MAX_LUN 1024 | ||
| 84 | |||
| 85 | #define LEVEL2LUN 1 // index into Target(x) structure, due to byte swapping | ||
| 86 | #define LEVEL3LUN 0 | ||
| 87 | |||
| 88 | #pragma pack(1) | ||
| 89 | |||
| 90 | //Command List Structure | ||
| 91 | typedef union _SCSI3Addr_struct { | ||
| 92 | struct { | ||
| 93 | BYTE Dev; | ||
| 94 | BYTE Bus:6; | ||
| 95 | BYTE Mode:2; // b00 | ||
| 96 | } PeripDev; | ||
| 97 | struct { | ||
| 98 | BYTE DevLSB; | ||
| 99 | BYTE DevMSB:6; | ||
| 100 | BYTE Mode:2; // b01 | ||
| 101 | } LogDev; | ||
| 102 | struct { | ||
| 103 | BYTE Dev:5; | ||
| 104 | BYTE Bus:3; | ||
| 105 | BYTE Targ:6; | ||
| 106 | BYTE Mode:2; // b10 | ||
| 107 | } LogUnit; | ||
| 108 | } SCSI3Addr_struct; | ||
| 109 | |||
| 110 | typedef struct _PhysDevAddr_struct { | ||
| 111 | DWORD TargetId:24; | ||
| 112 | DWORD Bus:6; | ||
| 113 | DWORD Mode:2; | ||
| 114 | SCSI3Addr_struct Target[2]; //2 level target device addr | ||
| 115 | } PhysDevAddr_struct; | ||
| 116 | |||
| 117 | typedef struct _LogDevAddr_struct { | ||
| 118 | DWORD VolId:30; | ||
| 119 | DWORD Mode:2; | ||
| 120 | BYTE reserved[4]; | ||
| 121 | } LogDevAddr_struct; | ||
| 122 | |||
| 123 | typedef union _LUNAddr_struct { | ||
| 124 | BYTE LunAddrBytes[8]; | ||
| 125 | SCSI3Addr_struct SCSI3Lun[4]; | ||
| 126 | PhysDevAddr_struct PhysDev; | ||
| 127 | LogDevAddr_struct LogDev; | ||
| 128 | } LUNAddr_struct; | ||
| 129 | |||
| 130 | typedef struct _RequestBlock_struct { | ||
| 131 | BYTE CDBLen; | ||
| 132 | struct { | ||
| 133 | BYTE Type:3; | ||
| 134 | BYTE Attribute:3; | ||
| 135 | BYTE Direction:2; | ||
| 136 | } Type; | ||
| 137 | HWORD Timeout; | ||
| 138 | BYTE CDB[16]; | ||
| 139 | } RequestBlock_struct; | ||
| 140 | |||
| 141 | typedef union _MoreErrInfo_struct{ | ||
| 142 | struct { | ||
| 143 | BYTE Reserved[3]; | ||
| 144 | BYTE Type; | ||
| 145 | DWORD ErrorInfo; | ||
| 146 | }Common_Info; | ||
| 147 | struct{ | ||
| 148 | BYTE Reserved[2]; | ||
| 149 | BYTE offense_size;//size of offending entry | ||
| 150 | BYTE offense_num; //byte # of offense 0-base | ||
| 151 | DWORD offense_value; | ||
| 152 | }Invalid_Cmd; | ||
| 153 | }MoreErrInfo_struct; | ||
| 154 | typedef struct _ErrorInfo_struct { | ||
| 155 | BYTE ScsiStatus; | ||
| 156 | BYTE SenseLen; | ||
| 157 | HWORD CommandStatus; | ||
| 158 | DWORD ResidualCnt; | ||
| 159 | MoreErrInfo_struct MoreErrInfo; | ||
| 160 | BYTE SenseInfo[SENSEINFOBYTES]; | ||
| 161 | } ErrorInfo_struct; | ||
| 162 | |||
| 163 | #pragma pack() | ||
| 164 | #endif /* CCISS_CMD_H */ | ||
| 165 | |||
| 166 | typedef struct _IOCTL_Command_struct { | 40 | typedef struct _IOCTL_Command_struct { |
| 167 | LUNAddr_struct LUN_info; | 41 | LUNAddr_struct LUN_info; |
| 168 | RequestBlock_struct Request; | 42 | RequestBlock_struct Request; |
diff --git a/include/linux/i2o.h b/include/linux/i2o.h index 4c4e57d1f19d..87018dc5527d 100644 --- a/include/linux/i2o.h +++ b/include/linux/i2o.h | |||
| @@ -385,7 +385,7 @@ | |||
| 385 | /* defines for max_sectors and max_phys_segments */ | 385 | /* defines for max_sectors and max_phys_segments */ |
| 386 | #define I2O_MAX_SECTORS 1024 | 386 | #define I2O_MAX_SECTORS 1024 |
| 387 | #define I2O_MAX_SECTORS_LIMITED 128 | 387 | #define I2O_MAX_SECTORS_LIMITED 128 |
| 388 | #define I2O_MAX_PHYS_SEGMENTS MAX_PHYS_SEGMENTS | 388 | #define I2O_MAX_PHYS_SEGMENTS BLK_MAX_SEGMENTS |
| 389 | 389 | ||
| 390 | /* | 390 | /* |
| 391 | * Message structures | 391 | * Message structures |
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 78ef023227d4..1195a806fe0c 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
| @@ -49,8 +49,8 @@ struct io_context { | |||
| 49 | /* | 49 | /* |
| 50 | * For request batching | 50 | * For request batching |
| 51 | */ | 51 | */ |
| 52 | unsigned long last_waited; /* Time last woken after wait for request */ | ||
| 53 | int nr_batch_requests; /* Number of requests left in the batch */ | 52 | int nr_batch_requests; /* Number of requests left in the batch */ |
| 53 | unsigned long last_waited; /* Time last woken after wait for request */ | ||
| 54 | 54 | ||
| 55 | struct radix_tree_root radix_root; | 55 | struct radix_tree_root radix_root; |
| 56 | struct hlist_head cic_list; | 56 | struct hlist_head cic_list; |
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h index 76e5053e1fac..721301b0a908 100644 --- a/include/linux/pktcdvd.h +++ b/include/linux/pktcdvd.h | |||
| @@ -163,10 +163,8 @@ struct packet_iosched | |||
| 163 | atomic_t attention; /* Set to non-zero when queue processing is needed */ | 163 | atomic_t attention; /* Set to non-zero when queue processing is needed */ |
| 164 | int writing; /* Non-zero when writing, zero when reading */ | 164 | int writing; /* Non-zero when writing, zero when reading */ |
| 165 | spinlock_t lock; /* Protecting read/write queue manipulations */ | 165 | spinlock_t lock; /* Protecting read/write queue manipulations */ |
| 166 | struct bio *read_queue; | 166 | struct bio_list read_queue; |
| 167 | struct bio *read_queue_tail; | 167 | struct bio_list write_queue; |
| 168 | struct bio *write_queue; | ||
| 169 | struct bio *write_queue_tail; | ||
| 170 | sector_t last_write; /* The sector where the last write ended */ | 168 | sector_t last_write; /* The sector where the last write ended */ |
| 171 | int successive_reads; | 169 | int successive_reads; |
| 172 | }; | 170 | }; |
| @@ -206,8 +204,8 @@ struct packet_data | |||
| 206 | spinlock_t lock; /* Lock protecting state transitions and */ | 204 | spinlock_t lock; /* Lock protecting state transitions and */ |
| 207 | /* orig_bios list */ | 205 | /* orig_bios list */ |
| 208 | 206 | ||
| 209 | struct bio *orig_bios; /* Original bios passed to pkt_make_request */ | 207 | struct bio_list orig_bios; /* Original bios passed to pkt_make_request */ |
| 210 | struct bio *orig_bios_tail;/* that will be handled by this packet */ | 208 | /* that will be handled by this packet */ |
| 211 | int write_size; /* Total size of all bios in the orig_bios */ | 209 | int write_size; /* Total size of all bios in the orig_bios */ |
| 212 | /* list, measured in number of frames */ | 210 | /* list, measured in number of frames */ |
| 213 | 211 | ||
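pktcdvd's open-coded head/tail bio chains — and, below, the task_struct field used for stacked block devices — move to struct bio_list, whose helpers ship with it in <linux/bio.h>. A sketch of the pattern being replaced; example_handle() is a hypothetical consumer:

#include <linux/bio.h>

static void example_handle(struct bio *bio)
{
	bio_endio(bio, 0);		/* placeholder completion */
}

static void example_queue_and_drain(struct bio_list *list, struct bio *bio)
{
	bio_list_add(list, bio);	/* was: *tail = bio; tail = &bio->bi_next */
	while ((bio = bio_list_pop(list)))
		example_handle(bio);	/* was a hand-rolled head/tail walk */
}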
diff --git a/include/linux/sched.h b/include/linux/sched.h index 0eef87b58ea5..4b1753f7e48e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -97,7 +97,7 @@ struct sched_param { | |||
| 97 | struct exec_domain; | 97 | struct exec_domain; |
| 98 | struct futex_pi_state; | 98 | struct futex_pi_state; |
| 99 | struct robust_list_head; | 99 | struct robust_list_head; |
| 100 | struct bio; | 100 | struct bio_list; |
| 101 | struct fs_struct; | 101 | struct fs_struct; |
| 102 | struct bts_context; | 102 | struct bts_context; |
| 103 | struct perf_event_context; | 103 | struct perf_event_context; |
| @@ -1454,7 +1454,7 @@ struct task_struct { | |||
| 1454 | void *journal_info; | 1454 | void *journal_info; |
| 1455 | 1455 | ||
| 1456 | /* stacked block device info */ | 1456 | /* stacked block device info */ |
| 1457 | struct bio *bio_list, **bio_tail; | 1457 | struct bio_list *bio_list; |
| 1458 | 1458 | ||
| 1459 | /* VM state */ | 1459 | /* VM state */ |
| 1460 | struct reclaim_state *reclaim_state; | 1460 | struct reclaim_state *reclaim_state; |
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index d9d6206e0b14..07f945a99430 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
| @@ -540,9 +540,10 @@ int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
| 540 | if (ret) | 540 | if (ret) |
| 541 | return ret; | 541 | return ret; |
| 542 | 542 | ||
| 543 | if (copy_to_user(arg, &buts, sizeof(buts))) | 543 | if (copy_to_user(arg, &buts, sizeof(buts))) { |
| 544 | blk_trace_remove(q); | ||
| 544 | return -EFAULT; | 545 | return -EFAULT; |
| 545 | 546 | } | |
| 546 | return 0; | 547 | return 0; |
| 547 | } | 548 | } |
| 548 | EXPORT_SYMBOL_GPL(blk_trace_setup); | 549 | EXPORT_SYMBOL_GPL(blk_trace_setup); |
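The blktrace hunk closes a leak: when copy_to_user() fails, the freshly created trace setup is now torn down before returning -EFAULT. The general shape of that unwind pattern, sketched with assumed example_* names:

static int example_setup(struct request_queue *q, void __user *arg,
			 struct blk_user_trace_setup *buts)
{
	int ret = example_do_setup(q, buts);	/* hypothetical setup step */

	if (ret)
		return ret;
	if (copy_to_user(arg, buts, sizeof(*buts))) {
		example_teardown(q);		/* don't leak the setup on -EFAULT */
		return -EFAULT;
	}
	return 0;
}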
