Diffstat (limited to 'block')
-rw-r--r--  block/genhd.c       | 48
-rw-r--r--  block/ll_rw_blk.c   | 40
-rw-r--r--  block/scsi_ioctl.c  |  2
3 files changed, 56 insertions, 34 deletions
diff --git a/block/genhd.c b/block/genhd.c
index f04609d553..f1ed83f3f0 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -358,7 +358,7 @@ static struct sysfs_ops disk_sysfs_ops = {
 static ssize_t disk_uevent_store(struct gendisk * disk,
				 const char *buf, size_t count)
 {
-	kobject_hotplug(&disk->kobj, KOBJ_ADD);
+	kobject_uevent(&disk->kobj, KOBJ_ADD);
 	return count;
 }
 static ssize_t disk_dev_read(struct gendisk * disk, char *page)
@@ -455,14 +455,14 @@ static struct kobj_type ktype_block = {
 
 extern struct kobj_type ktype_part;
 
-static int block_hotplug_filter(struct kset *kset, struct kobject *kobj)
+static int block_uevent_filter(struct kset *kset, struct kobject *kobj)
 {
 	struct kobj_type *ktype = get_ktype(kobj);
 
 	return ((ktype == &ktype_block) || (ktype == &ktype_part));
 }
 
-static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
+static int block_uevent(struct kset *kset, struct kobject *kobj, char **envp,
			 int num_envp, char *buffer, int buffer_size)
 {
 	struct kobj_type *ktype = get_ktype(kobj);
@@ -474,40 +474,40 @@ static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
 
 	if (ktype == &ktype_block) {
 		disk = container_of(kobj, struct gendisk, kobj);
-		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
				    &length, "MINOR=%u", disk->first_minor);
 	} else if (ktype == &ktype_part) {
 		disk = container_of(kobj->parent, struct gendisk, kobj);
 		part = container_of(kobj, struct hd_struct, kobj);
-		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
				    &length, "MINOR=%u",
				    disk->first_minor + part->partno);
 	} else
 		return 0;
 
-	add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size, &length,
+	add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
			    "MAJOR=%u", disk->major);
 
 	/* add physical device, backing this device */
 	physdev = disk->driverfs_dev;
 	if (physdev) {
 		char *path = kobject_get_path(&physdev->kobj, GFP_KERNEL);
 
-		add_hotplug_env_var(envp, num_envp, &i, buffer, buffer_size,
+		add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
				    &length, "PHYSDEVPATH=%s", path);
 		kfree(path);
 
 		if (physdev->bus)
-			add_hotplug_env_var(envp, num_envp, &i,
+			add_uevent_var(envp, num_envp, &i,
					    buffer, buffer_size, &length,
					    "PHYSDEVBUS=%s",
					    physdev->bus->name);
 
 		if (physdev->driver)
-			add_hotplug_env_var(envp, num_envp, &i,
+			add_uevent_var(envp, num_envp, &i,
					    buffer, buffer_size, &length,
					    "PHYSDEVDRIVER=%s",
					    physdev->driver->name);
 	}
 
 	/* terminate, set to next free slot, shrink available space */
@@ -520,13 +520,13 @@ static int block_hotplug(struct kset *kset, struct kobject *kobj, char **envp,
 	return 0;
 }
 
-static struct kset_hotplug_ops block_hotplug_ops = {
-	.filter = block_hotplug_filter,
-	.hotplug = block_hotplug,
+static struct kset_uevent_ops block_uevent_ops = {
+	.filter = block_uevent_filter,
+	.uevent = block_uevent,
 };
 
 /* declare block_subsys. */
-static decl_subsys(block, &ktype_block, &block_hotplug_ops);
+static decl_subsys(block, &ktype_block, &block_uevent_ops);
 
 
 /*
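
The genhd.c hunks above are the block subsystem's share of the tree-wide rename of the kobject "hotplug" machinery to "uevent": kobject_hotplug() becomes kobject_uevent(), add_hotplug_env_var() becomes add_uevent_var(), and struct kset_hotplug_ops becomes struct kset_uevent_ops, with no change in behaviour. As a rough orientation, here is a stand-alone sketch of the renamed callback table; every *_sketch type and name is a simplified stand-in for illustration, not the real <linux/kobject.h> definition.

/* Simplified stand-ins -- not the real <linux/kobject.h> definitions. */
struct kobj_sketch { int is_block_or_part; };

struct kset_uevent_ops_sketch {			/* was: struct kset_hotplug_ops */
	int (*filter)(struct kobj_sketch *kobj);
	int (*uevent)(struct kobj_sketch *kobj, char **envp, int num_envp,
		      char *buffer, int buffer_size);
};

static int block_filter_sketch(struct kobj_sketch *kobj)
{
	/* the real filter accepts only ktype_block and ktype_part objects */
	return kobj->is_block_or_part;
}

static int block_uevent_sketch(struct kobj_sketch *kobj, char **envp,
			       int num_envp, char *buffer, int buffer_size)
{
	/* the real callback fills MAJOR=, MINOR=, PHYSDEVPATH=, ... via
	 * add_uevent_var(), formerly add_hotplug_env_var() */
	(void)kobj; (void)envp; (void)num_envp; (void)buffer; (void)buffer_size;
	return 0;
}

static struct kset_uevent_ops_sketch block_ops_sketch = {
	.filter = block_filter_sketch,		/* was .filter = block_hotplug_filter */
	.uevent = block_uevent_sketch,		/* was .hotplug = block_hotplug */
};
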
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 99c9ca6d59..d4beb9a89e 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -239,7 +239,7 @@ void blk_queue_make_request(request_queue_t * q, make_request_fn * mfn)
 	q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
 	q->backing_dev_info.state = 0;
 	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, MAX_SECTORS);
+	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
 	blk_queue_hardsect_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
@@ -555,7 +555,12 @@ void blk_queue_max_sectors(request_queue_t *q, unsigned short max_sectors)
 		printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors);
 	}
 
-	q->max_sectors = q->max_hw_sectors = max_sectors;
+	if (BLK_DEF_MAX_SECTORS > max_sectors)
+		q->max_hw_sectors = q->max_sectors = max_sectors;
+	else {
+		q->max_sectors = BLK_DEF_MAX_SECTORS;
+		q->max_hw_sectors = max_sectors;
+	}
 }
 
 EXPORT_SYMBOL(blk_queue_max_sectors);
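
This hunk is the heart of the change: blk_queue_max_sectors() now records the driver's limit in two fields, a soft cap (max_sectors) that ordinary file-system I/O is held to, and the raw hardware cap (max_hw_sectors) that SCSI pass-through requests may use in full. A minimal user-space sketch of the rule, assuming BLK_DEF_MAX_SECTORS is 1024 sectors (the default used around this point in the tree); the struct and function names below are illustrative only, not kernel symbols.

#include <stdio.h>

#define BLK_DEF_MAX_SECTORS 1024	/* assumed default soft limit */

struct limits_sketch {
	unsigned short max_sectors;	/* soft cap, file-system requests */
	unsigned short max_hw_sectors;	/* hard cap, pass-through requests */
};

static void set_max_sectors(struct limits_sketch *q, unsigned short hw_max)
{
	/* same branch structure as the new blk_queue_max_sectors() body */
	if (BLK_DEF_MAX_SECTORS > hw_max)
		q->max_hw_sectors = q->max_sectors = hw_max;
	else {
		q->max_sectors = BLK_DEF_MAX_SECTORS;
		q->max_hw_sectors = hw_max;
	}
}

int main(void)
{
	struct limits_sketch q;

	set_max_sectors(&q, 8192);	/* controller allowing 4 MiB transfers */
	printf("max_sectors=%u max_hw_sectors=%u\n",
	       (unsigned)q.max_sectors, (unsigned)q.max_hw_sectors);	/* 1024 / 8192 */
	return 0;
}

With the example limit of 8192 sectors, normal reads and writes keep being built in 512 KiB chunks while SG_IO can still drive the controller to its full 4 MiB per command.
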
@@ -657,8 +662,8 @@ EXPORT_SYMBOL(blk_queue_hardsect_size);
 void blk_queue_stack_limits(request_queue_t *t, request_queue_t *b)
 {
 	/* zero is "infinity" */
-	t->max_sectors = t->max_hw_sectors =
-		min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors);
+	t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors);
 
 	t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments);
 	t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments);
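
For stacking drivers (MD, DM) the two limits are now propagated to the top-level queue independently, each through the min_not_zero() rule that the "zero is infinity" comment refers to: a limit of zero means "unrestricted" and only wins when both sides are unrestricted. A small illustrative sketch of that rule; min_not_zero_sketch is a stand-in, not the kernel macro.

#include <stdio.h>

/* Zero means "no limit", so it only wins when both operands are zero. */
#define min_not_zero_sketch(a, b) \
	((a) == 0 ? (b) : ((b) == 0 ? (a) : ((a) < (b) ? (a) : (b))))

int main(void)
{
	/* stacking a 1024-sector queue over an unrestricted lower queue */
	printf("%d\n", min_not_zero_sketch(1024, 0));	/* -> 1024 */
	printf("%d\n", min_not_zero_sketch(1024, 256));	/* -> 256  */
	return 0;
}
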
@@ -1293,9 +1298,15 @@ static inline int ll_new_hw_segment(request_queue_t *q,
 static int ll_back_merge_fn(request_queue_t *q, struct request *req,
			    struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
@@ -1325,9 +1336,16 @@ static int ll_back_merge_fn(request_queue_t *q, struct request *req,
 static int ll_front_merge_fn(request_queue_t *q, struct request *req,
			     struct bio *bio)
 {
+	unsigned short max_sectors;
 	int len;
 
-	if (req->nr_sectors + bio_sectors(bio) > q->max_sectors) {
+	if (unlikely(blk_pc_request(req)))
+		max_sectors = q->max_hw_sectors;
+	else
+		max_sectors = q->max_sectors;
+
+
+	if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
 		req->flags |= REQ_NOMERGE;
 		if (req == q->last_merge)
 			q->last_merge = NULL;
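
Both merge paths now choose their bound per request instead of per queue: a pass-through request (blk_pc_request()) may keep growing up to max_hw_sectors, while normal file-system requests stop at the softer max_sectors. A compact sketch of that selection follows; REQ_PC_SKETCH and merge_limit_sketch are illustrative stand-ins for the real flag and inline logic.

#include <stdio.h>

/* Stand-in flag; the kernel tests this via blk_pc_request(req). */
#define REQ_PC_SKETCH	(1u << 0)

static unsigned short merge_limit_sketch(unsigned int req_flags,
					 unsigned short max_sectors,
					 unsigned short max_hw_sectors)
{
	/* pass-through requests are bounded only by the hardware limit */
	return (req_flags & REQ_PC_SKETCH) ? max_hw_sectors : max_sectors;
}

int main(void)
{
	printf("fs request limit:           %u\n",
	       (unsigned)merge_limit_sketch(0, 1024, 8192));		/* 1024 */
	printf("pass-through request limit: %u\n",
	       (unsigned)merge_limit_sketch(REQ_PC_SKETCH, 1024, 8192));	/* 8192 */
	return 0;
}
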
@@ -2144,7 +2162,7 @@ int blk_rq_map_user(request_queue_t *q, struct request *rq, void __user *ubuf,
 	struct bio *bio;
 	int reading;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !ubuf)
 		return -EINVAL;
@@ -2259,7 +2277,7 @@ int blk_rq_map_kern(request_queue_t *q, struct request *rq, void *kbuf,
 {
 	struct bio *bio;
 
-	if (len > (q->max_sectors << 9))
+	if (len > (q->max_hw_sectors << 9))
 		return -EINVAL;
 	if (!len || !kbuf)
 		return -EINVAL;
@@ -2306,6 +2324,8 @@ void blk_execute_rq_nowait(request_queue_t *q, struct gendisk *bd_disk,
 		generic_unplug_device(q);
 }
 
+EXPORT_SYMBOL_GPL(blk_execute_rq_nowait);
+
 /**
  * blk_execute_rq - insert a request into queue for execution
  * @q:	queue to insert the request in
@@ -2444,7 +2464,7 @@ void disk_round_stats(struct gendisk *disk)
 /*
  * queue lock must be held
  */
-static void __blk_put_request(request_queue_t *q, struct request *req)
+void __blk_put_request(request_queue_t *q, struct request *req)
 {
 	struct request_list *rl = req->rl;
 
@@ -2473,6 +2493,8 @@ static void __blk_put_request(request_queue_t *q, struct request *req)
 	}
 }
 
+EXPORT_SYMBOL_GPL(__blk_put_request);
+
 void blk_put_request(struct request *req)
 {
 	unsigned long flags;
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 6e7db2e79f..1d8852f7bb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -233,7 +233,7 @@ static int sg_io(struct file *file, request_queue_t *q,
 	if (verify_command(file, cmd))
 		return -EPERM;
 
-	if (hdr->dxfer_len > (q->max_sectors << 9))
+	if (hdr->dxfer_len > (q->max_hw_sectors << 9))
 		return -EIO;
 
 	if (hdr->dxfer_len)
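
With the split in place, sg_io() here, and blk_rq_map_user()/blk_rq_map_kern() earlier, bound a pass-through transfer by the hardware limit expressed in bytes, i.e. max_hw_sectors shifted left by 9 because a block-layer sector is 512 bytes. A tiny sketch of that bound, assuming an HBA that reports 8192 sectors (the value is illustrative):

#include <stdio.h>

int main(void)
{
	unsigned short max_hw_sectors = 8192;	/* assumed HBA limit */
	unsigned long max_bytes = (unsigned long)max_hw_sectors << 9;

	/* sg_io() rejects hdr->dxfer_len larger than this with -EIO */
	printf("largest SG_IO transfer: %lu bytes (%lu MiB)\n",
	       max_bytes, max_bytes >> 20);	/* 4194304 bytes, 4 MiB */
	return 0;
}
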