author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-04 15:39:14 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-10-04 15:39:14 -0400
commit | 58e57fbd1c7e8833314459555e337364fe5521f3
tree | 242a3859387588889c9dcc45915b0dec951f84c3
parent | 8a0382f6fceaf0c6479e582e1054f36333ea3d24
parent | 0f78ab9899e9d6acb09d5465def618704255963b
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (41 commits)
Revert "Seperate read and write statistics of in_flight requests"
cfq-iosched: don't delay async queue if it hasn't dispatched at all
block: Topology ioctls
cfq-iosched: use assigned slice sync value, not default
cfq-iosched: rename 'desktop' sysfs entry to 'low_latency'
cfq-iosched: implement slower async initiate and queue ramp up
cfq-iosched: delay async IO dispatch, if sync IO was just done
cfq-iosched: add a knob for desktop interactiveness
Add a tracepoint for block request remapping
block: allow large discard requests
block: use normal I/O path for discard requests
swapfile: avoid NULL pointer dereference in swapon when s_bdev is NULL
fs/bio.c: move EXPORT* macros to line after function
Add missing blk_trace_remove_sysfs to be in pair with blk_trace_init_sysfs
cciss: fix build when !PROC_FS
block: Do not clamp max_hw_sectors for stacking devices
block: Set max_sectors correctly for stacking devices
cciss: cciss_host_attr_groups should be const
cciss: Dynamically allocate the drive_info_struct for each logical drive.
cciss: Add usage_count attribute to each logical drive in /sys
...
-rw-r--r-- | Documentation/ABI/testing/sysfs-bus-pci-devices-cciss | 28
-rw-r--r-- | block/blk-barrier.c | 45
-rw-r--r-- | block/blk-core.c | 21
-rw-r--r-- | block/blk-merge.c | 2
-rw-r--r-- | block/blk-settings.c | 34
-rw-r--r-- | block/blk-sysfs.c | 11
-rw-r--r-- | block/cfq-iosched.c | 63
-rw-r--r-- | block/compat_ioctl.c | 13
-rw-r--r-- | block/genhd.c | 4
-rw-r--r-- | block/ioctl.c | 17
-rw-r--r-- | drivers/block/DAC960.c | 156
-rw-r--r-- | drivers/block/cciss.c | 753
-rw-r--r-- | drivers/block/cciss.h | 12
-rw-r--r-- | drivers/block/cpqarray.c | 63
-rw-r--r-- | drivers/md/dm.c | 16
-rw-r--r-- | drivers/mtd/mtd_blkdevs.c | 19
-rw-r--r-- | drivers/staging/dst/dcore.c | 2
-rw-r--r-- | fs/bio.c | 49
-rw-r--r-- | fs/partitions/check.c | 12
-rw-r--r-- | include/linux/blkdev.h | 48
-rw-r--r-- | include/linux/blktrace_api.h | 2
-rw-r--r-- | include/linux/fs.h | 4
-rw-r--r-- | include/linux/genhd.h | 21
-rw-r--r-- | include/trace/events/block.h | 33
-rw-r--r-- | kernel/trace/blktrace.c | 39
-rw-r--r-- | mm/swapfile.c | 12
26 files changed, 999 insertions(+), 480 deletions(-)
diff --git a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
index 0a92a7c93a62..4f29e5f1ebfa 100644
--- a/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
+++ b/Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
@@ -31,3 +31,31 @@ Date: March 2009
 Kernel Version: 2.6.30
 Contact: iss_storagedev@hp.com
 Description: A symbolic link to /sys/block/cciss!cXdY
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/rescan
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Kicks of a rescan of the controller to discover logical
+             drive topology changes.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the 8-byte LUN ID used to address logical
+             drive Y of controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the RAID level of logical drive Y of
+             controller X.
+
+Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count
+Date: August 2009
+Kernel Version: 2.6.31
+Contact: iss_storagedev@hp.com
+Description: Displays the usage count (number of opens) of logical drive Y
+             of controller X.
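A small userspace sketch (not part of the commit) of how these new cciss attributes can be read; the PCI address and the c0d0 drive in the path are placeholders and must be replaced with the real device on your system.

```c
/* Minimal sketch: read the new cciss sysfs attributes from userspace.
 * The sysfs path below is an example; substitute the actual PCI address
 * and controller/drive numbers. */
#include <stdio.h>

int main(void)
{
	const char *attrs[] = { "lunid", "raid_level", "usage_count" };
	char path[256], line[128];
	unsigned int i;

	for (i = 0; i < sizeof(attrs) / sizeof(attrs[0]); i++) {
		snprintf(path, sizeof(path),
			 "/sys/bus/pci/devices/0000:06:00.0/cciss0/c0d0/%s",
			 attrs[i]);
		FILE *f = fopen(path, "r");
		if (!f) {
			perror(path);
			continue;
		}
		if (fgets(line, sizeof(line), f))
			printf("%s: %s", attrs[i], line);
		fclose(f);
	}
	return 0;
}
```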
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 6593ab39cfe9..8873b9b439ff 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -350,6 +350,7 @@ static void blkdev_discard_end_io(struct bio *bio, int err) | |||
350 | 350 | ||
351 | if (bio->bi_private) | 351 | if (bio->bi_private) |
352 | complete(bio->bi_private); | 352 | complete(bio->bi_private); |
353 | __free_page(bio_page(bio)); | ||
353 | 354 | ||
354 | bio_put(bio); | 355 | bio_put(bio); |
355 | } | 356 | } |
@@ -372,30 +373,50 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
372 | struct request_queue *q = bdev_get_queue(bdev); | 373 | struct request_queue *q = bdev_get_queue(bdev); |
373 | int type = flags & DISCARD_FL_BARRIER ? | 374 | int type = flags & DISCARD_FL_BARRIER ? |
374 | DISCARD_BARRIER : DISCARD_NOBARRIER; | 375 | DISCARD_BARRIER : DISCARD_NOBARRIER; |
376 | struct bio *bio; | ||
377 | struct page *page; | ||
375 | int ret = 0; | 378 | int ret = 0; |
376 | 379 | ||
377 | if (!q) | 380 | if (!q) |
378 | return -ENXIO; | 381 | return -ENXIO; |
379 | 382 | ||
380 | if (!q->prepare_discard_fn) | 383 | if (!blk_queue_discard(q)) |
381 | return -EOPNOTSUPP; | 384 | return -EOPNOTSUPP; |
382 | 385 | ||
383 | while (nr_sects && !ret) { | 386 | while (nr_sects && !ret) { |
384 | struct bio *bio = bio_alloc(gfp_mask, 0); | 387 | unsigned int sector_size = q->limits.logical_block_size; |
385 | if (!bio) | 388 | unsigned int max_discard_sectors = |
386 | return -ENOMEM; | 389 | min(q->limits.max_discard_sectors, UINT_MAX >> 9); |
387 | 390 | ||
391 | bio = bio_alloc(gfp_mask, 1); | ||
392 | if (!bio) | ||
393 | goto out; | ||
394 | bio->bi_sector = sector; | ||
388 | bio->bi_end_io = blkdev_discard_end_io; | 395 | bio->bi_end_io = blkdev_discard_end_io; |
389 | bio->bi_bdev = bdev; | 396 | bio->bi_bdev = bdev; |
390 | if (flags & DISCARD_FL_WAIT) | 397 | if (flags & DISCARD_FL_WAIT) |
391 | bio->bi_private = &wait; | 398 | bio->bi_private = &wait; |
392 | 399 | ||
393 | bio->bi_sector = sector; | 400 | /* |
401 | * Add a zeroed one-sector payload as that's what | ||
402 | * our current implementations need. If we'll ever need | ||
403 | * more the interface will need revisiting. | ||
404 | */ | ||
405 | page = alloc_page(GFP_KERNEL | __GFP_ZERO); | ||
406 | if (!page) | ||
407 | goto out_free_bio; | ||
408 | if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) | ||
409 | goto out_free_page; | ||
394 | 410 | ||
395 | if (nr_sects > queue_max_hw_sectors(q)) { | 411 | /* |
396 | bio->bi_size = queue_max_hw_sectors(q) << 9; | 412 | * And override the bio size - the way discard works we |
397 | nr_sects -= queue_max_hw_sectors(q); | 413 | * touch many more blocks on disk than the actual payload |
398 | sector += queue_max_hw_sectors(q); | 414 | * length. |
415 | */ | ||
416 | if (nr_sects > max_discard_sectors) { | ||
417 | bio->bi_size = max_discard_sectors << 9; | ||
418 | nr_sects -= max_discard_sectors; | ||
419 | sector += max_discard_sectors; | ||
399 | } else { | 420 | } else { |
400 | bio->bi_size = nr_sects << 9; | 421 | bio->bi_size = nr_sects << 9; |
401 | nr_sects = 0; | 422 | nr_sects = 0; |
@@ -414,5 +435,11 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | |||
414 | bio_put(bio); | 435 | bio_put(bio); |
415 | } | 436 | } |
416 | return ret; | 437 | return ret; |
438 | out_free_page: | ||
439 | __free_page(page); | ||
440 | out_free_bio: | ||
441 | bio_put(bio); | ||
442 | out: | ||
443 | return -ENOMEM; | ||
417 | } | 444 | } |
418 | EXPORT_SYMBOL(blkdev_issue_discard); | 445 | EXPORT_SYMBOL(blkdev_issue_discard); |
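With discard now routed through the normal I/O path and split on max_discard_sectors, userspace can exercise it through the existing BLKDISCARD ioctl. A hedged sketch follows; it is destructive, and the device name and byte range are placeholders only.

```c
/* Sketch: exercising the discard path from userspace via BLKDISCARD.
 * WARNING: destroys data in the given range; /dev/sdX and the range are
 * placeholders for illustration. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>		/* BLKDISCARD */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* offset, length in bytes */
	int fd = open("/dev/sdX", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* The kernel splits this into bios no larger than the queue's
	 * max_discard_sectors, each carrying the zeroed one-sector payload
	 * added in this patch. */
	if (ioctl(fd, BLKDISCARD, &range))
		perror("BLKDISCARD");
	close(fd);
	return 0;
}
```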
diff --git a/block/blk-core.c b/block/blk-core.c
index 8135228e4b29..81f34311659a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -34,6 +34,7 @@ | |||
34 | #include "blk.h" | 34 | #include "blk.h" |
35 | 35 | ||
36 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); | 36 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); |
37 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); | ||
37 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); | 38 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); |
38 | 39 | ||
39 | static int __make_request(struct request_queue *q, struct bio *bio); | 40 | static int __make_request(struct request_queue *q, struct bio *bio); |
@@ -69,7 +70,7 @@ static void drive_stat_acct(struct request *rq, int new_io) | |||
69 | part_stat_inc(cpu, part, merges[rw]); | 70 | part_stat_inc(cpu, part, merges[rw]); |
70 | else { | 71 | else { |
71 | part_round_stats(cpu, part); | 72 | part_round_stats(cpu, part); |
72 | part_inc_in_flight(part, rw); | 73 | part_inc_in_flight(part); |
73 | } | 74 | } |
74 | 75 | ||
75 | part_stat_unlock(); | 76 | part_stat_unlock(); |
@@ -1031,7 +1032,7 @@ static void part_round_stats_single(int cpu, struct hd_struct *part, | |||
1031 | 1032 | ||
1032 | if (part->in_flight) { | 1033 | if (part->in_flight) { |
1033 | __part_stat_add(cpu, part, time_in_queue, | 1034 | __part_stat_add(cpu, part, time_in_queue, |
1034 | part_in_flight(part) * (now - part->stamp)); | 1035 | part->in_flight * (now - part->stamp)); |
1035 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); | 1036 | __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); |
1036 | } | 1037 | } |
1037 | part->stamp = now; | 1038 | part->stamp = now; |
@@ -1124,7 +1125,6 @@ void init_request_from_bio(struct request *req, struct bio *bio) | |||
1124 | req->cmd_flags |= REQ_DISCARD; | 1125 | req->cmd_flags |= REQ_DISCARD; |
1125 | if (bio_rw_flagged(bio, BIO_RW_BARRIER)) | 1126 | if (bio_rw_flagged(bio, BIO_RW_BARRIER)) |
1126 | req->cmd_flags |= REQ_SOFTBARRIER; | 1127 | req->cmd_flags |= REQ_SOFTBARRIER; |
1127 | req->q->prepare_discard_fn(req->q, req); | ||
1128 | } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) | 1128 | } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) |
1129 | req->cmd_flags |= REQ_HARDBARRIER; | 1129 | req->cmd_flags |= REQ_HARDBARRIER; |
1130 | 1130 | ||
@@ -1437,7 +1437,8 @@ static inline void __generic_make_request(struct bio *bio) | |||
1437 | goto end_io; | 1437 | goto end_io; |
1438 | } | 1438 | } |
1439 | 1439 | ||
1440 | if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { | 1440 | if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && |
1441 | nr_sectors > queue_max_hw_sectors(q))) { | ||
1441 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | 1442 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", |
1442 | bdevname(bio->bi_bdev, b), | 1443 | bdevname(bio->bi_bdev, b), |
1443 | bio_sectors(bio), | 1444 | bio_sectors(bio), |
@@ -1470,7 +1471,7 @@ static inline void __generic_make_request(struct bio *bio) | |||
1470 | goto end_io; | 1471 | goto end_io; |
1471 | 1472 | ||
1472 | if (bio_rw_flagged(bio, BIO_RW_DISCARD) && | 1473 | if (bio_rw_flagged(bio, BIO_RW_DISCARD) && |
1473 | !q->prepare_discard_fn) { | 1474 | !blk_queue_discard(q)) { |
1474 | err = -EOPNOTSUPP; | 1475 | err = -EOPNOTSUPP; |
1475 | goto end_io; | 1476 | goto end_io; |
1476 | } | 1477 | } |
@@ -1738,7 +1739,7 @@ static void blk_account_io_done(struct request *req) | |||
1738 | part_stat_inc(cpu, part, ios[rw]); | 1739 | part_stat_inc(cpu, part, ios[rw]); |
1739 | part_stat_add(cpu, part, ticks[rw], duration); | 1740 | part_stat_add(cpu, part, ticks[rw], duration); |
1740 | part_round_stats(cpu, part); | 1741 | part_round_stats(cpu, part); |
1741 | part_dec_in_flight(part, rw); | 1742 | part_dec_in_flight(part); |
1742 | 1743 | ||
1743 | part_stat_unlock(); | 1744 | part_stat_unlock(); |
1744 | } | 1745 | } |
@@ -2491,6 +2492,14 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2491 | } | 2492 | } |
2492 | EXPORT_SYMBOL(kblockd_schedule_work); | 2493 | EXPORT_SYMBOL(kblockd_schedule_work); |
2493 | 2494 | ||
2495 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
2496 | struct delayed_work *work, | ||
2497 | unsigned long delay) | ||
2498 | { | ||
2499 | return queue_delayed_work(kblockd_workqueue, work, delay); | ||
2500 | } | ||
2501 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
2502 | |||
2494 | int __init blk_dev_init(void) | 2503 | int __init blk_dev_init(void) |
2495 | { | 2504 | { |
2496 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2505 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |
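The new kblockd_schedule_delayed_work() is a thin wrapper around queue_delayed_work() on the kblockd workqueue, added so CFQ can delay its unplug work. A stand-alone sketch of the same delayed-work pattern follows; it uses the system workqueue because kblockd_workqueue is private to the block layer, and the module itself is purely illustrative.

```c
/* Sketch of the delayed-work pattern that kblockd_schedule_delayed_work()
 * builds on; a stand-alone illustration, not block-layer code. */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static struct delayed_work demo_work;

static void demo_fn(struct work_struct *work)
{
	pr_info("delayed work ran\n");
}

static int __init demo_init(void)
{
	INIT_DELAYED_WORK(&demo_work, demo_fn);
	/* Run roughly 100ms from now; a zero delay behaves like queue_work(). */
	schedule_delayed_work(&demo_work, msecs_to_jiffies(100));
	return 0;
}

static void __exit demo_exit(void)
{
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```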
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 99cb5cf1f447..b0de8574fdc8 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -351,7 +351,7 @@ static void blk_account_io_merge(struct request *req) | |||
351 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 351 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); |
352 | 352 | ||
353 | part_round_stats(cpu, part); | 353 | part_round_stats(cpu, part); |
354 | part_dec_in_flight(part, rq_data_dir(req)); | 354 | part_dec_in_flight(part); |
355 | 355 | ||
356 | part_stat_unlock(); | 356 | part_stat_unlock(); |
357 | } | 357 | } |
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 83413ff83739..e0695bca7027 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -34,23 +34,6 @@ void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) | |||
34 | EXPORT_SYMBOL(blk_queue_prep_rq); | 34 | EXPORT_SYMBOL(blk_queue_prep_rq); |
35 | 35 | ||
36 | /** | 36 | /** |
37 | * blk_queue_set_discard - set a discard_sectors function for queue | ||
38 | * @q: queue | ||
39 | * @dfn: prepare_discard function | ||
40 | * | ||
41 | * It's possible for a queue to register a discard callback which is used | ||
42 | * to transform a discard request into the appropriate type for the | ||
43 | * hardware. If none is registered, then discard requests are failed | ||
44 | * with %EOPNOTSUPP. | ||
45 | * | ||
46 | */ | ||
47 | void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn) | ||
48 | { | ||
49 | q->prepare_discard_fn = dfn; | ||
50 | } | ||
51 | EXPORT_SYMBOL(blk_queue_set_discard); | ||
52 | |||
53 | /** | ||
54 | * blk_queue_merge_bvec - set a merge_bvec function for queue | 37 | * blk_queue_merge_bvec - set a merge_bvec function for queue |
55 | * @q: queue | 38 | * @q: queue |
56 | * @mbfn: merge_bvec_fn | 39 | * @mbfn: merge_bvec_fn |
@@ -111,7 +94,9 @@ void blk_set_default_limits(struct queue_limits *lim) | |||
111 | lim->max_hw_segments = MAX_HW_SEGMENTS; | 94 | lim->max_hw_segments = MAX_HW_SEGMENTS; |
112 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; | 95 | lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; |
113 | lim->max_segment_size = MAX_SEGMENT_SIZE; | 96 | lim->max_segment_size = MAX_SEGMENT_SIZE; |
114 | lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; | 97 | lim->max_sectors = BLK_DEF_MAX_SECTORS; |
98 | lim->max_hw_sectors = INT_MAX; | ||
99 | lim->max_discard_sectors = SAFE_MAX_SECTORS; | ||
115 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; | 100 | lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; |
116 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); | 101 | lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); |
117 | lim->alignment_offset = 0; | 102 | lim->alignment_offset = 0; |
@@ -164,6 +149,7 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) | |||
164 | q->unplug_timer.data = (unsigned long)q; | 149 | q->unplug_timer.data = (unsigned long)q; |
165 | 150 | ||
166 | blk_set_default_limits(&q->limits); | 151 | blk_set_default_limits(&q->limits); |
152 | blk_queue_max_sectors(q, SAFE_MAX_SECTORS); | ||
167 | 153 | ||
168 | /* | 154 | /* |
169 | * If the caller didn't supply a lock, fall back to our embedded | 155 | * If the caller didn't supply a lock, fall back to our embedded |
@@ -254,6 +240,18 @@ void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) | |||
254 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); | 240 | EXPORT_SYMBOL(blk_queue_max_hw_sectors); |
255 | 241 | ||
256 | /** | 242 | /** |
243 | * blk_queue_max_discard_sectors - set max sectors for a single discard | ||
244 | * @q: the request queue for the device | ||
245 | * @max_discard: maximum number of sectors to discard | ||
246 | **/ | ||
247 | void blk_queue_max_discard_sectors(struct request_queue *q, | ||
248 | unsigned int max_discard_sectors) | ||
249 | { | ||
250 | q->limits.max_discard_sectors = max_discard_sectors; | ||
251 | } | ||
252 | EXPORT_SYMBOL(blk_queue_max_discard_sectors); | ||
253 | |||
254 | /** | ||
257 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue | 255 | * blk_queue_max_phys_segments - set max phys segments for a request for this queue |
258 | * @q: the request queue for the device | 256 | * @q: the request queue for the device |
259 | * @max_segments: max number of segments | 257 | * @max_segments: max number of segments |
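A sketch of how a low-level driver might use the new limit to advertise discard support now that prepare_discard_fn is gone; the function name and the 8 MB cap are made up for illustration, and QUEUE_FLAG_DISCARD is assumed to be the flag tested by blk_queue_discard() above.

```c
/* Sketch: advertising discard support with the new queue limit.
 * my_queue_setup and the 8 MB cap are hypothetical. */
#include <linux/blkdev.h>

static void my_queue_setup(struct request_queue *q)
{
	/* Tell the block layer how large a single discard may be (sectors). */
	blk_queue_max_discard_sectors(q, 8 * 1024 * 2);	/* 8 MB */

	/* Mark the queue as supporting discard so blkdev_issue_discard()
	 * no longer fails with -EOPNOTSUPP. */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
}
```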
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index b78c9c3e2670..8a6d81afb284 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -452,6 +452,7 @@ int blk_register_queue(struct gendisk *disk) | |||
452 | if (ret) { | 452 | if (ret) { |
453 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 453 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
454 | kobject_del(&q->kobj); | 454 | kobject_del(&q->kobj); |
455 | blk_trace_remove_sysfs(disk_to_dev(disk)); | ||
455 | return ret; | 456 | return ret; |
456 | } | 457 | } |
457 | 458 | ||
@@ -465,11 +466,11 @@ void blk_unregister_queue(struct gendisk *disk) | |||
465 | if (WARN_ON(!q)) | 466 | if (WARN_ON(!q)) |
466 | return; | 467 | return; |
467 | 468 | ||
468 | if (q->request_fn) { | 469 | if (q->request_fn) |
469 | elv_unregister_queue(q); | 470 | elv_unregister_queue(q); |
470 | 471 | ||
471 | kobject_uevent(&q->kobj, KOBJ_REMOVE); | 472 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
472 | kobject_del(&q->kobj); | 473 | kobject_del(&q->kobj); |
473 | kobject_put(&disk_to_dev(disk)->kobj); | 474 | blk_trace_remove_sysfs(disk_to_dev(disk)); |
474 | } | 475 | kobject_put(&disk_to_dev(disk)->kobj); |
475 | } | 476 | } |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1ca813b16e78..9c4b679908f4 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -150,7 +150,7 @@ struct cfq_data { | |||
150 | * idle window management | 150 | * idle window management |
151 | */ | 151 | */ |
152 | struct timer_list idle_slice_timer; | 152 | struct timer_list idle_slice_timer; |
153 | struct work_struct unplug_work; | 153 | struct delayed_work unplug_work; |
154 | 154 | ||
155 | struct cfq_queue *active_queue; | 155 | struct cfq_queue *active_queue; |
156 | struct cfq_io_context *active_cic; | 156 | struct cfq_io_context *active_cic; |
@@ -173,6 +173,7 @@ struct cfq_data { | |||
173 | unsigned int cfq_slice[2]; | 173 | unsigned int cfq_slice[2]; |
174 | unsigned int cfq_slice_async_rq; | 174 | unsigned int cfq_slice_async_rq; |
175 | unsigned int cfq_slice_idle; | 175 | unsigned int cfq_slice_idle; |
176 | unsigned int cfq_latency; | ||
176 | 177 | ||
177 | struct list_head cic_list; | 178 | struct list_head cic_list; |
178 | 179 | ||
@@ -180,6 +181,8 @@ struct cfq_data { | |||
180 | * Fallback dummy cfqq for extreme OOM conditions | 181 | * Fallback dummy cfqq for extreme OOM conditions |
181 | */ | 182 | */ |
182 | struct cfq_queue oom_cfqq; | 183 | struct cfq_queue oom_cfqq; |
184 | |||
185 | unsigned long last_end_sync_rq; | ||
183 | }; | 186 | }; |
184 | 187 | ||
185 | enum cfqq_state_flags { | 188 | enum cfqq_state_flags { |
@@ -265,11 +268,13 @@ static inline int cfq_bio_sync(struct bio *bio) | |||
265 | * scheduler run of queue, if there are requests pending and no one in the | 268 | * scheduler run of queue, if there are requests pending and no one in the |
266 | * driver that will restart queueing | 269 | * driver that will restart queueing |
267 | */ | 270 | */ |
268 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) | 271 | static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, |
272 | unsigned long delay) | ||
269 | { | 273 | { |
270 | if (cfqd->busy_queues) { | 274 | if (cfqd->busy_queues) { |
271 | cfq_log(cfqd, "schedule dispatch"); | 275 | cfq_log(cfqd, "schedule dispatch"); |
272 | kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); | 276 | kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, |
277 | delay); | ||
273 | } | 278 | } |
274 | } | 279 | } |
275 | 280 | ||
@@ -1326,12 +1331,30 @@ static int cfq_dispatch_requests(struct request_queue *q, int force) | |||
1326 | return 0; | 1331 | return 0; |
1327 | 1332 | ||
1328 | /* | 1333 | /* |
1329 | * we are the only queue, allow up to 4 times of 'quantum' | 1334 | * Sole queue user, allow bigger slice |
1330 | */ | 1335 | */ |
1331 | if (cfqq->dispatched >= 4 * max_dispatch) | 1336 | max_dispatch *= 4; |
1332 | return 0; | 1337 | } |
1338 | |||
1339 | /* | ||
1340 | * Async queues must wait a bit before being allowed dispatch. | ||
1341 | * We also ramp up the dispatch depth gradually for async IO, | ||
1342 | * based on the last sync IO we serviced | ||
1343 | */ | ||
1344 | if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { | ||
1345 | unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; | ||
1346 | unsigned int depth; | ||
1347 | |||
1348 | depth = last_sync / cfqd->cfq_slice[1]; | ||
1349 | if (!depth && !cfqq->dispatched) | ||
1350 | depth = 1; | ||
1351 | if (depth < max_dispatch) | ||
1352 | max_dispatch = depth; | ||
1333 | } | 1353 | } |
1334 | 1354 | ||
1355 | if (cfqq->dispatched >= max_dispatch) | ||
1356 | return 0; | ||
1357 | |||
1335 | /* | 1358 | /* |
1336 | * Dispatch a request from this cfqq | 1359 | * Dispatch a request from this cfqq |
1337 | */ | 1360 | */ |
@@ -1376,7 +1399,7 @@ static void cfq_put_queue(struct cfq_queue *cfqq) | |||
1376 | 1399 | ||
1377 | if (unlikely(cfqd->active_queue == cfqq)) { | 1400 | if (unlikely(cfqd->active_queue == cfqq)) { |
1378 | __cfq_slice_expired(cfqd, cfqq, 0); | 1401 | __cfq_slice_expired(cfqd, cfqq, 0); |
1379 | cfq_schedule_dispatch(cfqd); | 1402 | cfq_schedule_dispatch(cfqd, 0); |
1380 | } | 1403 | } |
1381 | 1404 | ||
1382 | kmem_cache_free(cfq_pool, cfqq); | 1405 | kmem_cache_free(cfq_pool, cfqq); |
@@ -1471,7 +1494,7 @@ static void cfq_exit_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq) | |||
1471 | { | 1494 | { |
1472 | if (unlikely(cfqq == cfqd->active_queue)) { | 1495 | if (unlikely(cfqq == cfqd->active_queue)) { |
1473 | __cfq_slice_expired(cfqd, cfqq, 0); | 1496 | __cfq_slice_expired(cfqd, cfqq, 0); |
1474 | cfq_schedule_dispatch(cfqd); | 1497 | cfq_schedule_dispatch(cfqd, 0); |
1475 | } | 1498 | } |
1476 | 1499 | ||
1477 | cfq_put_queue(cfqq); | 1500 | cfq_put_queue(cfqq); |
@@ -1951,7 +1974,7 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq, | |||
1951 | enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); | 1974 | enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); |
1952 | 1975 | ||
1953 | if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || | 1976 | if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || |
1954 | (cfqd->hw_tag && CIC_SEEKY(cic))) | 1977 | (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) |
1955 | enable_idle = 0; | 1978 | enable_idle = 0; |
1956 | else if (sample_valid(cic->ttime_samples)) { | 1979 | else if (sample_valid(cic->ttime_samples)) { |
1957 | if (cic->ttime_mean > cfqd->cfq_slice_idle) | 1980 | if (cic->ttime_mean > cfqd->cfq_slice_idle) |
@@ -2157,8 +2180,10 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
2157 | if (cfq_cfqq_sync(cfqq)) | 2180 | if (cfq_cfqq_sync(cfqq)) |
2158 | cfqd->sync_flight--; | 2181 | cfqd->sync_flight--; |
2159 | 2182 | ||
2160 | if (sync) | 2183 | if (sync) { |
2161 | RQ_CIC(rq)->last_end_request = now; | 2184 | RQ_CIC(rq)->last_end_request = now; |
2185 | cfqd->last_end_sync_rq = now; | ||
2186 | } | ||
2162 | 2187 | ||
2163 | /* | 2188 | /* |
2164 | * If this is the active queue, check if it needs to be expired, | 2189 | * If this is the active queue, check if it needs to be expired, |
@@ -2186,7 +2211,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq) | |||
2186 | } | 2211 | } |
2187 | 2212 | ||
2188 | if (!rq_in_driver(cfqd)) | 2213 | if (!rq_in_driver(cfqd)) |
2189 | cfq_schedule_dispatch(cfqd); | 2214 | cfq_schedule_dispatch(cfqd, 0); |
2190 | } | 2215 | } |
2191 | 2216 | ||
2192 | /* | 2217 | /* |
@@ -2316,7 +2341,7 @@ queue_fail: | |||
2316 | if (cic) | 2341 | if (cic) |
2317 | put_io_context(cic->ioc); | 2342 | put_io_context(cic->ioc); |
2318 | 2343 | ||
2319 | cfq_schedule_dispatch(cfqd); | 2344 | cfq_schedule_dispatch(cfqd, 0); |
2320 | spin_unlock_irqrestore(q->queue_lock, flags); | 2345 | spin_unlock_irqrestore(q->queue_lock, flags); |
2321 | cfq_log(cfqd, "set_request fail"); | 2346 | cfq_log(cfqd, "set_request fail"); |
2322 | return 1; | 2347 | return 1; |
@@ -2325,7 +2350,7 @@ queue_fail: | |||
2325 | static void cfq_kick_queue(struct work_struct *work) | 2350 | static void cfq_kick_queue(struct work_struct *work) |
2326 | { | 2351 | { |
2327 | struct cfq_data *cfqd = | 2352 | struct cfq_data *cfqd = |
2328 | container_of(work, struct cfq_data, unplug_work); | 2353 | container_of(work, struct cfq_data, unplug_work.work); |
2329 | struct request_queue *q = cfqd->queue; | 2354 | struct request_queue *q = cfqd->queue; |
2330 | 2355 | ||
2331 | spin_lock_irq(q->queue_lock); | 2356 | spin_lock_irq(q->queue_lock); |
@@ -2379,7 +2404,7 @@ static void cfq_idle_slice_timer(unsigned long data) | |||
2379 | expire: | 2404 | expire: |
2380 | cfq_slice_expired(cfqd, timed_out); | 2405 | cfq_slice_expired(cfqd, timed_out); |
2381 | out_kick: | 2406 | out_kick: |
2382 | cfq_schedule_dispatch(cfqd); | 2407 | cfq_schedule_dispatch(cfqd, 0); |
2383 | out_cont: | 2408 | out_cont: |
2384 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); | 2409 | spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); |
2385 | } | 2410 | } |
@@ -2387,7 +2412,7 @@ out_cont: | |||
2387 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) | 2412 | static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) |
2388 | { | 2413 | { |
2389 | del_timer_sync(&cfqd->idle_slice_timer); | 2414 | del_timer_sync(&cfqd->idle_slice_timer); |
2390 | cancel_work_sync(&cfqd->unplug_work); | 2415 | cancel_delayed_work_sync(&cfqd->unplug_work); |
2391 | } | 2416 | } |
2392 | 2417 | ||
2393 | static void cfq_put_async_queues(struct cfq_data *cfqd) | 2418 | static void cfq_put_async_queues(struct cfq_data *cfqd) |
@@ -2469,7 +2494,7 @@ static void *cfq_init_queue(struct request_queue *q) | |||
2469 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; | 2494 | cfqd->idle_slice_timer.function = cfq_idle_slice_timer; |
2470 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; | 2495 | cfqd->idle_slice_timer.data = (unsigned long) cfqd; |
2471 | 2496 | ||
2472 | INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); | 2497 | INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); |
2473 | 2498 | ||
2474 | cfqd->cfq_quantum = cfq_quantum; | 2499 | cfqd->cfq_quantum = cfq_quantum; |
2475 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; | 2500 | cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; |
@@ -2480,8 +2505,9 @@ static void *cfq_init_queue(struct request_queue *q) | |||
2480 | cfqd->cfq_slice[1] = cfq_slice_sync; | 2505 | cfqd->cfq_slice[1] = cfq_slice_sync; |
2481 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; | 2506 | cfqd->cfq_slice_async_rq = cfq_slice_async_rq; |
2482 | cfqd->cfq_slice_idle = cfq_slice_idle; | 2507 | cfqd->cfq_slice_idle = cfq_slice_idle; |
2508 | cfqd->cfq_latency = 1; | ||
2483 | cfqd->hw_tag = 1; | 2509 | cfqd->hw_tag = 1; |
2484 | 2510 | cfqd->last_end_sync_rq = jiffies; | |
2485 | return cfqd; | 2511 | return cfqd; |
2486 | } | 2512 | } |
2487 | 2513 | ||
@@ -2549,6 +2575,7 @@ SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1); | |||
2549 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); | 2575 | SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); |
2550 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); | 2576 | SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); |
2551 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); | 2577 | SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); |
2578 | SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); | ||
2552 | #undef SHOW_FUNCTION | 2579 | #undef SHOW_FUNCTION |
2553 | 2580 | ||
2554 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ | 2581 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
@@ -2580,6 +2607,7 @@ STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); | |||
2580 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); | 2607 | STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); |
2581 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, | 2608 | STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, |
2582 | UINT_MAX, 0); | 2609 | UINT_MAX, 0); |
2610 | STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); | ||
2583 | #undef STORE_FUNCTION | 2611 | #undef STORE_FUNCTION |
2584 | 2612 | ||
2585 | #define CFQ_ATTR(name) \ | 2613 | #define CFQ_ATTR(name) \ |
@@ -2595,6 +2623,7 @@ static struct elv_fs_entry cfq_attrs[] = { | |||
2595 | CFQ_ATTR(slice_async), | 2623 | CFQ_ATTR(slice_async), |
2596 | CFQ_ATTR(slice_async_rq), | 2624 | CFQ_ATTR(slice_async_rq), |
2597 | CFQ_ATTR(slice_idle), | 2625 | CFQ_ATTR(slice_idle), |
2626 | CFQ_ATTR(low_latency), | ||
2598 | __ATTR_NULL | 2627 | __ATTR_NULL |
2599 | }; | 2628 | }; |
2600 | 2629 | ||
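To make the async ramp-up added above concrete, here is the same arithmetic as plain userspace C: the allowed async dispatch depth grows with the time since the last sync completion, in units of the sync slice. This assumes HZ=1000 so one jiffy equals one millisecond; it mirrors the kernel logic but is not kernel code.

```c
/* Illustration of CFQ's async dispatch ramp-up. */
#include <stdio.h>

static unsigned int async_depth(unsigned long jiffies_since_last_sync,
				unsigned int slice_sync,
				unsigned int dispatched,
				unsigned int max_dispatch)
{
	unsigned int depth = jiffies_since_last_sync / slice_sync;

	/* Always allow at least one request if nothing is dispatched yet. */
	if (!depth && !dispatched)
		depth = 1;
	return depth < max_dispatch ? depth : max_dispatch;
}

int main(void)
{
	/* slice_sync defaults to 100ms (HZ / 10 jiffies at HZ=1000). */
	unsigned long t;

	for (t = 0; t <= 500; t += 100)
		printf("%3lums since last sync I/O -> async depth %u\n",
		       t, async_depth(t, 100, 0, 4));
	return 0;
}
```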
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 7865a34e0faa..9bd086c1a4d5 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -21,6 +21,11 @@ static int compat_put_int(unsigned long arg, int val) | |||
21 | return put_user(val, (compat_int_t __user *)compat_ptr(arg)); | 21 | return put_user(val, (compat_int_t __user *)compat_ptr(arg)); |
22 | } | 22 | } |
23 | 23 | ||
24 | static int compat_put_uint(unsigned long arg, unsigned int val) | ||
25 | { | ||
26 | return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); | ||
27 | } | ||
28 | |||
24 | static int compat_put_long(unsigned long arg, long val) | 29 | static int compat_put_long(unsigned long arg, long val) |
25 | { | 30 | { |
26 | return put_user(val, (compat_long_t __user *)compat_ptr(arg)); | 31 | return put_user(val, (compat_long_t __user *)compat_ptr(arg)); |
@@ -734,6 +739,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
734 | switch (cmd) { | 739 | switch (cmd) { |
735 | case HDIO_GETGEO: | 740 | case HDIO_GETGEO: |
736 | return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); | 741 | return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); |
742 | case BLKPBSZGET: | ||
743 | return compat_put_uint(arg, bdev_physical_block_size(bdev)); | ||
744 | case BLKIOMIN: | ||
745 | return compat_put_uint(arg, bdev_io_min(bdev)); | ||
746 | case BLKIOOPT: | ||
747 | return compat_put_uint(arg, bdev_io_opt(bdev)); | ||
748 | case BLKALIGNOFF: | ||
749 | return compat_put_int(arg, bdev_alignment_offset(bdev)); | ||
737 | case BLKFLSBUF: | 750 | case BLKFLSBUF: |
738 | case BLKROSET: | 751 | case BLKROSET: |
739 | case BLKDISCARD: | 752 | case BLKDISCARD: |
diff --git a/block/genhd.c b/block/genhd.c
index 517e4332cb37..5a0861da324d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -869,7 +869,6 @@ static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); | |||
869 | static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); | 869 | static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); |
870 | static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); | 870 | static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); |
871 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); | 871 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); |
872 | static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); | ||
873 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 872 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
874 | static struct device_attribute dev_attr_fail = | 873 | static struct device_attribute dev_attr_fail = |
875 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); | 874 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); |
@@ -889,7 +888,6 @@ static struct attribute *disk_attrs[] = { | |||
889 | &dev_attr_alignment_offset.attr, | 888 | &dev_attr_alignment_offset.attr, |
890 | &dev_attr_capability.attr, | 889 | &dev_attr_capability.attr, |
891 | &dev_attr_stat.attr, | 890 | &dev_attr_stat.attr, |
892 | &dev_attr_inflight.attr, | ||
893 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 891 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
894 | &dev_attr_fail.attr, | 892 | &dev_attr_fail.attr, |
895 | #endif | 893 | #endif |
@@ -1055,7 +1053,7 @@ static int diskstats_show(struct seq_file *seqf, void *v) | |||
1055 | part_stat_read(hd, merges[1]), | 1053 | part_stat_read(hd, merges[1]), |
1056 | (unsigned long long)part_stat_read(hd, sectors[1]), | 1054 | (unsigned long long)part_stat_read(hd, sectors[1]), |
1057 | jiffies_to_msecs(part_stat_read(hd, ticks[1])), | 1055 | jiffies_to_msecs(part_stat_read(hd, ticks[1])), |
1058 | part_in_flight(hd), | 1056 | hd->in_flight, |
1059 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), | 1057 | jiffies_to_msecs(part_stat_read(hd, io_ticks)), |
1060 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) | 1058 | jiffies_to_msecs(part_stat_read(hd, time_in_queue)) |
1061 | ); | 1059 | ); |
diff --git a/block/ioctl.c b/block/ioctl.c
index d3e6b5827a34..1f4d1de12b09 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -138,6 +138,11 @@ static int put_int(unsigned long arg, int val) | |||
138 | return put_user(val, (int __user *)arg); | 138 | return put_user(val, (int __user *)arg); |
139 | } | 139 | } |
140 | 140 | ||
141 | static int put_uint(unsigned long arg, unsigned int val) | ||
142 | { | ||
143 | return put_user(val, (unsigned int __user *)arg); | ||
144 | } | ||
145 | |||
141 | static int put_long(unsigned long arg, long val) | 146 | static int put_long(unsigned long arg, long val) |
142 | { | 147 | { |
143 | return put_user(val, (long __user *)arg); | 148 | return put_user(val, (long __user *)arg); |
@@ -263,10 +268,18 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, | |||
263 | return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); | 268 | return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); |
264 | case BLKROGET: | 269 | case BLKROGET: |
265 | return put_int(arg, bdev_read_only(bdev) != 0); | 270 | return put_int(arg, bdev_read_only(bdev) != 0); |
266 | case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ | 271 | case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ |
267 | return put_int(arg, block_size(bdev)); | 272 | return put_int(arg, block_size(bdev)); |
268 | case BLKSSZGET: /* get block device hardware sector size */ | 273 | case BLKSSZGET: /* get block device logical block size */ |
269 | return put_int(arg, bdev_logical_block_size(bdev)); | 274 | return put_int(arg, bdev_logical_block_size(bdev)); |
275 | case BLKPBSZGET: /* get block device physical block size */ | ||
276 | return put_uint(arg, bdev_physical_block_size(bdev)); | ||
277 | case BLKIOMIN: | ||
278 | return put_uint(arg, bdev_io_min(bdev)); | ||
279 | case BLKIOOPT: | ||
280 | return put_uint(arg, bdev_io_opt(bdev)); | ||
281 | case BLKALIGNOFF: | ||
282 | return put_int(arg, bdev_alignment_offset(bdev)); | ||
270 | case BLKSECTGET: | 283 | case BLKSECTGET: |
271 | return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); | 284 | return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); |
272 | case BLKRASET: | 285 | case BLKRASET: |
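Together with the compat handlers above, these ioctls expose the queue topology limits to userspace. A small sketch of how a program might query them; it assumes kernel headers new enough to define BLKPBSZGET/BLKIOMIN/BLKIOOPT/BLKALIGNOFF, and /dev/sda is only an example device.

```c
/* Sketch: querying the new block topology ioctls from userspace. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(void)
{
	unsigned int pbs = 0, io_min = 0, io_opt = 0;
	int align = 0;
	int fd = open("/dev/sda", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ioctl(fd, BLKPBSZGET, &pbs);	/* physical block size */
	ioctl(fd, BLKIOMIN, &io_min);	/* minimum I/O size */
	ioctl(fd, BLKIOOPT, &io_opt);	/* optimal I/O size */
	ioctl(fd, BLKALIGNOFF, &align);	/* alignment offset */
	printf("pbs=%u io_min=%u io_opt=%u align=%d\n",
	       pbs, io_min, io_opt, align);
	close(fd);
	return 0;
}
```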
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 6fa7b0fdbdfd..eb4fa1943944 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/smp_lock.h> | 39 | #include <linux/smp_lock.h> |
40 | #include <linux/proc_fs.h> | 40 | #include <linux/proc_fs.h> |
41 | #include <linux/seq_file.h> | ||
41 | #include <linux/reboot.h> | 42 | #include <linux/reboot.h> |
42 | #include <linux/spinlock.h> | 43 | #include <linux/spinlock.h> |
43 | #include <linux/timer.h> | 44 | #include <linux/timer.h> |
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller, | |||
6422 | return true; | 6423 | return true; |
6423 | } | 6424 | } |
6424 | 6425 | ||
6425 | 6426 | static int dac960_proc_show(struct seq_file *m, void *v) | |
6426 | /* | ||
6427 | DAC960_ProcReadStatus implements reading /proc/rd/status. | ||
6428 | */ | ||
6429 | |||
6430 | static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, | ||
6431 | int Count, int *EOF, void *Data) | ||
6432 | { | 6427 | { |
6433 | unsigned char *StatusMessage = "OK\n"; | 6428 | unsigned char *StatusMessage = "OK\n"; |
6434 | int ControllerNumber, BytesAvailable; | 6429 | int ControllerNumber; |
6435 | for (ControllerNumber = 0; | 6430 | for (ControllerNumber = 0; |
6436 | ControllerNumber < DAC960_ControllerCount; | 6431 | ControllerNumber < DAC960_ControllerCount; |
6437 | ControllerNumber++) | 6432 | ControllerNumber++) |
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, | |||
6444 | break; | 6439 | break; |
6445 | } | 6440 | } |
6446 | } | 6441 | } |
6447 | BytesAvailable = strlen(StatusMessage) - Offset; | 6442 | seq_puts(m, StatusMessage); |
6448 | if (Count >= BytesAvailable) | 6443 | return 0; |
6449 | { | ||
6450 | Count = BytesAvailable; | ||
6451 | *EOF = true; | ||
6452 | } | ||
6453 | if (Count <= 0) return 0; | ||
6454 | *Start = Page; | ||
6455 | memcpy(Page, &StatusMessage[Offset], Count); | ||
6456 | return Count; | ||
6457 | } | 6444 | } |
6458 | 6445 | ||
6446 | static int dac960_proc_open(struct inode *inode, struct file *file) | ||
6447 | { | ||
6448 | return single_open(file, dac960_proc_show, NULL); | ||
6449 | } | ||
6459 | 6450 | ||
6460 | /* | 6451 | static const struct file_operations dac960_proc_fops = { |
6461 | DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. | 6452 | .owner = THIS_MODULE, |
6462 | */ | 6453 | .open = dac960_proc_open, |
6454 | .read = seq_read, | ||
6455 | .llseek = seq_lseek, | ||
6456 | .release = single_release, | ||
6457 | }; | ||
6463 | 6458 | ||
6464 | static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, | 6459 | static int dac960_initial_status_proc_show(struct seq_file *m, void *v) |
6465 | int Count, int *EOF, void *Data) | ||
6466 | { | 6460 | { |
6467 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6461 | DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; |
6468 | int BytesAvailable = Controller->InitialStatusLength - Offset; | 6462 | seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); |
6469 | if (Count >= BytesAvailable) | 6463 | return 0; |
6470 | { | ||
6471 | Count = BytesAvailable; | ||
6472 | *EOF = true; | ||
6473 | } | ||
6474 | if (Count <= 0) return 0; | ||
6475 | *Start = Page; | ||
6476 | memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); | ||
6477 | return Count; | ||
6478 | } | 6464 | } |
6479 | 6465 | ||
6466 | static int dac960_initial_status_proc_open(struct inode *inode, struct file *file) | ||
6467 | { | ||
6468 | return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data); | ||
6469 | } | ||
6480 | 6470 | ||
6481 | /* | 6471 | static const struct file_operations dac960_initial_status_proc_fops = { |
6482 | DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. | 6472 | .owner = THIS_MODULE, |
6483 | */ | 6473 | .open = dac960_initial_status_proc_open, |
6474 | .read = seq_read, | ||
6475 | .llseek = seq_lseek, | ||
6476 | .release = single_release, | ||
6477 | }; | ||
6484 | 6478 | ||
6485 | static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, | 6479 | static int dac960_current_status_proc_show(struct seq_file *m, void *v) |
6486 | int Count, int *EOF, void *Data) | ||
6487 | { | 6480 | { |
6488 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6481 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private; |
6489 | unsigned char *StatusMessage = | 6482 | unsigned char *StatusMessage = |
6490 | "No Rebuild or Consistency Check in Progress\n"; | 6483 | "No Rebuild or Consistency Check in Progress\n"; |
6491 | int ProgressMessageLength = strlen(StatusMessage); | 6484 | int ProgressMessageLength = strlen(StatusMessage); |
6492 | int BytesAvailable; | ||
6493 | if (jiffies != Controller->LastCurrentStatusTime) | 6485 | if (jiffies != Controller->LastCurrentStatusTime) |
6494 | { | 6486 | { |
6495 | Controller->CurrentStatusLength = 0; | 6487 | Controller->CurrentStatusLength = 0; |
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, | |||
6513 | } | 6505 | } |
6514 | Controller->LastCurrentStatusTime = jiffies; | 6506 | Controller->LastCurrentStatusTime = jiffies; |
6515 | } | 6507 | } |
6516 | BytesAvailable = Controller->CurrentStatusLength - Offset; | 6508 | seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer); |
6517 | if (Count >= BytesAvailable) | 6509 | return 0; |
6518 | { | ||
6519 | Count = BytesAvailable; | ||
6520 | *EOF = true; | ||
6521 | } | ||
6522 | if (Count <= 0) return 0; | ||
6523 | *Start = Page; | ||
6524 | memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count); | ||
6525 | return Count; | ||
6526 | } | 6510 | } |
6527 | 6511 | ||
6512 | static int dac960_current_status_proc_open(struct inode *inode, struct file *file) | ||
6513 | { | ||
6514 | return single_open(file, dac960_current_status_proc_show, PDE(inode)->data); | ||
6515 | } | ||
6528 | 6516 | ||
6529 | /* | 6517 | static const struct file_operations dac960_current_status_proc_fops = { |
6530 | DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. | 6518 | .owner = THIS_MODULE, |
6531 | */ | 6519 | .open = dac960_current_status_proc_open, |
6520 | .read = seq_read, | ||
6521 | .llseek = seq_lseek, | ||
6522 | .release = single_release, | ||
6523 | }; | ||
6532 | 6524 | ||
6533 | static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, | 6525 | static int dac960_user_command_proc_show(struct seq_file *m, void *v) |
6534 | int Count, int *EOF, void *Data) | ||
6535 | { | 6526 | { |
6536 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6527 | DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; |
6537 | int BytesAvailable = Controller->UserStatusLength - Offset; | ||
6538 | if (Count >= BytesAvailable) | ||
6539 | { | ||
6540 | Count = BytesAvailable; | ||
6541 | *EOF = true; | ||
6542 | } | ||
6543 | if (Count <= 0) return 0; | ||
6544 | *Start = Page; | ||
6545 | memcpy(Page, &Controller->UserStatusBuffer[Offset], Count); | ||
6546 | return Count; | ||
6547 | } | ||
6548 | 6528 | ||
6529 | seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer); | ||
6530 | return 0; | ||
6531 | } | ||
6549 | 6532 | ||
6550 | /* | 6533 | static int dac960_user_command_proc_open(struct inode *inode, struct file *file) |
6551 | DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. | 6534 | { |
6552 | */ | 6535 | return single_open(file, dac960_user_command_proc_show, PDE(inode)->data); |
6536 | } | ||
6553 | 6537 | ||
6554 | static int DAC960_ProcWriteUserCommand(struct file *file, | 6538 | static ssize_t dac960_user_command_proc_write(struct file *file, |
6555 | const char __user *Buffer, | 6539 | const char __user *Buffer, |
6556 | unsigned long Count, void *Data) | 6540 | size_t Count, loff_t *pos) |
6557 | { | 6541 | { |
6558 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; | 6542 | DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data; |
6559 | unsigned char CommandBuffer[80]; | 6543 | unsigned char CommandBuffer[80]; |
6560 | int Length; | 6544 | int Length; |
6561 | if (Count > sizeof(CommandBuffer)-1) return -EINVAL; | 6545 | if (Count > sizeof(CommandBuffer)-1) return -EINVAL; |
@@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file, | |||
6572 | ? Count : -EBUSY); | 6556 | ? Count : -EBUSY); |
6573 | } | 6557 | } |
6574 | 6558 | ||
6559 | static const struct file_operations dac960_user_command_proc_fops = { | ||
6560 | .owner = THIS_MODULE, | ||
6561 | .open = dac960_user_command_proc_open, | ||
6562 | .read = seq_read, | ||
6563 | .llseek = seq_lseek, | ||
6564 | .release = single_release, | ||
6565 | .write = dac960_user_command_proc_write, | ||
6566 | }; | ||
6575 | 6567 | ||
6576 | /* | 6568 | /* |
6577 | DAC960_CreateProcEntries creates the /proc/rd/... entries for the | 6569 | DAC960_CreateProcEntries creates the /proc/rd/... entries for the |
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller) | |||
6586 | 6578 | ||
6587 | if (DAC960_ProcDirectoryEntry == NULL) { | 6579 | if (DAC960_ProcDirectoryEntry == NULL) { |
6588 | DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); | 6580 | DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); |
6589 | StatusProcEntry = create_proc_read_entry("status", 0, | 6581 | StatusProcEntry = proc_create("status", 0, |
6590 | DAC960_ProcDirectoryEntry, | 6582 | DAC960_ProcDirectoryEntry, |
6591 | DAC960_ProcReadStatus, NULL); | 6583 | &dac960_proc_fops); |
6592 | } | 6584 | } |
6593 | 6585 | ||
6594 | sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); | 6586 | sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); |
6595 | ControllerProcEntry = proc_mkdir(Controller->ControllerName, | 6587 | ControllerProcEntry = proc_mkdir(Controller->ControllerName, |
6596 | DAC960_ProcDirectoryEntry); | 6588 | DAC960_ProcDirectoryEntry); |
6597 | create_proc_read_entry("initial_status", 0, ControllerProcEntry, | 6589 | proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); |
6598 | DAC960_ProcReadInitialStatus, Controller); | 6590 | proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); |
6599 | create_proc_read_entry("current_status", 0, ControllerProcEntry, | 6591 | UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); |
6600 | DAC960_ProcReadCurrentStatus, Controller); | ||
6601 | UserCommandProcEntry = | ||
6602 | create_proc_read_entry("user_command", S_IWUSR | S_IRUSR, | ||
6603 | ControllerProcEntry, DAC960_ProcReadUserCommand, | ||
6604 | Controller); | ||
6605 | UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand; | ||
6606 | Controller->ControllerProcEntry = ControllerProcEntry; | 6592 | Controller->ControllerProcEntry = ControllerProcEntry; |
6607 | } | 6593 | } |
6608 | 6594 | ||
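The DAC960 /proc files are converted from the old read_proc callbacks to seq_file. A minimal stand-alone sketch of the same proc_create()/single_open() pattern, written against the file_operations-based proc API used in this kernel series (later kernels switched to struct proc_ops); the entry name is arbitrary.

```c
/* Sketch of the create_proc_read_entry -> proc_create()/seq_file pattern. */
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_proc_show(struct seq_file *m, void *v)
{
	seq_puts(m, "OK\n");
	return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	if (!proc_create("demo_status", 0, NULL, &demo_proc_fops))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_status", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```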
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 1ece0b47b581..fb5be2d95d52 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -36,9 +36,11 @@ | |||
36 | #include <linux/proc_fs.h> | 36 | #include <linux/proc_fs.h> |
37 | #include <linux/seq_file.h> | 37 | #include <linux/seq_file.h> |
38 | #include <linux/init.h> | 38 | #include <linux/init.h> |
39 | #include <linux/jiffies.h> | ||
39 | #include <linux/hdreg.h> | 40 | #include <linux/hdreg.h> |
40 | #include <linux/spinlock.h> | 41 | #include <linux/spinlock.h> |
41 | #include <linux/compat.h> | 42 | #include <linux/compat.h> |
43 | #include <linux/mutex.h> | ||
42 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
43 | #include <asm/io.h> | 45 | #include <asm/io.h> |
44 | 46 | ||
@@ -155,6 +157,10 @@ static struct board_type products[] = { | |||
155 | 157 | ||
156 | static ctlr_info_t *hba[MAX_CTLR]; | 158 | static ctlr_info_t *hba[MAX_CTLR]; |
157 | 159 | ||
160 | static struct task_struct *cciss_scan_thread; | ||
161 | static DEFINE_MUTEX(scan_mutex); | ||
162 | static LIST_HEAD(scan_q); | ||
163 | |||
158 | static void do_cciss_request(struct request_queue *q); | 164 | static void do_cciss_request(struct request_queue *q); |
159 | static irqreturn_t do_cciss_intr(int irq, void *dev_id); | 165 | static irqreturn_t do_cciss_intr(int irq, void *dev_id); |
160 | static int cciss_open(struct block_device *bdev, fmode_t mode); | 166 | static int cciss_open(struct block_device *bdev, fmode_t mode); |
@@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
164 | static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); | 170 | static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); |
165 | 171 | ||
166 | static int cciss_revalidate(struct gendisk *disk); | 172 | static int cciss_revalidate(struct gendisk *disk); |
167 | static int rebuild_lun_table(ctlr_info_t *h, int first_time); | 173 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); |
168 | static int deregister_disk(ctlr_info_t *h, int drv_index, | 174 | static int deregister_disk(ctlr_info_t *h, int drv_index, |
169 | int clear_all); | 175 | int clear_all, int via_ioctl); |
170 | 176 | ||
171 | static void cciss_read_capacity(int ctlr, int logvol, int withirq, | 177 | static void cciss_read_capacity(int ctlr, int logvol, int withirq, |
172 | sector_t *total_size, unsigned int *block_size); | 178 | sector_t *total_size, unsigned int *block_size); |
@@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, | |||
189 | static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); | 195 | static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); |
190 | 196 | ||
191 | static void fail_all_cmds(unsigned long ctlr); | 197 | static void fail_all_cmds(unsigned long ctlr); |
198 | static int add_to_scan_list(struct ctlr_info *h); | ||
192 | static int scan_thread(void *data); | 199 | static int scan_thread(void *data); |
193 | static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); | 200 | static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); |
201 | static void cciss_hba_release(struct device *dev); | ||
202 | static void cciss_device_release(struct device *dev); | ||
203 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); | ||
204 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); | ||
194 | 205 | ||
195 | #ifdef CONFIG_PROC_FS | 206 | #ifdef CONFIG_PROC_FS |
196 | static void cciss_procinit(int i); | 207 | static void cciss_procinit(int i); |
@@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c) | |||
245 | 256 | ||
246 | #include "cciss_scsi.c" /* For SCSI tape support */ | 257 | #include "cciss_scsi.c" /* For SCSI tape support */ |
247 | 258 | ||
248 | #define RAID_UNKNOWN 6 | 259 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
260 | "UNKNOWN" | ||
261 | }; | ||
262 | #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) | ||
249 | 263 | ||
250 | #ifdef CONFIG_PROC_FS | 264 | #ifdef CONFIG_PROC_FS |
251 | 265 | ||
@@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c) | |||
255 | #define ENG_GIG 1000000000 | 269 | #define ENG_GIG 1000000000 |
256 | #define ENG_GIG_FACTOR (ENG_GIG/512) | 270 | #define ENG_GIG_FACTOR (ENG_GIG/512) |
257 | #define ENGAGE_SCSI "engage scsi" | 271 | #define ENGAGE_SCSI "engage scsi" |
258 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | ||
259 | "UNKNOWN" | ||
260 | }; | ||
261 | 272 | ||
262 | static struct proc_dir_entry *proc_cciss; | 273 | static struct proc_dir_entry *proc_cciss; |
263 | 274 | ||
@@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | |||
318 | ctlr_info_t *h = seq->private; | 329 | ctlr_info_t *h = seq->private; |
319 | unsigned ctlr = h->ctlr; | 330 | unsigned ctlr = h->ctlr; |
320 | loff_t *pos = v; | 331 | loff_t *pos = v; |
321 | drive_info_struct *drv = &h->drv[*pos]; | 332 | drive_info_struct *drv = h->drv[*pos]; |
322 | 333 | ||
323 | if (*pos > h->highest_lun) | 334 | if (*pos > h->highest_lun) |
324 | return 0; | 335 | return 0; |
@@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v) | |||
331 | vol_sz_frac *= 100; | 342 | vol_sz_frac *= 100; |
332 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); | 343 | sector_div(vol_sz_frac, ENG_GIG_FACTOR); |
333 | 344 | ||
334 | if (drv->raid_level > 5) | 345 | if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) |
335 | drv->raid_level = RAID_UNKNOWN; | 346 | drv->raid_level = RAID_UNKNOWN; |
336 | seq_printf(seq, "cciss/c%dd%d:" | 347 | seq_printf(seq, "cciss/c%dd%d:" |
337 | "\t%4u.%02uGB\tRAID %s\n", | 348 | "\t%4u.%02uGB\tRAID %s\n", |
@@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i) | |||
454 | #define to_hba(n) container_of(n, struct ctlr_info, dev) | 465 | #define to_hba(n) container_of(n, struct ctlr_info, dev) |
455 | #define to_drv(n) container_of(n, drive_info_struct, dev) | 466 | #define to_drv(n) container_of(n, drive_info_struct, dev) |
456 | 467 | ||
457 | static struct device_type cciss_host_type = { | 468 | static ssize_t host_store_rescan(struct device *dev, |
458 | .name = "cciss_host", | 469 | struct device_attribute *attr, |
459 | }; | 470 | const char *buf, size_t count) |
471 | { | ||
472 | struct ctlr_info *h = to_hba(dev); | ||
473 | |||
474 | add_to_scan_list(h); | ||
475 | wake_up_process(cciss_scan_thread); | ||
476 | wait_for_completion_interruptible(&h->scan_wait); | ||
477 | |||
478 | return count; | ||
479 | } | ||
480 | DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); | ||
460 | 481 | ||
461 | static ssize_t dev_show_unique_id(struct device *dev, | 482 | static ssize_t dev_show_unique_id(struct device *dev, |
462 | struct device_attribute *attr, | 483 | struct device_attribute *attr, |
@@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev, | |||
560 | } | 581 | } |
561 | DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); | 582 | DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); |
562 | 583 | ||
584 | static ssize_t cciss_show_lunid(struct device *dev, | ||
585 | struct device_attribute *attr, char *buf) | ||
586 | { | ||
587 | drive_info_struct *drv = to_drv(dev); | ||
588 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
589 | unsigned long flags; | ||
590 | unsigned char lunid[8]; | ||
591 | |||
592 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
593 | if (h->busy_configuring) { | ||
594 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
595 | return -EBUSY; | ||
596 | } | ||
597 | if (!drv->heads) { | ||
598 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
599 | return -ENOTTY; | ||
600 | } | ||
601 | memcpy(lunid, drv->LunID, sizeof(lunid)); | ||
602 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
603 | return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", | ||
604 | lunid[0], lunid[1], lunid[2], lunid[3], | ||
605 | lunid[4], lunid[5], lunid[6], lunid[7]); | ||
606 | } | ||
607 | DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); | ||
608 | |||
609 | static ssize_t cciss_show_raid_level(struct device *dev, | ||
610 | struct device_attribute *attr, char *buf) | ||
611 | { | ||
612 | drive_info_struct *drv = to_drv(dev); | ||
613 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
614 | int raid; | ||
615 | unsigned long flags; | ||
616 | |||
617 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
618 | if (h->busy_configuring) { | ||
619 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
620 | return -EBUSY; | ||
621 | } | ||
622 | raid = drv->raid_level; | ||
623 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
624 | if (raid < 0 || raid > RAID_UNKNOWN) | ||
625 | raid = RAID_UNKNOWN; | ||
626 | |||
627 | return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", | ||
628 | raid_label[raid]); | ||
629 | } | ||
630 | DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); | ||
631 | |||
632 | static ssize_t cciss_show_usage_count(struct device *dev, | ||
633 | struct device_attribute *attr, char *buf) | ||
634 | { | ||
635 | drive_info_struct *drv = to_drv(dev); | ||
636 | struct ctlr_info *h = to_hba(drv->dev.parent); | ||
637 | unsigned long flags; | ||
638 | int count; | ||
639 | |||
640 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | ||
641 | if (h->busy_configuring) { | ||
642 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
643 | return -EBUSY; | ||
644 | } | ||
645 | count = drv->usage_count; | ||
646 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | ||
647 | return snprintf(buf, 20, "%d\n", count); | ||
648 | } | ||
649 | DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); | ||
650 | |||
651 | static struct attribute *cciss_host_attrs[] = { | ||
652 | &dev_attr_rescan.attr, | ||
653 | NULL | ||
654 | }; | ||
655 | |||
656 | static struct attribute_group cciss_host_attr_group = { | ||
657 | .attrs = cciss_host_attrs, | ||
658 | }; | ||
659 | |||
660 | static const struct attribute_group *cciss_host_attr_groups[] = { | ||
661 | &cciss_host_attr_group, | ||
662 | NULL | ||
663 | }; | ||
664 | |||
665 | static struct device_type cciss_host_type = { | ||
666 | .name = "cciss_host", | ||
667 | .groups = cciss_host_attr_groups, | ||
668 | .release = cciss_hba_release, | ||
669 | }; | ||
670 | |||
563 | static struct attribute *cciss_dev_attrs[] = { | 671 | static struct attribute *cciss_dev_attrs[] = { |
564 | &dev_attr_unique_id.attr, | 672 | &dev_attr_unique_id.attr, |
565 | &dev_attr_model.attr, | 673 | &dev_attr_model.attr, |
566 | &dev_attr_vendor.attr, | 674 | &dev_attr_vendor.attr, |
567 | &dev_attr_rev.attr, | 675 | &dev_attr_rev.attr, |
676 | &dev_attr_lunid.attr, | ||
677 | &dev_attr_raid_level.attr, | ||
678 | &dev_attr_usage_count.attr, | ||
568 | NULL | 679 | NULL |
569 | }; | 680 | }; |
570 | 681 | ||
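The three per-drive attributes added here expose the 8-byte LUN ID as a 0x-prefixed hex string, the RAID level as a "RAID <label>" line, and the current open count, each returning -EBUSY while the controller is reconfiguring. A short read-side sketch, with an assumed example directory for c0d0 on the first controller:

/* Hedged sketch: dump the new per-drive attributes for one logical drive.
 * The directory is an example; real paths follow
 * /sys/bus/pci/devices/<dev>/ccissX/cXdY/. */
#include <stdio.h>

static void show(const char *dir, const char *name)
{
	char path[256], line[64];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", dir, name);
	f = fopen(path, "r");
	if (f && fgets(line, sizeof(line), f))
		printf("%s: %s", name, line);	/* line keeps its newline */
	else
		printf("%s: <unreadable>\n", name);
	if (f)
		fclose(f);
}

int main(void)
{
	const char *dir = "/sys/bus/pci/devices/0000:06:00.0/cciss0/c0d0";

	show(dir, "lunid");
	show(dir, "raid_level");
	show(dir, "usage_count");
	return 0;
}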
@@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = { | |||
580 | static struct device_type cciss_dev_type = { | 691 | static struct device_type cciss_dev_type = { |
581 | .name = "cciss_device", | 692 | .name = "cciss_device", |
582 | .groups = cciss_dev_attr_groups, | 693 | .groups = cciss_dev_attr_groups, |
694 | .release = cciss_device_release, | ||
583 | }; | 695 | }; |
584 | 696 | ||
585 | static struct bus_type cciss_bus_type = { | 697 | static struct bus_type cciss_bus_type = { |
586 | .name = "cciss", | 698 | .name = "cciss", |
587 | }; | 699 | }; |
588 | 700 | ||
701 | /* | ||
702 | * cciss_hba_release is called when the reference count | ||
703 | * of h->dev goes to zero. | ||
704 | */ | ||
705 | static void cciss_hba_release(struct device *dev) | ||
706 | { | ||
707 | /* | ||
708 | * nothing to do, but need this to avoid a warning | ||
709 | * about not having a release handler from lib/kref.c. | ||
710 | */ | ||
711 | } | ||
589 | 712 | ||
590 | /* | 713 | /* |
591 | * Initialize sysfs entry for each controller. This sets up and registers | 714 | * Initialize sysfs entry for each controller. This sets up and registers |
@@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) | |||
609 | static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) | 732 | static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) |
610 | { | 733 | { |
611 | device_del(&h->dev); | 734 | device_del(&h->dev); |
735 | put_device(&h->dev); /* final put. */ | ||
736 | } | ||
737 | |||
738 | /* cciss_device_release is called when the reference count | ||
739 | * of h->drv[x]->dev goes to zero. | ||
740 | */ | ||
741 | static void cciss_device_release(struct device *dev) | ||
742 | { | ||
743 | drive_info_struct *drv = to_drv(dev); | ||
744 | kfree(drv); | ||
612 | } | 745 | } |
613 | 746 | ||
614 | /* | 747 | /* |
@@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) | |||
617 | * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from | 750 | * /sys/bus/pci/devices/<dev>/cciss#/. We also create a link from |
618 | * /sys/block/cciss!c#d# to this entry. | 751 | * /sys/block/cciss!c#d# to this entry. |
619 | */ | 752 | */ |
620 | static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, | 753 | static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, |
621 | drive_info_struct *drv, | ||
622 | int drv_index) | 754 | int drv_index) |
623 | { | 755 | { |
624 | device_initialize(&drv->dev); | 756 | struct device *dev; |
625 | drv->dev.type = &cciss_dev_type; | 757 | |
626 | drv->dev.bus = &cciss_bus_type; | 758 | if (h->drv[drv_index]->device_initialized) |
627 | dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); | 759 | return 0; |
628 | drv->dev.parent = &h->dev; | 760 | |
629 | return device_add(&drv->dev); | 761 | dev = &h->drv[drv_index]->dev; |
762 | device_initialize(dev); | ||
763 | dev->type = &cciss_dev_type; | ||
764 | dev->bus = &cciss_bus_type; | ||
765 | dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); | ||
766 | dev->parent = &h->dev; | ||
767 | h->drv[drv_index]->device_initialized = 1; | ||
768 | return device_add(dev); | ||
630 | } | 769 | } |
631 | 770 | ||
632 | /* | 771 | /* |
633 | * Remove sysfs entries for a logical drive. | 772 | * Remove sysfs entries for a logical drive. |
634 | */ | 773 | */ |
635 | static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) | 774 | static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, |
775 | int ctlr_exiting) | ||
636 | { | 776 | { |
637 | device_del(&drv->dev); | 777 | struct device *dev = &h->drv[drv_index]->dev; |
778 | |||
779 | /* special case for c*d0, we only destroy it on controller exit */ | ||
780 | if (drv_index == 0 && !ctlr_exiting) | ||
781 | return; | ||
782 | |||
783 | device_del(dev); | ||
784 | put_device(dev); /* the "final" put. */ | ||
785 | h->drv[drv_index] = NULL; | ||
638 | } | 786 | } |
639 | 787 | ||
640 | /* | 788 | /* |
@@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) | |||
751 | printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); | 899 | printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); |
752 | #endif /* CCISS_DEBUG */ | 900 | #endif /* CCISS_DEBUG */ |
753 | 901 | ||
754 | if (host->busy_initializing || drv->busy_configuring) | 902 | if (drv->busy_configuring) |
755 | return -EBUSY; | 903 | return -EBUSY; |
756 | /* | 904 | /* |
757 | * Root is allowed to open raw volume zero even if it's not configured | 905 | * Root is allowed to open raw volume zero even if it's not configured |
@@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode) | |||
767 | if (MINOR(bdev->bd_dev) & 0x0f) { | 915 | if (MINOR(bdev->bd_dev) & 0x0f) { |
768 | return -ENXIO; | 916 | return -ENXIO; |
769 | /* if it is, make sure we have a LUN ID */ | 917 | /* if it is, make sure we have a LUN ID */ |
770 | } else if (drv->LunID == 0) { | 918 | } else if (memcmp(drv->LunID, CTLR_LUNID, |
919 | sizeof(drv->LunID))) { | ||
771 | return -ENXIO; | 920 | return -ENXIO; |
772 | } | 921 | } |
773 | } | 922 | } |
@@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode, | |||
1132 | case CCISS_DEREGDISK: | 1281 | case CCISS_DEREGDISK: |
1133 | case CCISS_REGNEWD: | 1282 | case CCISS_REGNEWD: |
1134 | case CCISS_REVALIDVOLS: | 1283 | case CCISS_REVALIDVOLS: |
1135 | return rebuild_lun_table(host, 0); | 1284 | return rebuild_lun_table(host, 0, 1); |
1136 | 1285 | ||
1137 | case CCISS_GETLUNINFO:{ | 1286 | case CCISS_GETLUNINFO:{ |
1138 | LogvolInfo_struct luninfo; | 1287 | LogvolInfo_struct luninfo; |
1139 | 1288 | ||
1140 | luninfo.LunID = drv->LunID; | 1289 | memcpy(&luninfo.LunID, drv->LunID, |
1290 | sizeof(luninfo.LunID)); | ||
1141 | luninfo.num_opens = drv->usage_count; | 1291 | luninfo.num_opens = drv->usage_count; |
1142 | luninfo.num_parts = 0; | 1292 | luninfo.num_parts = 0; |
1143 | if (copy_to_user(argp, &luninfo, | 1293 | if (copy_to_user(argp, &luninfo, |
@@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h) | |||
1475 | /* make sure the disk has been added and the drive is real | 1625 | /* make sure the disk has been added and the drive is real |
1476 | * because this can be called from the middle of init_one. | 1626 | * because this can be called from the middle of init_one. |
1477 | */ | 1627 | */ |
1478 | if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) | 1628 | if (!h->drv[curr_queue]) |
1629 | continue; | ||
1630 | if (!(h->drv[curr_queue]->queue) || | ||
1631 | !(h->drv[curr_queue]->heads)) | ||
1479 | continue; | 1632 | continue; |
1480 | blk_start_queue(h->gendisk[curr_queue]->queue); | 1633 | blk_start_queue(h->gendisk[curr_queue]->queue); |
1481 | 1634 | ||
@@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq) | |||
1532 | spin_unlock_irqrestore(&h->lock, flags); | 1685 | spin_unlock_irqrestore(&h->lock, flags); |
1533 | } | 1686 | } |
1534 | 1687 | ||
1535 | static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], | 1688 | static inline void log_unit_to_scsi3addr(ctlr_info_t *h, |
1536 | uint32_t log_unit) | 1689 | unsigned char scsi3addr[], uint32_t log_unit) |
1537 | { | 1690 | { |
1538 | log_unit = h->drv[log_unit].LunID & 0x03fff; | 1691 | memcpy(scsi3addr, h->drv[log_unit]->LunID, |
1539 | memset(&scsi3addr[4], 0, 4); | 1692 | sizeof(h->drv[log_unit]->LunID)); |
1540 | memcpy(&scsi3addr[0], &log_unit, 4); | ||
1541 | scsi3addr[3] |= 0x40; | ||
1542 | } | 1693 | } |
1543 | 1694 | ||
1544 | /* This function gets the SCSI vendor, model, and revision of a logical drive | 1695 | /* This function gets the SCSI vendor, model, and revision of a logical drive |
@@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq, | |||
1615 | return; | 1766 | return; |
1616 | } | 1767 | } |
1617 | 1768 | ||
1618 | static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | 1769 | /* |
1770 | * cciss_add_disk sets up the block device queue for a logical drive | ||
1771 | */ | ||
1772 | static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | ||
1619 | int drv_index) | 1773 | int drv_index) |
1620 | { | 1774 | { |
1621 | disk->queue = blk_init_queue(do_cciss_request, &h->lock); | 1775 | disk->queue = blk_init_queue(do_cciss_request, &h->lock); |
1776 | if (!disk->queue) | ||
1777 | goto init_queue_failure; | ||
1622 | sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); | 1778 | sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); |
1623 | disk->major = h->major; | 1779 | disk->major = h->major; |
1624 | disk->first_minor = drv_index << NWD_SHIFT; | 1780 | disk->first_minor = drv_index << NWD_SHIFT; |
1625 | disk->fops = &cciss_fops; | 1781 | disk->fops = &cciss_fops; |
1626 | disk->private_data = &h->drv[drv_index]; | 1782 | if (cciss_create_ld_sysfs_entry(h, drv_index)) |
1627 | disk->driverfs_dev = &h->drv[drv_index].dev; | 1783 | goto cleanup_queue; |
1784 | disk->private_data = h->drv[drv_index]; | ||
1785 | disk->driverfs_dev = &h->drv[drv_index]->dev; | ||
1628 | 1786 | ||
1629 | /* Set up queue information */ | 1787 | /* Set up queue information */ |
1630 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); | 1788 | blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); |
@@ -1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
1642 | disk->queue->queuedata = h; | 1800 | disk->queue->queuedata = h; |
1643 | 1801 | ||
1644 | blk_queue_logical_block_size(disk->queue, | 1802 | blk_queue_logical_block_size(disk->queue, |
1645 | h->drv[drv_index].block_size); | 1803 | h->drv[drv_index]->block_size); |
1646 | 1804 | ||
1647 | /* Make sure all queue data is written out before */ | 1805 | /* Make sure all queue data is written out before */ |
1648 | /* setting h->drv[drv_index].queue, as setting this */ | 1806 | /* setting h->drv[drv_index]->queue, as setting this */ |
1649 | /* allows the interrupt handler to start the queue */ | 1807 | /* allows the interrupt handler to start the queue */ |
1650 | wmb(); | 1808 | wmb(); |
1651 | h->drv[drv_index].queue = disk->queue; | 1809 | h->drv[drv_index]->queue = disk->queue; |
1652 | add_disk(disk); | 1810 | add_disk(disk); |
1811 | return 0; | ||
1812 | |||
1813 | cleanup_queue: | ||
1814 | blk_cleanup_queue(disk->queue); | ||
1815 | disk->queue = NULL; | ||
1816 | init_queue_failure: | ||
1817 | return -1; | ||
1653 | } | 1818 | } |
1654 | 1819 | ||
1655 | /* This function will check the usage_count of the drive to be updated/added. | 1820 | /* This function will check the usage_count of the drive to be updated/added. |
@@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, | |||
1662 | * is also the controller node. Any changes to disk 0 will show up on | 1827 | * is also the controller node. Any changes to disk 0 will show up on |
1663 | * the next reboot. | 1828 | * the next reboot. |
1664 | */ | 1829 | */ |
1665 | static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | 1830 | static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, |
1831 | int via_ioctl) | ||
1666 | { | 1832 | { |
1667 | ctlr_info_t *h = hba[ctlr]; | 1833 | ctlr_info_t *h = hba[ctlr]; |
1668 | struct gendisk *disk; | 1834 | struct gendisk *disk; |
@@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
1672 | unsigned long flags = 0; | 1838 | unsigned long flags = 0; |
1673 | int ret = 0; | 1839 | int ret = 0; |
1674 | drive_info_struct *drvinfo; | 1840 | drive_info_struct *drvinfo; |
1675 | int was_only_controller_node; | ||
1676 | 1841 | ||
1677 | /* Get information about the disk and modify the driver structure */ | 1842 | /* Get information about the disk and modify the driver structure */ |
1678 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); | 1843 | inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); |
1679 | drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); | 1844 | drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); |
1680 | if (inq_buff == NULL || drvinfo == NULL) | 1845 | if (inq_buff == NULL || drvinfo == NULL) |
1681 | goto mem_msg; | 1846 | goto mem_msg; |
1682 | 1847 | ||
1683 | /* See if we're trying to update the "controller node" | ||
1684 | * this will happen the when the first logical drive gets | ||
1685 | * created by ACU. | ||
1686 | */ | ||
1687 | was_only_controller_node = (drv_index == 0 && | ||
1688 | h->drv[0].raid_level == -1); | ||
1689 | |||
1690 | /* testing to see if 16-byte CDBs are already being used */ | 1848 | /* testing to see if 16-byte CDBs are already being used */ |
1691 | if (h->cciss_read == CCISS_READ_16) { | 1849 | if (h->cciss_read == CCISS_READ_16) { |
1692 | cciss_read_capacity_16(h->ctlr, drv_index, 1, | 1850 | cciss_read_capacity_16(h->ctlr, drv_index, 1, |
@@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
1719 | drvinfo->model, drvinfo->rev); | 1877 | drvinfo->model, drvinfo->rev); |
1720 | cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, | 1878 | cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, |
1721 | sizeof(drvinfo->serial_no)); | 1879 | sizeof(drvinfo->serial_no)); |
1880 | /* Save the lunid in case we deregister the disk, below. */ | ||
1881 | memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, | ||
1882 | sizeof(drvinfo->LunID)); | ||
1722 | 1883 | ||
1723 | /* Is it the same disk we already know, and nothing's changed? */ | 1884 | /* Is it the same disk we already know, and nothing's changed? */ |
1724 | if (h->drv[drv_index].raid_level != -1 && | 1885 | if (h->drv[drv_index]->raid_level != -1 && |
1725 | ((memcmp(drvinfo->serial_no, | 1886 | ((memcmp(drvinfo->serial_no, |
1726 | h->drv[drv_index].serial_no, 16) == 0) && | 1887 | h->drv[drv_index]->serial_no, 16) == 0) && |
1727 | drvinfo->block_size == h->drv[drv_index].block_size && | 1888 | drvinfo->block_size == h->drv[drv_index]->block_size && |
1728 | drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && | 1889 | drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && |
1729 | drvinfo->heads == h->drv[drv_index].heads && | 1890 | drvinfo->heads == h->drv[drv_index]->heads && |
1730 | drvinfo->sectors == h->drv[drv_index].sectors && | 1891 | drvinfo->sectors == h->drv[drv_index]->sectors && |
1731 | drvinfo->cylinders == h->drv[drv_index].cylinders)) | 1892 | drvinfo->cylinders == h->drv[drv_index]->cylinders)) |
1732 | /* The disk is unchanged, nothing to update */ | 1893 | /* The disk is unchanged, nothing to update */ |
1733 | goto freeret; | 1894 | goto freeret; |
1734 | 1895 | ||
@@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
1738 | * If the disk already exists then deregister it before proceeding | 1899 | * If the disk already exists then deregister it before proceeding |
1739 | * (unless it's the first disk (for the controller node). | 1900 | * (unless it's the first disk (for the controller node). |
1740 | */ | 1901 | */ |
1741 | if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { | 1902 | if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { |
1742 | printk(KERN_WARNING "disk %d has changed.\n", drv_index); | 1903 | printk(KERN_WARNING "disk %d has changed.\n", drv_index); |
1743 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 1904 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); |
1744 | h->drv[drv_index].busy_configuring = 1; | 1905 | h->drv[drv_index]->busy_configuring = 1; |
1745 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 1906 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); |
1746 | 1907 | ||
1747 | /* deregister_disk sets h->drv[drv_index].queue = NULL | 1908 | /* deregister_disk sets h->drv[drv_index]->queue = NULL |
1748 | * which keeps the interrupt handler from starting | 1909 | * which keeps the interrupt handler from starting |
1749 | * the queue. | 1910 | * the queue. |
1750 | */ | 1911 | */ |
1751 | ret = deregister_disk(h, drv_index, 0); | 1912 | ret = deregister_disk(h, drv_index, 0, via_ioctl); |
1752 | h->drv[drv_index].busy_configuring = 0; | ||
1753 | } | 1913 | } |
1754 | 1914 | ||
1755 | /* If the disk is in use return */ | 1915 | /* If the disk is in use return */ |
@@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
1757 | goto freeret; | 1917 | goto freeret; |
1758 | 1918 | ||
1759 | /* Save the new information from cciss_geometry_inquiry | 1919 | /* Save the new information from cciss_geometry_inquiry |
1760 | * and serial number inquiry. | 1920 | * and serial number inquiry. If the disk was deregistered |
1921 | * above, then h->drv[drv_index] will be NULL. | ||
1761 | */ | 1922 | */ |
1762 | h->drv[drv_index].block_size = drvinfo->block_size; | 1923 | if (h->drv[drv_index] == NULL) { |
1763 | h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; | 1924 | drvinfo->device_initialized = 0; |
1764 | h->drv[drv_index].heads = drvinfo->heads; | 1925 | h->drv[drv_index] = drvinfo; |
1765 | h->drv[drv_index].sectors = drvinfo->sectors; | 1926 | drvinfo = NULL; /* so it won't be freed below. */ |
1766 | h->drv[drv_index].cylinders = drvinfo->cylinders; | 1927 | } else { |
1767 | h->drv[drv_index].raid_level = drvinfo->raid_level; | 1928 | /* special case for cxd0 */ |
1768 | memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); | 1929 | h->drv[drv_index]->block_size = drvinfo->block_size; |
1769 | memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); | 1930 | h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; |
1770 | memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); | 1931 | h->drv[drv_index]->heads = drvinfo->heads; |
1771 | memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); | 1932 | h->drv[drv_index]->sectors = drvinfo->sectors; |
1933 | h->drv[drv_index]->cylinders = drvinfo->cylinders; | ||
1934 | h->drv[drv_index]->raid_level = drvinfo->raid_level; | ||
1935 | memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); | ||
1936 | memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, | ||
1937 | VENDOR_LEN + 1); | ||
1938 | memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); | ||
1939 | memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); | ||
1940 | } | ||
1772 | 1941 | ||
1773 | ++h->num_luns; | 1942 | ++h->num_luns; |
1774 | disk = h->gendisk[drv_index]; | 1943 | disk = h->gendisk[drv_index]; |
1775 | set_capacity(disk, h->drv[drv_index].nr_blocks); | 1944 | set_capacity(disk, h->drv[drv_index]->nr_blocks); |
1776 | 1945 | ||
1777 | /* If it's not disk 0 (drv_index != 0) | 1946 | /* If it's not disk 0 (drv_index != 0) |
1778 | * or if it was disk 0, but there was previously | 1947 | * or if it was disk 0, but there was previously |
@@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) | |||
1780 | * (raid_level == -1) then we want to update the | 1949 | * (raid_level == -1) then we want to update the |
1781 | * logical drive's information. | 1950 | * logical drive's information. |
1782 | */ | 1951 | */ |
1783 | if (drv_index || first_time) | 1952 | if (drv_index || first_time) { |
1784 | cciss_add_disk(h, disk, drv_index); | 1953 | if (cciss_add_disk(h, disk, drv_index) != 0) { |
1954 | cciss_free_gendisk(h, drv_index); | ||
1955 | cciss_free_drive_info(h, drv_index); | ||
1956 | printk(KERN_WARNING "cciss:%d could not update " | ||
1957 | "disk %d\n", h->ctlr, drv_index); | ||
1958 | --h->num_luns; | ||
1959 | } | ||
1960 | } | ||
1785 | 1961 | ||
1786 | freeret: | 1962 | freeret: |
1787 | kfree(inq_buff); | 1963 | kfree(inq_buff); |
@@ -1793,28 +1969,70 @@ mem_msg: | |||
1793 | } | 1969 | } |
1794 | 1970 | ||
1795 | /* This function will find the first index of the controllers drive array | 1971 | /* This function will find the first index of the controllers drive array |
1796 | * that has a -1 for the raid_level and will return that index. This is | 1972 | * that has a null drv pointer and allocate the drive info struct and |
1797 | * where new drives will be added. If the index to be returned is greater | 1973 | * will return that index. This is where new drives will be added. |
1798 | * than the highest_lun index for the controller then highest_lun is set | 1974 | * If the index to be returned is greater than the highest_lun index for |
1799 | * to this new index. If there are no available indexes then -1 is returned. | 1975 | * the controller then highest_lun is set to this new index. |
1800 | * "controller_node" is used to know if this is a real logical drive, or just | 1976 | * If there are no available indexes or if the allocation fails, then -1 |
1801 | * the controller node, which determines if this counts towards highest_lun. | 1977 | * is returned. "controller_node" is used to know if this is a real |
1978 | * logical drive, or just the controller node, which determines if this | ||
1979 | * counts towards highest_lun. | ||
1802 | */ | 1980 | */ |
1803 | static int cciss_find_free_drive_index(int ctlr, int controller_node) | 1981 | static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) |
1804 | { | 1982 | { |
1805 | int i; | 1983 | int i; |
1984 | drive_info_struct *drv; | ||
1806 | 1985 | ||
1986 | /* Search for an empty slot for our drive info */ | ||
1807 | for (i = 0; i < CISS_MAX_LUN; i++) { | 1987 | for (i = 0; i < CISS_MAX_LUN; i++) { |
1808 | if (hba[ctlr]->drv[i].raid_level == -1) { | 1988 | |
1809 | if (i > hba[ctlr]->highest_lun) | 1989 | /* if not cxd0 case, and it's occupied, skip it. */ |
1810 | if (!controller_node) | 1990 | if (h->drv[i] && i != 0) |
1811 | hba[ctlr]->highest_lun = i; | 1991 | continue; |
1992 | /* | ||
1993 | * If it's cxd0 case, and drv is alloc'ed already, and a | ||
1994 | * disk is configured there, skip it. | ||
1995 | */ | ||
1996 | if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) | ||
1997 | continue; | ||
1998 | |||
1999 | /* | ||
2000 | * We've found an empty slot. Update highest_lun | ||
2001 | * provided this isn't just the fake cxd0 controller node. | ||
2002 | */ | ||
2003 | if (i > h->highest_lun && !controller_node) | ||
2004 | h->highest_lun = i; | ||
2005 | |||
2006 | /* If adding a real disk at cxd0, and it's already alloc'ed */ | ||
2007 | if (i == 0 && h->drv[i] != NULL) | ||
1812 | return i; | 2008 | return i; |
1813 | } | 2009 | |
2010 | /* | ||
2011 | * Found an empty slot, not already alloc'ed. Allocate it. | ||
2012 | * Mark it with raid_level == -1, so we know it's new later on. | ||
2013 | */ | ||
2014 | drv = kzalloc(sizeof(*drv), GFP_KERNEL); | ||
2015 | if (!drv) | ||
2016 | return -1; | ||
2017 | drv->raid_level = -1; /* so we know it's new */ | ||
2018 | h->drv[i] = drv; | ||
2019 | return i; | ||
1814 | } | 2020 | } |
1815 | return -1; | 2021 | return -1; |
1816 | } | 2022 | } |
1817 | 2023 | ||
2024 | static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) | ||
2025 | { | ||
2026 | kfree(h->drv[drv_index]); | ||
2027 | h->drv[drv_index] = NULL; | ||
2028 | } | ||
2029 | |||
2030 | static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) | ||
2031 | { | ||
2032 | put_disk(h->gendisk[drv_index]); | ||
2033 | h->gendisk[drv_index] = NULL; | ||
2034 | } | ||
2035 | |||
1818 | /* cciss_add_gendisk finds a free hba[]->drv structure | 2036 | /* cciss_add_gendisk finds a free hba[]->drv structure |
1819 | * and allocates a gendisk if needed, and sets the lunid | 2037 | * and allocates a gendisk if needed, and sets the lunid |
1820 | * in the drvinfo structure. It returns the index into | 2038 | * in the drvinfo structure. It returns the index into |
@@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node) | |||
1824 | * a means to talk to the controller in case no logical | 2042 | * a means to talk to the controller in case no logical |
1825 | * drives have yet been configured. | 2043 | * drives have yet been configured. |
1826 | */ | 2044 | */ |
1827 | static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) | 2045 | static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], |
2046 | int controller_node) | ||
1828 | { | 2047 | { |
1829 | int drv_index; | 2048 | int drv_index; |
1830 | 2049 | ||
1831 | drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); | 2050 | drv_index = cciss_alloc_drive_info(h, controller_node); |
1832 | if (drv_index == -1) | 2051 | if (drv_index == -1) |
1833 | return -1; | 2052 | return -1; |
2053 | |||
1834 | /*Check if the gendisk needs to be allocated */ | 2054 | /*Check if the gendisk needs to be allocated */ |
1835 | if (!h->gendisk[drv_index]) { | 2055 | if (!h->gendisk[drv_index]) { |
1836 | h->gendisk[drv_index] = | 2056 | h->gendisk[drv_index] = |
@@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) | |||
1839 | printk(KERN_ERR "cciss%d: could not " | 2059 | printk(KERN_ERR "cciss%d: could not " |
1840 | "allocate a new disk %d\n", | 2060 | "allocate a new disk %d\n", |
1841 | h->ctlr, drv_index); | 2061 | h->ctlr, drv_index); |
1842 | return -1; | 2062 | goto err_free_drive_info; |
1843 | } | 2063 | } |
1844 | } | 2064 | } |
1845 | h->drv[drv_index].LunID = lunid; | 2065 | memcpy(h->drv[drv_index]->LunID, lunid, |
1846 | if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) | 2066 | sizeof(h->drv[drv_index]->LunID)); |
2067 | if (cciss_create_ld_sysfs_entry(h, drv_index)) | ||
1847 | goto err_free_disk; | 2068 | goto err_free_disk; |
1848 | |||
1849 | /* Don't need to mark this busy because nobody */ | 2069 | /* Don't need to mark this busy because nobody */ |
1850 | /* else knows about this disk yet to contend */ | 2070 | /* else knows about this disk yet to contend */ |
1851 | /* for access to it. */ | 2071 | /* for access to it. */ |
1852 | h->drv[drv_index].busy_configuring = 0; | 2072 | h->drv[drv_index]->busy_configuring = 0; |
1853 | wmb(); | 2073 | wmb(); |
1854 | return drv_index; | 2074 | return drv_index; |
1855 | 2075 | ||
1856 | err_free_disk: | 2076 | err_free_disk: |
1857 | put_disk(h->gendisk[drv_index]); | 2077 | cciss_free_gendisk(h, drv_index); |
1858 | h->gendisk[drv_index] = NULL; | 2078 | err_free_drive_info: |
2079 | cciss_free_drive_info(h, drv_index); | ||
1859 | return -1; | 2080 | return -1; |
1860 | } | 2081 | } |
1861 | 2082 | ||
@@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h) | |||
1872 | if (h->gendisk[0] != NULL) /* already did this? Then bail. */ | 2093 | if (h->gendisk[0] != NULL) /* already did this? Then bail. */ |
1873 | return; | 2094 | return; |
1874 | 2095 | ||
1875 | drv_index = cciss_add_gendisk(h, 0, 1); | 2096 | drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); |
1876 | if (drv_index == -1) { | 2097 | if (drv_index == -1) |
1877 | printk(KERN_WARNING "cciss%d: could not " | 2098 | goto error; |
1878 | "add disk 0.\n", h->ctlr); | 2099 | h->drv[drv_index]->block_size = 512; |
1879 | return; | 2100 | h->drv[drv_index]->nr_blocks = 0; |
1880 | } | 2101 | h->drv[drv_index]->heads = 0; |
1881 | h->drv[drv_index].block_size = 512; | 2102 | h->drv[drv_index]->sectors = 0; |
1882 | h->drv[drv_index].nr_blocks = 0; | 2103 | h->drv[drv_index]->cylinders = 0; |
1883 | h->drv[drv_index].heads = 0; | 2104 | h->drv[drv_index]->raid_level = -1; |
1884 | h->drv[drv_index].sectors = 0; | 2105 | memset(h->drv[drv_index]->serial_no, 0, 16); |
1885 | h->drv[drv_index].cylinders = 0; | ||
1886 | h->drv[drv_index].raid_level = -1; | ||
1887 | memset(h->drv[drv_index].serial_no, 0, 16); | ||
1888 | disk = h->gendisk[drv_index]; | 2106 | disk = h->gendisk[drv_index]; |
1889 | cciss_add_disk(h, disk, drv_index); | 2107 | if (cciss_add_disk(h, disk, drv_index) == 0) |
2108 | return; | ||
2109 | cciss_free_gendisk(h, drv_index); | ||
2110 | cciss_free_drive_info(h, drv_index); | ||
2111 | error: | ||
2112 | printk(KERN_WARNING "cciss%d: could not " | ||
2113 | "add disk 0.\n", h->ctlr); | ||
2114 | return; | ||
1890 | } | 2115 | } |
1891 | 2116 | ||
1892 | /* This function will add and remove logical drives from the Logical | 2117 | /* This function will add and remove logical drives from the Logical |
@@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h) | |||
1897 | * INPUT | 2122 | * INPUT |
1898 | * h = The controller to perform the operations on | 2123 | * h = The controller to perform the operations on |
1899 | */ | 2124 | */ |
1900 | static int rebuild_lun_table(ctlr_info_t *h, int first_time) | 2125 | static int rebuild_lun_table(ctlr_info_t *h, int first_time, |
2126 | int via_ioctl) | ||
1901 | { | 2127 | { |
1902 | int ctlr = h->ctlr; | 2128 | int ctlr = h->ctlr; |
1903 | int num_luns; | 2129 | int num_luns; |
@@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
1907 | int i; | 2133 | int i; |
1908 | int drv_found; | 2134 | int drv_found; |
1909 | int drv_index = 0; | 2135 | int drv_index = 0; |
1910 | __u32 lunid = 0; | 2136 | unsigned char lunid[8] = CTLR_LUNID; |
1911 | unsigned long flags; | 2137 | unsigned long flags; |
1912 | 2138 | ||
1913 | if (!capable(CAP_SYS_RAWIO)) | 2139 | if (!capable(CAP_SYS_RAWIO)) |
@@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
1960 | drv_found = 0; | 2186 | drv_found = 0; |
1961 | 2187 | ||
1962 | /* skip holes in the array from already deleted drives */ | 2188 | /* skip holes in the array from already deleted drives */ |
1963 | if (h->drv[i].raid_level == -1) | 2189 | if (h->drv[i] == NULL) |
1964 | continue; | 2190 | continue; |
1965 | 2191 | ||
1966 | for (j = 0; j < num_luns; j++) { | 2192 | for (j = 0; j < num_luns; j++) { |
1967 | memcpy(&lunid, &ld_buff->LUN[j][0], 4); | 2193 | memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); |
1968 | lunid = le32_to_cpu(lunid); | 2194 | if (memcmp(h->drv[i]->LunID, lunid, |
1969 | if (h->drv[i].LunID == lunid) { | 2195 | sizeof(lunid)) == 0) { |
1970 | drv_found = 1; | 2196 | drv_found = 1; |
1971 | break; | 2197 | break; |
1972 | } | 2198 | } |
@@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
1974 | if (!drv_found) { | 2200 | if (!drv_found) { |
1975 | /* Deregister it from the OS, it's gone. */ | 2201 | /* Deregister it from the OS, it's gone. */ |
1976 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); | 2202 | spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); |
1977 | h->drv[i].busy_configuring = 1; | 2203 | h->drv[i]->busy_configuring = 1; |
1978 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); | 2204 | spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); |
1979 | return_code = deregister_disk(h, i, 1); | 2205 | return_code = deregister_disk(h, i, 1, via_ioctl); |
1980 | cciss_destroy_ld_sysfs_entry(&h->drv[i]); | 2206 | if (h->drv[i] != NULL) |
1981 | h->drv[i].busy_configuring = 0; | 2207 | h->drv[i]->busy_configuring = 0; |
1982 | } | 2208 | } |
1983 | } | 2209 | } |
1984 | 2210 | ||
@@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
1992 | 2218 | ||
1993 | drv_found = 0; | 2219 | drv_found = 0; |
1994 | 2220 | ||
1995 | memcpy(&lunid, &ld_buff->LUN[i][0], 4); | 2221 | memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); |
1996 | lunid = le32_to_cpu(lunid); | ||
1997 | |||
1998 | /* Find if the LUN is already in the drive array | 2222 | /* Find if the LUN is already in the drive array |
1999 | * of the driver. If so then update its info | 2223 | * of the driver. If so then update its info |
2000 | * if not in use. If it does not exist then find | 2224 | * if not in use. If it does not exist then find |
2001 | * the first free index and add it. | 2225 | * the first free index and add it. |
2002 | */ | 2226 | */ |
2003 | for (j = 0; j <= h->highest_lun; j++) { | 2227 | for (j = 0; j <= h->highest_lun; j++) { |
2004 | if (h->drv[j].raid_level != -1 && | 2228 | if (h->drv[j] != NULL && |
2005 | h->drv[j].LunID == lunid) { | 2229 | memcmp(h->drv[j]->LunID, lunid, |
2230 | sizeof(h->drv[j]->LunID)) == 0) { | ||
2006 | drv_index = j; | 2231 | drv_index = j; |
2007 | drv_found = 1; | 2232 | drv_found = 1; |
2008 | break; | 2233 | break; |
@@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time) | |||
2015 | if (drv_index == -1) | 2240 | if (drv_index == -1) |
2016 | goto freeret; | 2241 | goto freeret; |
2017 | } | 2242 | } |
2018 | cciss_update_drive_info(ctlr, drv_index, first_time); | 2243 | cciss_update_drive_info(ctlr, drv_index, first_time, |
2244 | via_ioctl); | ||
2019 | } /* end for */ | 2245 | } /* end for */ |
2020 | 2246 | ||
2021 | freeret: | 2247 | freeret: |
@@ -2032,6 +2258,25 @@ mem_msg: | |||
2032 | goto freeret; | 2258 | goto freeret; |
2033 | } | 2259 | } |
2034 | 2260 | ||
2261 | static void cciss_clear_drive_info(drive_info_struct *drive_info) | ||
2262 | { | ||
2263 | /* zero out the disk size info */ | ||
2264 | drive_info->nr_blocks = 0; | ||
2265 | drive_info->block_size = 0; | ||
2266 | drive_info->heads = 0; | ||
2267 | drive_info->sectors = 0; | ||
2268 | drive_info->cylinders = 0; | ||
2269 | drive_info->raid_level = -1; | ||
2270 | memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); | ||
2271 | memset(drive_info->model, 0, sizeof(drive_info->model)); | ||
2272 | memset(drive_info->rev, 0, sizeof(drive_info->rev)); | ||
2273 | memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); | ||
2274 | /* | ||
2275 | * don't clear the LUNID though, we need to remember which | ||
2276 | * one this one is. | ||
2277 | */ | ||
2278 | } | ||
2279 | |||
2035 | /* This function will deregister the disk and its queue from the | 2280 | /* This function will deregister the disk and its queue from the |
2036 | * kernel. It must be called with the controller lock held and the | 2281 | * kernel. It must be called with the controller lock held and the |
2037 | * drv structures busy_configuring flag set. It's parameters are: | 2282 | * drv structures busy_configuring flag set. It's parameters are: |
@@ -2046,43 +2291,48 @@ mem_msg: | |||
2046 | * the disk in preparation for re-adding it. In this case | 2291 | * the disk in preparation for re-adding it. In this case |
2047 | * the highest_lun should be left unchanged and the LunID | 2292 | * the highest_lun should be left unchanged and the LunID |
2048 | * should not be cleared. | 2293 | * should not be cleared. |
2294 | * via_ioctl | ||
2295 | * This indicates whether we've reached this path via ioctl. | ||
2296 | * This affects the maximum usage count allowed for c0d0 to be messed with. | ||
2297 | * If this path is reached via ioctl(), then the max_usage_count will | ||
2298 | * be 1, as the process calling ioctl() has got to have the device open. | ||
2299 | * If we get here via sysfs, then the max usage count will be zero. | ||
2049 | */ | 2300 | */ |
2050 | static int deregister_disk(ctlr_info_t *h, int drv_index, | 2301 | static int deregister_disk(ctlr_info_t *h, int drv_index, |
2051 | int clear_all) | 2302 | int clear_all, int via_ioctl) |
2052 | { | 2303 | { |
2053 | int i; | 2304 | int i; |
2054 | struct gendisk *disk; | 2305 | struct gendisk *disk; |
2055 | drive_info_struct *drv; | 2306 | drive_info_struct *drv; |
2307 | int recalculate_highest_lun; | ||
2056 | 2308 | ||
2057 | if (!capable(CAP_SYS_RAWIO)) | 2309 | if (!capable(CAP_SYS_RAWIO)) |
2058 | return -EPERM; | 2310 | return -EPERM; |
2059 | 2311 | ||
2060 | drv = &h->drv[drv_index]; | 2312 | drv = h->drv[drv_index]; |
2061 | disk = h->gendisk[drv_index]; | 2313 | disk = h->gendisk[drv_index]; |
2062 | 2314 | ||
2063 | /* make sure logical volume is NOT in use */ | 2315 | /* make sure logical volume is NOT in use */ |
2064 | if (clear_all || (h->gendisk[0] == disk)) { | 2316 | if (clear_all || (h->gendisk[0] == disk)) { |
2065 | if (drv->usage_count > 1) | 2317 | if (drv->usage_count > via_ioctl) |
2066 | return -EBUSY; | 2318 | return -EBUSY; |
2067 | } else if (drv->usage_count > 0) | 2319 | } else if (drv->usage_count > 0) |
2068 | return -EBUSY; | 2320 | return -EBUSY; |
2069 | 2321 | ||
2322 | recalculate_highest_lun = (drv == h->drv[h->highest_lun]); | ||
2323 | |||
2070 | /* invalidate the devices and deregister the disk. If it is disk | 2324 | /* invalidate the devices and deregister the disk. If it is disk |
2071 | * zero do not deregister it but just zero out it's values. This | 2325 | * zero do not deregister it but just zero out it's values. This |
2072 | * allows us to delete disk zero but keep the controller registered. | 2326 | * allows us to delete disk zero but keep the controller registered. |
2073 | */ | 2327 | */ |
2074 | if (h->gendisk[0] != disk) { | 2328 | if (h->gendisk[0] != disk) { |
2075 | struct request_queue *q = disk->queue; | 2329 | struct request_queue *q = disk->queue; |
2076 | if (disk->flags & GENHD_FL_UP) | 2330 | if (disk->flags & GENHD_FL_UP) { |
2331 | cciss_destroy_ld_sysfs_entry(h, drv_index, 0); | ||
2077 | del_gendisk(disk); | 2332 | del_gendisk(disk); |
2078 | if (q) { | ||
2079 | blk_cleanup_queue(q); | ||
2080 | /* Set drv->queue to NULL so that we do not try | ||
2081 | * to call blk_start_queue on this queue in the | ||
2082 | * interrupt handler | ||
2083 | */ | ||
2084 | drv->queue = NULL; | ||
2085 | } | 2333 | } |
2334 | if (q) | ||
2335 | blk_cleanup_queue(q); | ||
2086 | /* If clear_all is set then we are deleting the logical | 2336 | /* If clear_all is set then we are deleting the logical |
2087 | * drive, not just refreshing its info. For drives | 2337 | * drive, not just refreshing its info. For drives |
2088 | * other than disk 0 we will call put_disk. We do not | 2338 | * other than disk 0 we will call put_disk. We do not |
@@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index, | |||
2105 | } | 2355 | } |
2106 | } else { | 2356 | } else { |
2107 | set_capacity(disk, 0); | 2357 | set_capacity(disk, 0); |
2358 | cciss_clear_drive_info(drv); | ||
2108 | } | 2359 | } |
2109 | 2360 | ||
2110 | --h->num_luns; | 2361 | --h->num_luns; |
2111 | /* zero out the disk size info */ | ||
2112 | drv->nr_blocks = 0; | ||
2113 | drv->block_size = 0; | ||
2114 | drv->heads = 0; | ||
2115 | drv->sectors = 0; | ||
2116 | drv->cylinders = 0; | ||
2117 | drv->raid_level = -1; /* This can be used as a flag variable to | ||
2118 | * indicate that this element of the drive | ||
2119 | * array is free. | ||
2120 | */ | ||
2121 | |||
2122 | if (clear_all) { | ||
2123 | /* check to see if it was the last disk */ | ||
2124 | if (drv == h->drv + h->highest_lun) { | ||
2125 | /* if so, find the new hightest lun */ | ||
2126 | int i, newhighest = -1; | ||
2127 | for (i = 0; i <= h->highest_lun; i++) { | ||
2128 | /* if the disk has size > 0, it is available */ | ||
2129 | if (h->drv[i].heads) | ||
2130 | newhighest = i; | ||
2131 | } | ||
2132 | h->highest_lun = newhighest; | ||
2133 | } | ||
2134 | 2362 | ||
2135 | drv->LunID = 0; | 2363 | /* if it was the last disk, find the new highest lun */ |
2364 | if (clear_all && recalculate_highest_lun) { | ||
2365 | int i, newhighest = -1; | ||
2366 | for (i = 0; i <= h->highest_lun; i++) { | ||
2367 | /* if the disk has size > 0, it is available */ | ||
2368 | if (h->drv[i] && h->drv[i]->heads) | ||
2369 | newhighest = i; | ||
2370 | } | ||
2371 | h->highest_lun = newhighest; | ||
2136 | } | 2372 | } |
2137 | return 0; | 2373 | return 0; |
2138 | } | 2374 | } |
@@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol, | |||
2479 | } else { /* Get geometry failed */ | 2715 | } else { /* Get geometry failed */ |
2480 | printk(KERN_WARNING "cciss: reading geometry failed\n"); | 2716 | printk(KERN_WARNING "cciss: reading geometry failed\n"); |
2481 | } | 2717 | } |
2482 | printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", | ||
2483 | drv->heads, drv->sectors, drv->cylinders); | ||
2484 | } | 2718 | } |
2485 | 2719 | ||
2486 | static void | 2720 | static void |
@@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size, | |||
2514 | *total_size = 0; | 2748 | *total_size = 0; |
2515 | *block_size = BLOCK_SIZE; | 2749 | *block_size = BLOCK_SIZE; |
2516 | } | 2750 | } |
2517 | if (*total_size != 0) | ||
2518 | printk(KERN_INFO " blocks= %llu block_size= %d\n", | ||
2519 | (unsigned long long)*total_size+1, *block_size); | ||
2520 | kfree(buf); | 2751 | kfree(buf); |
2521 | } | 2752 | } |
2522 | 2753 | ||
@@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk) | |||
2568 | InquiryData_struct *inq_buff = NULL; | 2799 | InquiryData_struct *inq_buff = NULL; |
2569 | 2800 | ||
2570 | for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { | 2801 | for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { |
2571 | if (h->drv[logvol].LunID == drv->LunID) { | 2802 | if (memcmp(h->drv[logvol]->LunID, drv->LunID, |
2803 | sizeof(drv->LunID)) == 0) { | ||
2572 | FOUND = 1; | 2804 | FOUND = 1; |
2573 | break; | 2805 | break; |
2574 | } | 2806 | } |
@@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q) | |||
3053 | /* The first 2 bits are reserved for controller error reporting. */ | 3285 | /* The first 2 bits are reserved for controller error reporting. */ |
3054 | c->Header.Tag.lower = (c->cmdindex << 3); | 3286 | c->Header.Tag.lower = (c->cmdindex << 3); |
3055 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ | 3287 | c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ |
3056 | c->Header.LUN.LogDev.VolId = drv->LunID; | 3288 | memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); |
3057 | c->Header.LUN.LogDev.Mode = 1; | ||
3058 | c->Request.CDBLen = 10; // 12 byte commands not in FW yet; | 3289 | c->Request.CDBLen = 10; // 12 byte commands not in FW yet; |
3059 | c->Request.Type.Type = TYPE_CMD; // It is a command. | 3290 | c->Request.Type.Type = TYPE_CMD; // It is a command. |
3060 | c->Request.Type.Attribute = ATTR_SIMPLE; | 3291 | c->Request.Type.Attribute = ATTR_SIMPLE; |
@@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id) | |||
3232 | return IRQ_HANDLED; | 3463 | return IRQ_HANDLED; |
3233 | } | 3464 | } |
3234 | 3465 | ||
3466 | /** | ||
3467 | * add_to_scan_list() - add controller to rescan queue | ||
3468 | * @h: Pointer to the controller. | ||
3469 | * | ||
3470 | * Adds the controller to the rescan queue if not already on the queue. | ||
3471 | * | ||
3472 | * returns 1 if added to the queue, 0 if skipped (could be on the | ||
3473 | * queue already, or the controller could be initializing or shutting | ||
3474 | * down). | ||
3475 | **/ | ||
3476 | static int add_to_scan_list(struct ctlr_info *h) | ||
3477 | { | ||
3478 | struct ctlr_info *test_h; | ||
3479 | int found = 0; | ||
3480 | int ret = 0; | ||
3481 | |||
3482 | if (h->busy_initializing) | ||
3483 | return 0; | ||
3484 | |||
3485 | if (!mutex_trylock(&h->busy_shutting_down)) | ||
3486 | return 0; | ||
3487 | |||
3488 | mutex_lock(&scan_mutex); | ||
3489 | list_for_each_entry(test_h, &scan_q, scan_list) { | ||
3490 | if (test_h == h) { | ||
3491 | found = 1; | ||
3492 | break; | ||
3493 | } | ||
3494 | } | ||
3495 | if (!found && !h->busy_scanning) { | ||
3496 | INIT_COMPLETION(h->scan_wait); | ||
3497 | list_add_tail(&h->scan_list, &scan_q); | ||
3498 | ret = 1; | ||
3499 | } | ||
3500 | mutex_unlock(&scan_mutex); | ||
3501 | mutex_unlock(&h->busy_shutting_down); | ||
3502 | |||
3503 | return ret; | ||
3504 | } | ||
3505 | |||
3506 | /** | ||
3507 | * remove_from_scan_list() - remove controller from rescan queue | ||
3508 | * @h: Pointer to the controller. | ||
3509 | * | ||
3510 | * Removes the controller from the rescan queue if present. Blocks if | ||
3511 | * the controller is currently conducting a rescan. | ||
3512 | **/ | ||
3513 | static void remove_from_scan_list(struct ctlr_info *h) | ||
3514 | { | ||
3515 | struct ctlr_info *test_h, *tmp_h; | ||
3516 | int scanning = 0; | ||
3517 | |||
3518 | mutex_lock(&scan_mutex); | ||
3519 | list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { | ||
3520 | if (test_h == h) { | ||
3521 | list_del(&h->scan_list); | ||
3522 | complete_all(&h->scan_wait); | ||
3523 | mutex_unlock(&scan_mutex); | ||
3524 | return; | ||
3525 | } | ||
3526 | } | ||
3527 | if (h->busy_scanning) | ||
3528 | scanning = 1; | ||
3529 | mutex_unlock(&scan_mutex); | ||
3530 | |||
3531 | if (scanning) | ||
3532 | wait_for_completion(&h->scan_wait); | ||
3533 | } | ||
3534 | |||
3535 | /** | ||
3536 | * scan_thread() - kernel thread used to rescan controllers | ||
3537 | * @data: Ignored. | ||
3538 | * | ||
3539 | * A kernel thread used to scan for drive topology changes on | ||
3540 | * controllers. The thread processes only one controller at a time | ||
3541 | * using a queue. Controllers are added to the queue using | ||
3542 | * add_to_scan_list() and removed from the queue either after done | ||
3543 | * processing or using remove_from_scan_list(). | ||
3544 | * | ||
3545 | * returns 0. | ||
3546 | **/ | ||
3235 | static int scan_thread(void *data) | 3547 | static int scan_thread(void *data) |
3236 | { | 3548 | { |
3237 | ctlr_info_t *h = data; | 3549 | struct ctlr_info *h; |
3238 | int rc; | ||
3239 | DECLARE_COMPLETION_ONSTACK(wait); | ||
3240 | h->rescan_wait = &wait; | ||
3241 | 3550 | ||
3242 | for (;;) { | 3551 | while (1) { |
3243 | rc = wait_for_completion_interruptible(&wait); | 3552 | set_current_state(TASK_INTERRUPTIBLE); |
3553 | schedule(); | ||
3244 | if (kthread_should_stop()) | 3554 | if (kthread_should_stop()) |
3245 | break; | 3555 | break; |
3246 | if (!rc) | 3556 | |
3247 | rebuild_lun_table(h, 0); | 3557 | while (1) { |
3558 | mutex_lock(&scan_mutex); | ||
3559 | if (list_empty(&scan_q)) { | ||
3560 | mutex_unlock(&scan_mutex); | ||
3561 | break; | ||
3562 | } | ||
3563 | |||
3564 | h = list_entry(scan_q.next, | ||
3565 | struct ctlr_info, | ||
3566 | scan_list); | ||
3567 | list_del(&h->scan_list); | ||
3568 | h->busy_scanning = 1; | ||
3569 | mutex_unlock(&scan_mutex); | ||
3570 | |||
3571 | if (h) { | ||
3572 | rebuild_lun_table(h, 0, 0); | ||
3573 | complete_all(&h->scan_wait); | ||
3574 | mutex_lock(&scan_mutex); | ||
3575 | h->busy_scanning = 0; | ||
3576 | mutex_unlock(&scan_mutex); | ||
3577 | } | ||
3578 | } | ||
3248 | } | 3579 | } |
3580 | |||
3249 | return 0; | 3581 | return 0; |
3250 | } | 3582 | } |
3251 | 3583 | ||
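The rework above replaces the old per-controller rescan kthread with one global scan thread fed from a mutex-protected queue: add_to_scan_list() enqueues a controller and wakes the thread, the thread drains the queue one controller at a time, and waiters (the sysfs rescan store, or remove on unload) block on the per-controller completion. As a rough userspace analogue of that producer/consumer shape, with a condition variable standing in for wake_up_process()/schedule() and all names hypothetical:

/* Hedged analogue of the single scan thread plus rescan queue. */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

struct ctlr {
	int id;
	int queued;			/* nonzero while waiting on the queue */
	struct ctlr *next;
};

static pthread_mutex_t scan_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t scan_wake = PTHREAD_COND_INITIALIZER;
static struct ctlr *scan_q;		/* LIFO for brevity; the driver keeps a FIFO */

static void add_to_scan_list(struct ctlr *h)
{
	pthread_mutex_lock(&scan_mutex);
	if (!h->queued) {
		h->next = scan_q;
		scan_q = h;
		h->queued = 1;
		pthread_cond_signal(&scan_wake);
	}
	pthread_mutex_unlock(&scan_mutex);
}

static void *scan_thread(void *unused)
{
	(void)unused;
	pthread_mutex_lock(&scan_mutex);
	for (;;) {
		while (!scan_q)
			pthread_cond_wait(&scan_wake, &scan_mutex);
		struct ctlr *h = scan_q;
		scan_q = h->next;
		h->queued = 0;
		pthread_mutex_unlock(&scan_mutex);

		printf("rescanning controller %d\n", h->id);	/* rebuild_lun_table() goes here */

		pthread_mutex_lock(&scan_mutex);
	}
	return NULL;
}

int main(void)
{
	static struct ctlr c0 = { .id = 0 };
	pthread_t tid;

	pthread_create(&tid, NULL, scan_thread, NULL);
	add_to_scan_list(&c0);
	sleep(1);			/* give the demo scan time to run */
	return 0;
}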
@@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c) | |||
3268 | case REPORT_LUNS_CHANGED: | 3600 | case REPORT_LUNS_CHANGED: |
3269 | printk(KERN_WARNING "cciss%d: report LUN data " | 3601 | printk(KERN_WARNING "cciss%d: report LUN data " |
3270 | "changed\n", h->ctlr); | 3602 | "changed\n", h->ctlr); |
3271 | if (h->rescan_wait) | 3603 | add_to_scan_list(h); |
3272 | complete(h->rescan_wait); | 3604 | wake_up_process(cciss_scan_thread); |
3273 | return 1; | 3605 | return 1; |
3274 | break; | 3606 | break; |
3275 | case POWER_OR_RESET: | 3607 | case POWER_OR_RESET: |
@@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3489 | if (scratchpad == CCISS_FIRMWARE_READY) | 3821 | if (scratchpad == CCISS_FIRMWARE_READY) |
3490 | break; | 3822 | break; |
3491 | set_current_state(TASK_INTERRUPTIBLE); | 3823 | set_current_state(TASK_INTERRUPTIBLE); |
3492 | schedule_timeout(HZ / 10); /* wait 100ms */ | 3824 | schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ |
3493 | } | 3825 | } |
3494 | if (scratchpad != CCISS_FIRMWARE_READY) { | 3826 | if (scratchpad != CCISS_FIRMWARE_READY) { |
3495 | printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); | 3827 | printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); |
@@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) | |||
3615 | break; | 3947 | break; |
3616 | /* delay and try again */ | 3948 | /* delay and try again */ |
3617 | set_current_state(TASK_INTERRUPTIBLE); | 3949 | set_current_state(TASK_INTERRUPTIBLE); |
3618 | schedule_timeout(10); | 3950 | schedule_timeout(msecs_to_jiffies(1)); |
3619 | } | 3951 | } |
3620 | 3952 | ||
3621 | #ifdef CCISS_DEBUG | 3953 | #ifdef CCISS_DEBUG |
@@ -3669,15 +4001,16 @@ Enomem: | |||
3669 | return -1; | 4001 | return -1; |
3670 | } | 4002 | } |
3671 | 4003 | ||
3672 | static void free_hba(int i) | 4004 | static void free_hba(int n) |
3673 | { | 4005 | { |
3674 | ctlr_info_t *p = hba[i]; | 4006 | ctlr_info_t *h = hba[n]; |
3675 | int n; | 4007 | int i; |
3676 | 4008 | ||
3677 | hba[i] = NULL; | 4009 | hba[n] = NULL; |
3678 | for (n = 0; n < CISS_MAX_LUN; n++) | 4010 | for (i = 0; i < h->highest_lun + 1; i++) |
3679 | put_disk(p->gendisk[n]); | 4011 | if (h->gendisk[i] != NULL) |
3680 | kfree(p); | 4012 | put_disk(h->gendisk[i]); |
4013 | kfree(h); | ||
3681 | } | 4014 | } |
3682 | 4015 | ||
3683 | /* Send a message CDB to the firmware. */ | 4016 | /* Send a message CDB to the firmware. */ |
@@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3918 | hba[i]->busy_initializing = 1; | 4251 | hba[i]->busy_initializing = 1; |
3919 | INIT_HLIST_HEAD(&hba[i]->cmpQ); | 4252 | INIT_HLIST_HEAD(&hba[i]->cmpQ); |
3920 | INIT_HLIST_HEAD(&hba[i]->reqQ); | 4253 | INIT_HLIST_HEAD(&hba[i]->reqQ); |
4254 | mutex_init(&hba[i]->busy_shutting_down); | ||
3921 | 4255 | ||
3922 | if (cciss_pci_init(hba[i], pdev) != 0) | 4256 | if (cciss_pci_init(hba[i], pdev) != 0) |
3923 | goto clean0; | 4257 | goto clean0; |
@@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
3926 | hba[i]->ctlr = i; | 4260 | hba[i]->ctlr = i; |
3927 | hba[i]->pdev = pdev; | 4261 | hba[i]->pdev = pdev; |
3928 | 4262 | ||
4263 | init_completion(&hba[i]->scan_wait); | ||
4264 | |||
3929 | if (cciss_create_hba_sysfs_entry(hba[i])) | 4265 | if (cciss_create_hba_sysfs_entry(hba[i])) |
3930 | goto clean0; | 4266 | goto clean0; |
3931 | 4267 | ||
@@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4001 | hba[i]->num_luns = 0; | 4337 | hba[i]->num_luns = 0; |
4002 | hba[i]->highest_lun = -1; | 4338 | hba[i]->highest_lun = -1; |
4003 | for (j = 0; j < CISS_MAX_LUN; j++) { | 4339 | for (j = 0; j < CISS_MAX_LUN; j++) { |
4004 | hba[i]->drv[j].raid_level = -1; | 4340 | hba[i]->drv[j] = NULL; |
4005 | hba[i]->drv[j].queue = NULL; | ||
4006 | hba[i]->gendisk[j] = NULL; | 4341 | hba[i]->gendisk[j] = NULL; |
4007 | } | 4342 | } |
4008 | 4343 | ||
@@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev, | |||
4035 | 4370 | ||
4036 | hba[i]->cciss_max_sectors = 2048; | 4371 | hba[i]->cciss_max_sectors = 2048; |
4037 | 4372 | ||
4373 | rebuild_lun_table(hba[i], 1, 0); | ||
4038 | hba[i]->busy_initializing = 0; | 4374 | hba[i]->busy_initializing = 0; |
4039 | |||
4040 | rebuild_lun_table(hba[i], 1); | ||
4041 | hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i], | ||
4042 | "cciss_scan%02d", i); | ||
4043 | if (IS_ERR(hba[i]->cciss_scan_thread)) | ||
4044 | return PTR_ERR(hba[i]->cciss_scan_thread); | ||
4045 | |||
4046 | return 1; | 4375 | return 1; |
4047 | 4376 | ||
4048 | clean4: | 4377 | clean4: |
@@ -4063,12 +4392,7 @@ clean1: | |||
4063 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4392 | cciss_destroy_hba_sysfs_entry(hba[i]); |
4064 | clean0: | 4393 | clean0: |
4065 | hba[i]->busy_initializing = 0; | 4394 | hba[i]->busy_initializing = 0; |
4066 | /* cleanup any queues that may have been initialized */ | 4395 | |
4067 | for (j=0; j <= hba[i]->highest_lun; j++){ | ||
4068 | drive_info_struct *drv = &(hba[i]->drv[j]); | ||
4069 | if (drv->queue) | ||
4070 | blk_cleanup_queue(drv->queue); | ||
4071 | } | ||
4072 | /* | 4396 | /* |
4073 | * Deliberately omit pci_disable_device(): it does something nasty to | 4397 | * Deliberately omit pci_disable_device(): it does something nasty to |
4074 | * Smart Array controllers that pci_enable_device does not undo | 4398 | * Smart Array controllers that pci_enable_device does not undo |
@@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
4125 | return; | 4449 | return; |
4126 | } | 4450 | } |
4127 | 4451 | ||
4128 | kthread_stop(hba[i]->cciss_scan_thread); | 4452 | mutex_lock(&hba[i]->busy_shutting_down); |
4129 | 4453 | ||
4454 | remove_from_scan_list(hba[i]); | ||
4130 | remove_proc_entry(hba[i]->devname, proc_cciss); | 4455 | remove_proc_entry(hba[i]->devname, proc_cciss); |
4131 | unregister_blkdev(hba[i]->major, hba[i]->devname); | 4456 | unregister_blkdev(hba[i]->major, hba[i]->devname); |
4132 | 4457 | ||
@@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
4136 | if (disk) { | 4461 | if (disk) { |
4137 | struct request_queue *q = disk->queue; | 4462 | struct request_queue *q = disk->queue; |
4138 | 4463 | ||
4139 | if (disk->flags & GENHD_FL_UP) | 4464 | if (disk->flags & GENHD_FL_UP) { |
4465 | cciss_destroy_ld_sysfs_entry(hba[i], j, 1); | ||
4140 | del_gendisk(disk); | 4466 | del_gendisk(disk); |
4467 | } | ||
4141 | if (q) | 4468 | if (q) |
4142 | blk_cleanup_queue(q); | 4469 | blk_cleanup_queue(q); |
4143 | } | 4470 | } |
@@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev) | |||
4170 | pci_release_regions(pdev); | 4497 | pci_release_regions(pdev); |
4171 | pci_set_drvdata(pdev, NULL); | 4498 | pci_set_drvdata(pdev, NULL); |
4172 | cciss_destroy_hba_sysfs_entry(hba[i]); | 4499 | cciss_destroy_hba_sysfs_entry(hba[i]); |
4500 | mutex_unlock(&hba[i]->busy_shutting_down); | ||
4173 | free_hba(i); | 4501 | free_hba(i); |
4174 | } | 4502 | } |
4175 | 4503 | ||
@@ -4202,15 +4530,25 @@ static int __init cciss_init(void) | |||
4202 | if (err) | 4530 | if (err) |
4203 | return err; | 4531 | return err; |
4204 | 4532 | ||
4533 | /* Start the scan thread */ | ||
4534 | cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); | ||
4535 | if (IS_ERR(cciss_scan_thread)) { | ||
4536 | err = PTR_ERR(cciss_scan_thread); | ||
4537 | goto err_bus_unregister; | ||
4538 | } | ||
4539 | |||
4205 | /* Register for our PCI devices */ | 4540 | /* Register for our PCI devices */ |
4206 | err = pci_register_driver(&cciss_pci_driver); | 4541 | err = pci_register_driver(&cciss_pci_driver); |
4207 | if (err) | 4542 | if (err) |
4208 | goto err_bus_register; | 4543 | goto err_thread_stop; |
4209 | 4544 | ||
4210 | return 0; | 4545 | return err; |
4211 | 4546 | ||
4212 | err_bus_register: | 4547 | err_thread_stop: |
4548 | kthread_stop(cciss_scan_thread); | ||
4549 | err_bus_unregister: | ||
4213 | bus_unregister(&cciss_bus_type); | 4550 | bus_unregister(&cciss_bus_type); |
4551 | |||
4214 | return err; | 4552 | return err; |
4215 | } | 4553 | } |
4216 | 4554 | ||
@@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void) | |||
4227 | cciss_remove_one(hba[i]->pdev); | 4565 | cciss_remove_one(hba[i]->pdev); |
4228 | } | 4566 | } |
4229 | } | 4567 | } |
4568 | kthread_stop(cciss_scan_thread); | ||
4230 | remove_proc_entry("driver/cciss", NULL); | 4569 | remove_proc_entry("driver/cciss", NULL); |
4231 | bus_unregister(&cciss_bus_type); | 4570 | bus_unregister(&cciss_bus_type); |
4232 | } | 4571 | } |
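
The cciss hunks above replace the per-controller rescan machinery with one global scan thread: cciss_init() now starts it with kthread_run() and unwinds through kthread_stop() if PCI registration fails, cciss_remove_one() pulls the controller off the scan list under the new busy_shutting_down mutex, and cciss_cleanup() stops the thread last. A minimal sketch of that init/teardown ordering, using invented names (my_scan_thread, my_pci_driver) rather than the driver's own symbols:

    #include <linux/init.h>
    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>
    #include <linux/err.h>
    #include <linux/pci.h>

    static struct task_struct *my_scan_task;
    /* probe/remove/id_table omitted in this sketch; it never binds a device */
    static struct pci_driver my_pci_driver = { .name = "my_sketch" };

    static int my_scan_thread(void *data)
    {
        /* stand-in worker: the real thread walks a list of controllers to rescan */
        while (!kthread_should_stop())
            schedule_timeout_interruptible(HZ);
        return 0;
    }

    static int __init my_init(void)
    {
        int err;

        /* start the scan thread first ... */
        my_scan_task = kthread_run(my_scan_thread, NULL, "my_scan");
        if (IS_ERR(my_scan_task))
            return PTR_ERR(my_scan_task);

        /* ... so it is the last thing undone if registration fails */
        err = pci_register_driver(&my_pci_driver);
        if (err)
            kthread_stop(my_scan_task);
        return err;
    }
    module_init(my_init);
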
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h index 06a5db25b298..31524cf42c77 100644 --- a/drivers/block/cciss.h +++ b/drivers/block/cciss.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define CCISS_H | 2 | #define CCISS_H |
3 | 3 | ||
4 | #include <linux/genhd.h> | 4 | #include <linux/genhd.h> |
5 | #include <linux/mutex.h> | ||
5 | 6 | ||
6 | #include "cciss_cmd.h" | 7 | #include "cciss_cmd.h" |
7 | 8 | ||
@@ -29,7 +30,7 @@ struct access_method { | |||
29 | }; | 30 | }; |
30 | typedef struct _drive_info_struct | 31 | typedef struct _drive_info_struct |
31 | { | 32 | { |
32 | __u32 LunID; | 33 | unsigned char LunID[8]; |
33 | int usage_count; | 34 | int usage_count; |
34 | struct request_queue *queue; | 35 | struct request_queue *queue; |
35 | sector_t nr_blocks; | 36 | sector_t nr_blocks; |
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct | |||
51 | char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ | 52 | char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ |
52 | char model[MODEL_LEN + 1]; /* SCSI model string */ | 53 | char model[MODEL_LEN + 1]; /* SCSI model string */ |
53 | char rev[REV_LEN + 1]; /* SCSI revision string */ | 54 | char rev[REV_LEN + 1]; /* SCSI revision string */ |
55 | char device_initialized; /* indicates whether dev is initialized */ | ||
54 | } drive_info_struct; | 56 | } drive_info_struct; |
55 | 57 | ||
56 | struct ctlr_info | 58 | struct ctlr_info |
@@ -86,7 +88,7 @@ struct ctlr_info | |||
86 | BYTE cciss_read_capacity; | 88 | BYTE cciss_read_capacity; |
87 | 89 | ||
88 | // information about each logical volume | 90 | // information about each logical volume |
89 | drive_info_struct drv[CISS_MAX_LUN]; | 91 | drive_info_struct *drv[CISS_MAX_LUN]; |
90 | 92 | ||
91 | struct access_method access; | 93 | struct access_method access; |
92 | 94 | ||
@@ -108,6 +110,8 @@ struct ctlr_info | |||
108 | int nr_frees; | 110 | int nr_frees; |
109 | int busy_configuring; | 111 | int busy_configuring; |
110 | int busy_initializing; | 112 | int busy_initializing; |
113 | int busy_scanning; | ||
114 | struct mutex busy_shutting_down; | ||
111 | 115 | ||
112 | /* This element holds the zero based queue number of the last | 116 | /* This element holds the zero based queue number of the last |
113 | * queue to be started. It is used for fairness. | 117 | * queue to be started. It is used for fairness. |
@@ -122,8 +126,8 @@ struct ctlr_info | |||
122 | /* and saved for later processing */ | 126 | /* and saved for later processing */ |
123 | #endif | 127 | #endif |
124 | unsigned char alive; | 128 | unsigned char alive; |
125 | struct completion *rescan_wait; | 129 | struct list_head scan_list; |
126 | struct task_struct *cciss_scan_thread; | 130 | struct completion scan_wait; |
127 | struct device dev; | 131 | struct device dev; |
128 | }; | 132 | }; |
129 | 133 | ||
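
With drv[] turned into an array of pointers, each logical drive's drive_info_struct is allocated on demand (the "Dynamically allocate the drive_info_struct for each logical drive" change in this series) and the new device_initialized flag records whether a slot has been brought up. A rough sketch of the allocation shape this implies; cciss_get_drv() is a name invented here for illustration, not the driver's actual helper:

    #include <linux/slab.h>

    static drive_info_struct *cciss_get_drv(struct ctlr_info *h, int lun)
    {
        if (!h->drv[lun])
            /* kzalloc leaves device_initialized == 0 until the drive is set up */
            h->drv[lun] = kzalloc(sizeof(*h->drv[lun]), GFP_KERNEL);
        return h->drv[lun];
    }
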
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index b82d438e2607..6422651ec364 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/blkpg.h> | 32 | #include <linux/blkpg.h> |
33 | #include <linux/timer.h> | 33 | #include <linux/timer.h> |
34 | #include <linux/proc_fs.h> | 34 | #include <linux/proc_fs.h> |
35 | #include <linux/seq_file.h> | ||
35 | #include <linux/init.h> | 36 | #include <linux/init.h> |
36 | #include <linux/hdreg.h> | 37 | #include <linux/hdreg.h> |
37 | #include <linux/spinlock.h> | 38 | #include <linux/spinlock.h> |
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev); | |||
177 | 178 | ||
178 | #ifdef CONFIG_PROC_FS | 179 | #ifdef CONFIG_PROC_FS |
179 | static void ida_procinit(int i); | 180 | static void ida_procinit(int i); |
180 | static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); | ||
181 | #else | 181 | #else |
182 | static void ida_procinit(int i) {} | 182 | static void ida_procinit(int i) {} |
183 | #endif | 183 | #endif |
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = { | |||
206 | #ifdef CONFIG_PROC_FS | 206 | #ifdef CONFIG_PROC_FS |
207 | 207 | ||
208 | static struct proc_dir_entry *proc_array; | 208 | static struct proc_dir_entry *proc_array; |
209 | static const struct file_operations ida_proc_fops; | ||
209 | 210 | ||
210 | /* | 211 | /* |
211 | * Get us a file in /proc/array that says something about each controller. | 212 | * Get us a file in /proc/array that says something about each controller. |
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i) | |||
218 | if (!proc_array) return; | 219 | if (!proc_array) return; |
219 | } | 220 | } |
220 | 221 | ||
221 | create_proc_read_entry(hba[i]->devname, 0, proc_array, | 222 | proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]); |
222 | ida_proc_get_info, hba[i]); | ||
223 | } | 223 | } |
224 | 224 | ||
225 | /* | 225 | /* |
226 | * Report information about this controller. | 226 | * Report information about this controller. |
227 | */ | 227 | */ |
228 | static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) | 228 | static int ida_proc_show(struct seq_file *m, void *v) |
229 | { | 229 | { |
230 | off_t pos = 0; | 230 | int i, ctlr; |
231 | off_t len = 0; | 231 | ctlr_info_t *h = (ctlr_info_t*)m->private; |
232 | int size, i, ctlr; | ||
233 | ctlr_info_t *h = (ctlr_info_t*)data; | ||
234 | drv_info_t *drv; | 232 | drv_info_t *drv; |
235 | #ifdef CPQ_PROC_PRINT_QUEUES | 233 | #ifdef CPQ_PROC_PRINT_QUEUES |
236 | cmdlist_t *c; | 234 | cmdlist_t *c; |
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt | |||
238 | #endif | 236 | #endif |
239 | 237 | ||
240 | ctlr = h->ctlr; | 238 | ctlr = h->ctlr; |
241 | size = sprintf(buffer, "%s: Compaq %s Controller\n" | 239 | seq_printf(m, "%s: Compaq %s Controller\n" |
242 | " Board ID: 0x%08lx\n" | 240 | " Board ID: 0x%08lx\n" |
243 | " Firmware Revision: %c%c%c%c\n" | 241 | " Firmware Revision: %c%c%c%c\n" |
244 | " Controller Sig: 0x%08lx\n" | 242 | " Controller Sig: 0x%08lx\n" |
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt | |||
258 | h->log_drives, h->phys_drives, | 256 | h->log_drives, h->phys_drives, |
259 | h->Qdepth, h->maxQsinceinit); | 257 | h->Qdepth, h->maxQsinceinit); |
260 | 258 | ||
261 | pos += size; len += size; | 259 | seq_puts(m, "Logical Drive Info:\n"); |
262 | |||
263 | size = sprintf(buffer+len, "Logical Drive Info:\n"); | ||
264 | pos += size; len += size; | ||
265 | 260 | ||
266 | for(i=0; i<h->log_drives; i++) { | 261 | for(i=0; i<h->log_drives; i++) { |
267 | drv = &h->drv[i]; | 262 | drv = &h->drv[i]; |
268 | size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", | 263 | seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n", |
269 | ctlr, i, drv->blk_size, drv->nr_blks); | 264 | ctlr, i, drv->blk_size, drv->nr_blks); |
270 | pos += size; len += size; | ||
271 | } | 265 | } |
272 | 266 | ||
273 | #ifdef CPQ_PROC_PRINT_QUEUES | 267 | #ifdef CPQ_PROC_PRINT_QUEUES |
274 | spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); | 268 | spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); |
275 | size = sprintf(buffer+len, "\nCurrent Queues:\n"); | 269 | seq_puts(m, "\nCurrent Queues:\n"); |
276 | pos += size; len += size; | ||
277 | 270 | ||
278 | c = h->reqQ; | 271 | c = h->reqQ; |
279 | size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; | 272 | seq_printf(m, "reqQ = %p", c); |
280 | if (c) c=c->next; | 273 | if (c) c=c->next; |
281 | while(c && c != h->reqQ) { | 274 | while(c && c != h->reqQ) { |
282 | size = sprintf(buffer+len, "->%p", c); | 275 | seq_printf(m, "->%p", c); |
283 | pos += size; len += size; | ||
284 | c=c->next; | 276 | c=c->next; |
285 | } | 277 | } |
286 | 278 | ||
287 | c = h->cmpQ; | 279 | c = h->cmpQ; |
288 | size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; | 280 | seq_printf(m, "\ncmpQ = %p", c); |
289 | if (c) c=c->next; | 281 | if (c) c=c->next; |
290 | while(c && c != h->cmpQ) { | 282 | while(c && c != h->cmpQ) { |
291 | size = sprintf(buffer+len, "->%p", c); | 283 | seq_printf(m, "->%p", c); |
292 | pos += size; len += size; | ||
293 | c=c->next; | 284 | c=c->next; |
294 | } | 285 | } |
295 | 286 | ||
296 | size = sprintf(buffer+len, "\n"); pos += size; len += size; | 287 | seq_putc(m, '\n'); |
297 | spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); | 288 | spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); |
298 | #endif | 289 | #endif |
299 | size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", | 290 | seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n", |
300 | h->nr_allocs, h->nr_frees); | 291 | h->nr_allocs, h->nr_frees); |
301 | pos += size; len += size; | 292 | return 0; |
302 | 293 | } | |
303 | *eof = 1; | 294 | |
304 | *start = buffer+offset; | 295 | static int ida_proc_open(struct inode *inode, struct file *file) |
305 | len -= offset; | 296 | { |
306 | if (len>length) | 297 | return single_open(file, ida_proc_show, PDE(inode)->data); |
307 | len = length; | ||
308 | return len; | ||
309 | } | 298 | } |
299 | |||
300 | static const struct file_operations ida_proc_fops = { | ||
301 | .owner = THIS_MODULE, | ||
302 | .open = ida_proc_open, | ||
303 | .read = seq_read, | ||
304 | .llseek = seq_lseek, | ||
305 | .release = single_release, | ||
306 | }; | ||
310 | #endif /* CONFIG_PROC_FS */ | 307 | #endif /* CONFIG_PROC_FS */ |
311 | 308 | ||
312 | module_param_array(eisa, int, NULL, 0); | 309 | module_param_array(eisa, int, NULL, 0); |
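
The cpqarray hunk above is a straight conversion from the old read_proc interface, with its manual buffer/offset/eof bookkeeping, to the seq_file single_open pattern: the controller pointer is carried through proc_create_data() and recovered via PDE(inode)->data. The same conversion in miniature, with illustrative names (example_show, example_proc_fops):

    #include <linux/module.h>
    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
        /* m->private is whatever was passed to proc_create_data() */
        seq_printf(m, "hello from %p\n", m->private);
        return 0;
    }

    static int example_open(struct inode *inode, struct file *file)
    {
        return single_open(file, example_show, PDE(inode)->data);
    }

    static const struct file_operations example_proc_fops = {
        .owner   = THIS_MODULE,
        .open    = example_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };

Registration then becomes a one-liner, e.g. proc_create_data("example", 0, parent, &example_proc_fops, my_data), and removal stays remove_proc_entry() as before.
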
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 376f1ab48a24..23e76fe0d359 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -130,7 +130,7 @@ struct mapped_device { | |||
130 | /* | 130 | /* |
131 | * A list of ios that arrived while we were suspended. | 131 | * A list of ios that arrived while we were suspended. |
132 | */ | 132 | */ |
133 | atomic_t pending[2]; | 133 | atomic_t pending; |
134 | wait_queue_head_t wait; | 134 | wait_queue_head_t wait; |
135 | struct work_struct work; | 135 | struct work_struct work; |
136 | struct bio_list deferred; | 136 | struct bio_list deferred; |
@@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io) | |||
453 | { | 453 | { |
454 | struct mapped_device *md = io->md; | 454 | struct mapped_device *md = io->md; |
455 | int cpu; | 455 | int cpu; |
456 | int rw = bio_data_dir(io->bio); | ||
457 | 456 | ||
458 | io->start_time = jiffies; | 457 | io->start_time = jiffies; |
459 | 458 | ||
460 | cpu = part_stat_lock(); | 459 | cpu = part_stat_lock(); |
461 | part_round_stats(cpu, &dm_disk(md)->part0); | 460 | part_round_stats(cpu, &dm_disk(md)->part0); |
462 | part_stat_unlock(); | 461 | part_stat_unlock(); |
463 | dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); | 462 | dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); |
464 | } | 463 | } |
465 | 464 | ||
466 | static void end_io_acct(struct dm_io *io) | 465 | static void end_io_acct(struct dm_io *io) |
@@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io) | |||
480 | * After this is decremented the bio must not be touched if it is | 479 | * After this is decremented the bio must not be touched if it is |
481 | * a barrier. | 480 | * a barrier. |
482 | */ | 481 | */ |
483 | dm_disk(md)->part0.in_flight[rw] = pending = | 482 | dm_disk(md)->part0.in_flight = pending = |
484 | atomic_dec_return(&md->pending[rw]); | 483 | atomic_dec_return(&md->pending); |
485 | pending += atomic_read(&md->pending[rw^0x1]); | ||
486 | 484 | ||
487 | /* nudge anyone waiting on suspend queue */ | 485 | /* nudge anyone waiting on suspend queue */ |
488 | if (!pending) | 486 | if (!pending) |
@@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor) | |||
1787 | if (!md->disk) | 1785 | if (!md->disk) |
1788 | goto bad_disk; | 1786 | goto bad_disk; |
1789 | 1787 | ||
1790 | atomic_set(&md->pending[0], 0); | 1788 | atomic_set(&md->pending, 0); |
1791 | atomic_set(&md->pending[1], 0); | ||
1792 | init_waitqueue_head(&md->wait); | 1789 | init_waitqueue_head(&md->wait); |
1793 | INIT_WORK(&md->work, dm_wq_work); | 1790 | INIT_WORK(&md->work, dm_wq_work); |
1794 | init_waitqueue_head(&md->eventq); | 1791 | init_waitqueue_head(&md->eventq); |
@@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible) | |||
2091 | break; | 2088 | break; |
2092 | } | 2089 | } |
2093 | spin_unlock_irqrestore(q->queue_lock, flags); | 2090 | spin_unlock_irqrestore(q->queue_lock, flags); |
2094 | } else if (!atomic_read(&md->pending[0]) && | 2091 | } else if (!atomic_read(&md->pending)) |
2095 | !atomic_read(&md->pending[1])) | ||
2096 | break; | 2092 | break; |
2097 | 2093 | ||
2098 | if (interruptible == TASK_INTERRUPTIBLE && | 2094 | if (interruptible == TASK_INTERRUPTIBLE && |
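
The dm.c changes (together with the genhd.h hunk further down) revert the short-lived split of in-flight counters by direction, per 'Revert "Seperate read and write statistics of in_flight requests"': md->pending is back to a single atomic_t mirrored into part0.in_flight, and the suspend path waits for that one counter to drain. The essential shape, as a stand-alone sketch with its own struct name:

    #include <linux/wait.h>
    #include <asm/atomic.h>

    struct io_acct {
        atomic_t          pending;  /* one counter for reads and writes alike */
        wait_queue_head_t wait;
    };

    static void acct_start(struct io_acct *a)
    {
        atomic_inc(&a->pending);
    }

    static void acct_end(struct io_acct *a)
    {
        /* nudge anyone waiting for outstanding I/O to drain */
        if (atomic_dec_return(&a->pending) == 0)
            wake_up(&a->wait);
    }
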
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c index 0acbf4f5be50..8ca17a3e96ea 100644 --- a/drivers/mtd/mtd_blkdevs.c +++ b/drivers/mtd/mtd_blkdevs.c | |||
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv { | |||
32 | spinlock_t queue_lock; | 32 | spinlock_t queue_lock; |
33 | }; | 33 | }; |
34 | 34 | ||
35 | static int blktrans_discard_request(struct request_queue *q, | ||
36 | struct request *req) | ||
37 | { | ||
38 | req->cmd_type = REQ_TYPE_LINUX_BLOCK; | ||
39 | req->cmd[0] = REQ_LB_OP_DISCARD; | ||
40 | return 0; | ||
41 | } | ||
42 | |||
43 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, | 35 | static int do_blktrans_request(struct mtd_blktrans_ops *tr, |
44 | struct mtd_blktrans_dev *dev, | 36 | struct mtd_blktrans_dev *dev, |
45 | struct request *req) | 37 | struct request *req) |
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
52 | 44 | ||
53 | buf = req->buffer; | 45 | buf = req->buffer; |
54 | 46 | ||
55 | if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && | ||
56 | req->cmd[0] == REQ_LB_OP_DISCARD) | ||
57 | return tr->discard(dev, block, nsect); | ||
58 | |||
59 | if (!blk_fs_request(req)) | 47 | if (!blk_fs_request(req)) |
60 | return -EIO; | 48 | return -EIO; |
61 | 49 | ||
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr, | |||
63 | get_capacity(req->rq_disk)) | 51 | get_capacity(req->rq_disk)) |
64 | return -EIO; | 52 | return -EIO; |
65 | 53 | ||
54 | if (blk_discard_rq(req)) | ||
55 | return tr->discard(dev, block, nsect); | ||
56 | |||
66 | switch(rq_data_dir(req)) { | 57 | switch(rq_data_dir(req)) { |
67 | case READ: | 58 | case READ: |
68 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) | 59 | for (; nsect > 0; nsect--, block++, buf += tr->blksize) |
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr) | |||
380 | tr->blkcore_priv->rq->queuedata = tr; | 371 | tr->blkcore_priv->rq->queuedata = tr; |
381 | blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); | 372 | blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); |
382 | if (tr->discard) | 373 | if (tr->discard) |
383 | blk_queue_set_discard(tr->blkcore_priv->rq, | 374 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, |
384 | blktrans_discard_request); | 375 | tr->blkcore_priv->rq); |
385 | 376 | ||
386 | tr->blkshift = ffs(tr->blksize) - 1; | 377 | tr->blkshift = ffs(tr->blksize) - 1; |
387 | 378 | ||
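
With prepare_discard_fn removed from the block layer, mtd_blkdevs now advertises discard support purely through the queue flag and handles discards as ordinary requests spotted with blk_discard_rq() in the request function. The resulting driver-side pattern, sketched with a placeholder tr_discard() handler:

    #include <linux/blkdev.h>

    /* placeholder for the translation layer's real discard handler */
    static int tr_discard(sector_t block, unsigned int nsect);

    static void example_enable_discard(struct request_queue *q)
    {
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
    }

    static int example_do_request(struct request *req, sector_t block,
                                  unsigned int nsect)
    {
        if (blk_discard_rq(req))
            return tr_discard(block, nsect);
        /* ... normal READ/WRITE handling continues here ... */
        return 0;
    }
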
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c index ee1601026fb0..c24e4e0367a2 100644 --- a/drivers/staging/dst/dcore.c +++ b/drivers/staging/dst/dcore.c | |||
@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio) | |||
102 | struct dst_node *n = q->queuedata; | 102 | struct dst_node *n = q->queuedata; |
103 | int err = -EIO; | 103 | int err = -EIO; |
104 | 104 | ||
105 | if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { | 105 | if (bio_empty_barrier(bio) && !blk_queue_discard(q)) { |
106 | /* | 106 | /* |
107 | * This is a dirty^Wnice hack, but if we complete this | 107 | * This is a dirty^Wnice hack, but if we complete this |
108 | * operation with -EOPNOTSUPP like intended, XFS | 108 | * operation with -EOPNOTSUPP like intended, XFS |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -249,6 +249,7 @@ void bio_free(struct bio *bio, struct bio_set *bs) | |||
249 | 249 | ||
250 | mempool_free(p, bs->bio_pool); | 250 | mempool_free(p, bs->bio_pool); |
251 | } | 251 | } |
252 | EXPORT_SYMBOL(bio_free); | ||
252 | 253 | ||
253 | void bio_init(struct bio *bio) | 254 | void bio_init(struct bio *bio) |
254 | { | 255 | { |
@@ -257,6 +258,7 @@ void bio_init(struct bio *bio) | |||
257 | bio->bi_comp_cpu = -1; | 258 | bio->bi_comp_cpu = -1; |
258 | atomic_set(&bio->bi_cnt, 1); | 259 | atomic_set(&bio->bi_cnt, 1); |
259 | } | 260 | } |
261 | EXPORT_SYMBOL(bio_init); | ||
260 | 262 | ||
261 | /** | 263 | /** |
262 | * bio_alloc_bioset - allocate a bio for I/O | 264 | * bio_alloc_bioset - allocate a bio for I/O |
@@ -311,6 +313,7 @@ err_free: | |||
311 | mempool_free(p, bs->bio_pool); | 313 | mempool_free(p, bs->bio_pool); |
312 | return NULL; | 314 | return NULL; |
313 | } | 315 | } |
316 | EXPORT_SYMBOL(bio_alloc_bioset); | ||
314 | 317 | ||
315 | static void bio_fs_destructor(struct bio *bio) | 318 | static void bio_fs_destructor(struct bio *bio) |
316 | { | 319 | { |
@@ -337,6 +340,7 @@ struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs) | |||
337 | 340 | ||
338 | return bio; | 341 | return bio; |
339 | } | 342 | } |
343 | EXPORT_SYMBOL(bio_alloc); | ||
340 | 344 | ||
341 | static void bio_kmalloc_destructor(struct bio *bio) | 345 | static void bio_kmalloc_destructor(struct bio *bio) |
342 | { | 346 | { |
@@ -380,6 +384,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs) | |||
380 | 384 | ||
381 | return bio; | 385 | return bio; |
382 | } | 386 | } |
387 | EXPORT_SYMBOL(bio_kmalloc); | ||
383 | 388 | ||
384 | void zero_fill_bio(struct bio *bio) | 389 | void zero_fill_bio(struct bio *bio) |
385 | { | 390 | { |
@@ -416,6 +421,7 @@ void bio_put(struct bio *bio) | |||
416 | bio->bi_destructor(bio); | 421 | bio->bi_destructor(bio); |
417 | } | 422 | } |
418 | } | 423 | } |
424 | EXPORT_SYMBOL(bio_put); | ||
419 | 425 | ||
420 | inline int bio_phys_segments(struct request_queue *q, struct bio *bio) | 426 | inline int bio_phys_segments(struct request_queue *q, struct bio *bio) |
421 | { | 427 | { |
@@ -424,6 +430,7 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio) | |||
424 | 430 | ||
425 | return bio->bi_phys_segments; | 431 | return bio->bi_phys_segments; |
426 | } | 432 | } |
433 | EXPORT_SYMBOL(bio_phys_segments); | ||
427 | 434 | ||
428 | /** | 435 | /** |
429 | * __bio_clone - clone a bio | 436 | * __bio_clone - clone a bio |
@@ -451,6 +458,7 @@ void __bio_clone(struct bio *bio, struct bio *bio_src) | |||
451 | bio->bi_size = bio_src->bi_size; | 458 | bio->bi_size = bio_src->bi_size; |
452 | bio->bi_idx = bio_src->bi_idx; | 459 | bio->bi_idx = bio_src->bi_idx; |
453 | } | 460 | } |
461 | EXPORT_SYMBOL(__bio_clone); | ||
454 | 462 | ||
455 | /** | 463 | /** |
456 | * bio_clone - clone a bio | 464 | * bio_clone - clone a bio |
@@ -482,6 +490,7 @@ struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask) | |||
482 | 490 | ||
483 | return b; | 491 | return b; |
484 | } | 492 | } |
493 | EXPORT_SYMBOL(bio_clone); | ||
485 | 494 | ||
486 | /** | 495 | /** |
487 | * bio_get_nr_vecs - return approx number of vecs | 496 | * bio_get_nr_vecs - return approx number of vecs |
@@ -505,6 +514,7 @@ int bio_get_nr_vecs(struct block_device *bdev) | |||
505 | 514 | ||
506 | return nr_pages; | 515 | return nr_pages; |
507 | } | 516 | } |
517 | EXPORT_SYMBOL(bio_get_nr_vecs); | ||
508 | 518 | ||
509 | static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page | 519 | static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page |
510 | *page, unsigned int len, unsigned int offset, | 520 | *page, unsigned int len, unsigned int offset, |
@@ -635,6 +645,7 @@ int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, | |||
635 | return __bio_add_page(q, bio, page, len, offset, | 645 | return __bio_add_page(q, bio, page, len, offset, |
636 | queue_max_hw_sectors(q)); | 646 | queue_max_hw_sectors(q)); |
637 | } | 647 | } |
648 | EXPORT_SYMBOL(bio_add_pc_page); | ||
638 | 649 | ||
639 | /** | 650 | /** |
640 | * bio_add_page - attempt to add page to bio | 651 | * bio_add_page - attempt to add page to bio |
@@ -655,6 +666,7 @@ int bio_add_page(struct bio *bio, struct page *page, unsigned int len, | |||
655 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 666 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
656 | return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); | 667 | return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); |
657 | } | 668 | } |
669 | EXPORT_SYMBOL(bio_add_page); | ||
658 | 670 | ||
659 | struct bio_map_data { | 671 | struct bio_map_data { |
660 | struct bio_vec *iovecs; | 672 | struct bio_vec *iovecs; |
@@ -776,6 +788,7 @@ int bio_uncopy_user(struct bio *bio) | |||
776 | bio_put(bio); | 788 | bio_put(bio); |
777 | return ret; | 789 | return ret; |
778 | } | 790 | } |
791 | EXPORT_SYMBOL(bio_uncopy_user); | ||
779 | 792 | ||
780 | /** | 793 | /** |
781 | * bio_copy_user_iov - copy user data to bio | 794 | * bio_copy_user_iov - copy user data to bio |
@@ -920,6 +933,7 @@ struct bio *bio_copy_user(struct request_queue *q, struct rq_map_data *map_data, | |||
920 | 933 | ||
921 | return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); | 934 | return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); |
922 | } | 935 | } |
936 | EXPORT_SYMBOL(bio_copy_user); | ||
923 | 937 | ||
924 | static struct bio *__bio_map_user_iov(struct request_queue *q, | 938 | static struct bio *__bio_map_user_iov(struct request_queue *q, |
925 | struct block_device *bdev, | 939 | struct block_device *bdev, |
@@ -1050,6 +1064,7 @@ struct bio *bio_map_user(struct request_queue *q, struct block_device *bdev, | |||
1050 | 1064 | ||
1051 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); | 1065 | return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); |
1052 | } | 1066 | } |
1067 | EXPORT_SYMBOL(bio_map_user); | ||
1053 | 1068 | ||
1054 | /** | 1069 | /** |
1055 | * bio_map_user_iov - map user sg_iovec table into bio | 1070 | * bio_map_user_iov - map user sg_iovec table into bio |
@@ -1117,13 +1132,13 @@ void bio_unmap_user(struct bio *bio) | |||
1117 | __bio_unmap_user(bio); | 1132 | __bio_unmap_user(bio); |
1118 | bio_put(bio); | 1133 | bio_put(bio); |
1119 | } | 1134 | } |
1135 | EXPORT_SYMBOL(bio_unmap_user); | ||
1120 | 1136 | ||
1121 | static void bio_map_kern_endio(struct bio *bio, int err) | 1137 | static void bio_map_kern_endio(struct bio *bio, int err) |
1122 | { | 1138 | { |
1123 | bio_put(bio); | 1139 | bio_put(bio); |
1124 | } | 1140 | } |
1125 | 1141 | ||
1126 | |||
1127 | static struct bio *__bio_map_kern(struct request_queue *q, void *data, | 1142 | static struct bio *__bio_map_kern(struct request_queue *q, void *data, |
1128 | unsigned int len, gfp_t gfp_mask) | 1143 | unsigned int len, gfp_t gfp_mask) |
1129 | { | 1144 | { |
@@ -1189,6 +1204,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len, | |||
1189 | bio_put(bio); | 1204 | bio_put(bio); |
1190 | return ERR_PTR(-EINVAL); | 1205 | return ERR_PTR(-EINVAL); |
1191 | } | 1206 | } |
1207 | EXPORT_SYMBOL(bio_map_kern); | ||
1192 | 1208 | ||
1193 | static void bio_copy_kern_endio(struct bio *bio, int err) | 1209 | static void bio_copy_kern_endio(struct bio *bio, int err) |
1194 | { | 1210 | { |
@@ -1250,6 +1266,7 @@ struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, | |||
1250 | 1266 | ||
1251 | return bio; | 1267 | return bio; |
1252 | } | 1268 | } |
1269 | EXPORT_SYMBOL(bio_copy_kern); | ||
1253 | 1270 | ||
1254 | /* | 1271 | /* |
1255 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions | 1272 | * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions |
@@ -1400,6 +1417,7 @@ void bio_endio(struct bio *bio, int error) | |||
1400 | if (bio->bi_end_io) | 1417 | if (bio->bi_end_io) |
1401 | bio->bi_end_io(bio, error); | 1418 | bio->bi_end_io(bio, error); |
1402 | } | 1419 | } |
1420 | EXPORT_SYMBOL(bio_endio); | ||
1403 | 1421 | ||
1404 | void bio_pair_release(struct bio_pair *bp) | 1422 | void bio_pair_release(struct bio_pair *bp) |
1405 | { | 1423 | { |
@@ -1410,6 +1428,7 @@ void bio_pair_release(struct bio_pair *bp) | |||
1410 | mempool_free(bp, bp->bio2.bi_private); | 1428 | mempool_free(bp, bp->bio2.bi_private); |
1411 | } | 1429 | } |
1412 | } | 1430 | } |
1431 | EXPORT_SYMBOL(bio_pair_release); | ||
1413 | 1432 | ||
1414 | static void bio_pair_end_1(struct bio *bi, int err) | 1433 | static void bio_pair_end_1(struct bio *bi, int err) |
1415 | { | 1434 | { |
@@ -1477,6 +1496,7 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors) | |||
1477 | 1496 | ||
1478 | return bp; | 1497 | return bp; |
1479 | } | 1498 | } |
1499 | EXPORT_SYMBOL(bio_split); | ||
1480 | 1500 | ||
1481 | /** | 1501 | /** |
1482 | * bio_sector_offset - Find hardware sector offset in bio | 1502 | * bio_sector_offset - Find hardware sector offset in bio |
@@ -1547,6 +1567,7 @@ void bioset_free(struct bio_set *bs) | |||
1547 | 1567 | ||
1548 | kfree(bs); | 1568 | kfree(bs); |
1549 | } | 1569 | } |
1570 | EXPORT_SYMBOL(bioset_free); | ||
1550 | 1571 | ||
1551 | /** | 1572 | /** |
1552 | * bioset_create - Create a bio_set | 1573 | * bioset_create - Create a bio_set |
@@ -1592,6 +1613,7 @@ bad: | |||
1592 | bioset_free(bs); | 1613 | bioset_free(bs); |
1593 | return NULL; | 1614 | return NULL; |
1594 | } | 1615 | } |
1616 | EXPORT_SYMBOL(bioset_create); | ||
1595 | 1617 | ||
1596 | static void __init biovec_init_slabs(void) | 1618 | static void __init biovec_init_slabs(void) |
1597 | { | 1619 | { |
@@ -1636,29 +1658,4 @@ static int __init init_bio(void) | |||
1636 | 1658 | ||
1637 | return 0; | 1659 | return 0; |
1638 | } | 1660 | } |
1639 | |||
1640 | subsys_initcall(init_bio); | 1661 | subsys_initcall(init_bio); |
1641 | |||
1642 | EXPORT_SYMBOL(bio_alloc); | ||
1643 | EXPORT_SYMBOL(bio_kmalloc); | ||
1644 | EXPORT_SYMBOL(bio_put); | ||
1645 | EXPORT_SYMBOL(bio_free); | ||
1646 | EXPORT_SYMBOL(bio_endio); | ||
1647 | EXPORT_SYMBOL(bio_init); | ||
1648 | EXPORT_SYMBOL(__bio_clone); | ||
1649 | EXPORT_SYMBOL(bio_clone); | ||
1650 | EXPORT_SYMBOL(bio_phys_segments); | ||
1651 | EXPORT_SYMBOL(bio_add_page); | ||
1652 | EXPORT_SYMBOL(bio_add_pc_page); | ||
1653 | EXPORT_SYMBOL(bio_get_nr_vecs); | ||
1654 | EXPORT_SYMBOL(bio_map_user); | ||
1655 | EXPORT_SYMBOL(bio_unmap_user); | ||
1656 | EXPORT_SYMBOL(bio_map_kern); | ||
1657 | EXPORT_SYMBOL(bio_copy_kern); | ||
1658 | EXPORT_SYMBOL(bio_pair_release); | ||
1659 | EXPORT_SYMBOL(bio_split); | ||
1660 | EXPORT_SYMBOL(bio_copy_user); | ||
1661 | EXPORT_SYMBOL(bio_uncopy_user); | ||
1662 | EXPORT_SYMBOL(bioset_create); | ||
1663 | EXPORT_SYMBOL(bioset_free); | ||
1664 | EXPORT_SYMBOL(bio_alloc_bioset); | ||
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 7b685e10cbad..f38fee0311a7 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
@@ -248,19 +248,11 @@ ssize_t part_stat_show(struct device *dev, | |||
248 | part_stat_read(p, merges[WRITE]), | 248 | part_stat_read(p, merges[WRITE]), |
249 | (unsigned long long)part_stat_read(p, sectors[WRITE]), | 249 | (unsigned long long)part_stat_read(p, sectors[WRITE]), |
250 | jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), | 250 | jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), |
251 | part_in_flight(p), | 251 | p->in_flight, |
252 | jiffies_to_msecs(part_stat_read(p, io_ticks)), | 252 | jiffies_to_msecs(part_stat_read(p, io_ticks)), |
253 | jiffies_to_msecs(part_stat_read(p, time_in_queue))); | 253 | jiffies_to_msecs(part_stat_read(p, time_in_queue))); |
254 | } | 254 | } |
255 | 255 | ||
256 | ssize_t part_inflight_show(struct device *dev, | ||
257 | struct device_attribute *attr, char *buf) | ||
258 | { | ||
259 | struct hd_struct *p = dev_to_part(dev); | ||
260 | |||
261 | return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); | ||
262 | } | ||
263 | |||
264 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 256 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
265 | ssize_t part_fail_show(struct device *dev, | 257 | ssize_t part_fail_show(struct device *dev, |
266 | struct device_attribute *attr, char *buf) | 258 | struct device_attribute *attr, char *buf) |
@@ -289,7 +281,6 @@ static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); | |||
289 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); | 281 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); |
290 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); | 282 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); |
291 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); | 283 | static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); |
292 | static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); | ||
293 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 284 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
294 | static struct device_attribute dev_attr_fail = | 285 | static struct device_attribute dev_attr_fail = |
295 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); | 286 | __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); |
@@ -301,7 +292,6 @@ static struct attribute *part_attrs[] = { | |||
301 | &dev_attr_size.attr, | 292 | &dev_attr_size.attr, |
302 | &dev_attr_alignment_offset.attr, | 293 | &dev_attr_alignment_offset.attr, |
303 | &dev_attr_stat.attr, | 294 | &dev_attr_stat.attr, |
304 | &dev_attr_inflight.attr, | ||
305 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 295 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
306 | &dev_attr_fail.attr, | 296 | &dev_attr_fail.attr, |
307 | #endif | 297 | #endif |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index e23a86cae5ac..25119041e034 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -82,7 +82,6 @@ enum rq_cmd_type_bits { | |||
82 | enum { | 82 | enum { |
83 | REQ_LB_OP_EJECT = 0x40, /* eject request */ | 83 | REQ_LB_OP_EJECT = 0x40, /* eject request */ |
84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ | 84 | REQ_LB_OP_FLUSH = 0x41, /* flush request */ |
85 | REQ_LB_OP_DISCARD = 0x42, /* discard sectors */ | ||
86 | }; | 85 | }; |
87 | 86 | ||
88 | /* | 87 | /* |
@@ -261,7 +260,6 @@ typedef void (request_fn_proc) (struct request_queue *q); | |||
261 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 260 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); |
262 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 261 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
263 | typedef void (unplug_fn) (struct request_queue *); | 262 | typedef void (unplug_fn) (struct request_queue *); |
264 | typedef int (prepare_discard_fn) (struct request_queue *, struct request *); | ||
265 | 263 | ||
266 | struct bio_vec; | 264 | struct bio_vec; |
267 | struct bvec_merge_data { | 265 | struct bvec_merge_data { |
@@ -313,6 +311,7 @@ struct queue_limits { | |||
313 | unsigned int alignment_offset; | 311 | unsigned int alignment_offset; |
314 | unsigned int io_min; | 312 | unsigned int io_min; |
315 | unsigned int io_opt; | 313 | unsigned int io_opt; |
314 | unsigned int max_discard_sectors; | ||
316 | 315 | ||
317 | unsigned short logical_block_size; | 316 | unsigned short logical_block_size; |
318 | unsigned short max_hw_segments; | 317 | unsigned short max_hw_segments; |
@@ -340,7 +339,6 @@ struct request_queue | |||
340 | make_request_fn *make_request_fn; | 339 | make_request_fn *make_request_fn; |
341 | prep_rq_fn *prep_rq_fn; | 340 | prep_rq_fn *prep_rq_fn; |
342 | unplug_fn *unplug_fn; | 341 | unplug_fn *unplug_fn; |
343 | prepare_discard_fn *prepare_discard_fn; | ||
344 | merge_bvec_fn *merge_bvec_fn; | 342 | merge_bvec_fn *merge_bvec_fn; |
345 | prepare_flush_fn *prepare_flush_fn; | 343 | prepare_flush_fn *prepare_flush_fn; |
346 | softirq_done_fn *softirq_done_fn; | 344 | softirq_done_fn *softirq_done_fn; |
@@ -460,6 +458,7 @@ struct request_queue | |||
460 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 458 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
461 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ | 459 | #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ |
462 | #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ | 460 | #define QUEUE_FLAG_CQ 16 /* hardware does queuing */ |
461 | #define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */ | ||
463 | 462 | ||
464 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 463 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
465 | (1 << QUEUE_FLAG_CLUSTER) | \ | 464 | (1 << QUEUE_FLAG_CLUSTER) | \ |
@@ -591,6 +590,7 @@ enum { | |||
591 | #define blk_queue_flushing(q) ((q)->ordseq) | 590 | #define blk_queue_flushing(q) ((q)->ordseq) |
592 | #define blk_queue_stackable(q) \ | 591 | #define blk_queue_stackable(q) \ |
593 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 592 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
593 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | ||
594 | 594 | ||
595 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) | 595 | #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) |
596 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) | 596 | #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) |
@@ -929,6 +929,8 @@ extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); | |||
929 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); | 929 | extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); |
930 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); | 930 | extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); |
931 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | 931 | extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); |
932 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | ||
933 | unsigned int max_discard_sectors); | ||
932 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 934 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
933 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 935 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); |
934 | extern void blk_queue_alignment_offset(struct request_queue *q, | 936 | extern void blk_queue_alignment_offset(struct request_queue *q, |
@@ -955,7 +957,6 @@ extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); | |||
955 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 957 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
956 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 958 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
957 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 959 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
958 | extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *); | ||
959 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 960 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
960 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 961 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
961 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 962 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
@@ -1080,25 +1081,37 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) | |||
1080 | return q->limits.physical_block_size; | 1081 | return q->limits.physical_block_size; |
1081 | } | 1082 | } |
1082 | 1083 | ||
1084 | static inline int bdev_physical_block_size(struct block_device *bdev) | ||
1085 | { | ||
1086 | return queue_physical_block_size(bdev_get_queue(bdev)); | ||
1087 | } | ||
1088 | |||
1083 | static inline unsigned int queue_io_min(struct request_queue *q) | 1089 | static inline unsigned int queue_io_min(struct request_queue *q) |
1084 | { | 1090 | { |
1085 | return q->limits.io_min; | 1091 | return q->limits.io_min; |
1086 | } | 1092 | } |
1087 | 1093 | ||
1094 | static inline int bdev_io_min(struct block_device *bdev) | ||
1095 | { | ||
1096 | return queue_io_min(bdev_get_queue(bdev)); | ||
1097 | } | ||
1098 | |||
1088 | static inline unsigned int queue_io_opt(struct request_queue *q) | 1099 | static inline unsigned int queue_io_opt(struct request_queue *q) |
1089 | { | 1100 | { |
1090 | return q->limits.io_opt; | 1101 | return q->limits.io_opt; |
1091 | } | 1102 | } |
1092 | 1103 | ||
1104 | static inline int bdev_io_opt(struct block_device *bdev) | ||
1105 | { | ||
1106 | return queue_io_opt(bdev_get_queue(bdev)); | ||
1107 | } | ||
1108 | |||
1093 | static inline int queue_alignment_offset(struct request_queue *q) | 1109 | static inline int queue_alignment_offset(struct request_queue *q) |
1094 | { | 1110 | { |
1095 | if (q && q->limits.misaligned) | 1111 | if (q->limits.misaligned) |
1096 | return -1; | 1112 | return -1; |
1097 | 1113 | ||
1098 | if (q && q->limits.alignment_offset) | 1114 | return q->limits.alignment_offset; |
1099 | return q->limits.alignment_offset; | ||
1100 | |||
1101 | return 0; | ||
1102 | } | 1115 | } |
1103 | 1116 | ||
1104 | static inline int queue_sector_alignment_offset(struct request_queue *q, | 1117 | static inline int queue_sector_alignment_offset(struct request_queue *q, |
@@ -1108,6 +1121,19 @@ static inline int queue_sector_alignment_offset(struct request_queue *q, | |||
1108 | & (q->limits.io_min - 1); | 1121 | & (q->limits.io_min - 1); |
1109 | } | 1122 | } |
1110 | 1123 | ||
1124 | static inline int bdev_alignment_offset(struct block_device *bdev) | ||
1125 | { | ||
1126 | struct request_queue *q = bdev_get_queue(bdev); | ||
1127 | |||
1128 | if (q->limits.misaligned) | ||
1129 | return -1; | ||
1130 | |||
1131 | if (bdev != bdev->bd_contains) | ||
1132 | return bdev->bd_part->alignment_offset; | ||
1133 | |||
1134 | return q->limits.alignment_offset; | ||
1135 | } | ||
1136 | |||
1111 | static inline int queue_dma_alignment(struct request_queue *q) | 1137 | static inline int queue_dma_alignment(struct request_queue *q) |
1112 | { | 1138 | { |
1113 | return q ? q->dma_alignment : 511; | 1139 | return q ? q->dma_alignment : 511; |
@@ -1146,7 +1172,11 @@ static inline void put_dev_sector(Sector p) | |||
1146 | } | 1172 | } |
1147 | 1173 | ||
1148 | struct work_struct; | 1174 | struct work_struct; |
1175 | struct delayed_work; | ||
1149 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1176 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1177 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
1178 | struct delayed_work *work, | ||
1179 | unsigned long delay); | ||
1150 | 1180 | ||
1151 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1181 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1152 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1182 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
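
Taken together, the blkdev.h additions let a driver publish a discard limit (blk_queue_max_discard_sectors() plus QUEUE_FLAG_DISCARD) and give consumers bdev-level accessors for the I/O topology fields. A hedged sketch of both sides, with illustrative values only:

    #include <linux/kernel.h>
    #include <linux/blkdev.h>

    static void example_setup_queue(struct request_queue *q)
    {
        /* 16384 sectors of 512 B = 8 MiB per discard request (example value) */
        blk_queue_max_discard_sectors(q, 8 << 11);
        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
    }

    static void example_inspect(struct block_device *bdev)
    {
        struct request_queue *q = bdev_get_queue(bdev);

        if (blk_queue_discard(q))
            pr_info("discard ok, io_min=%d io_opt=%d align=%d\n",
                    bdev_io_min(bdev), bdev_io_opt(bdev),
                    bdev_alignment_offset(bdev));
    }
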
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 7e4350ece0f8..3b73b9992b26 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
@@ -198,6 +198,7 @@ extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, | |||
198 | char __user *arg); | 198 | char __user *arg); |
199 | extern int blk_trace_startstop(struct request_queue *q, int start); | 199 | extern int blk_trace_startstop(struct request_queue *q, int start); |
200 | extern int blk_trace_remove(struct request_queue *q); | 200 | extern int blk_trace_remove(struct request_queue *q); |
201 | extern void blk_trace_remove_sysfs(struct device *dev); | ||
201 | extern int blk_trace_init_sysfs(struct device *dev); | 202 | extern int blk_trace_init_sysfs(struct device *dev); |
202 | 203 | ||
203 | extern struct attribute_group blk_trace_attr_group; | 204 | extern struct attribute_group blk_trace_attr_group; |
@@ -211,6 +212,7 @@ extern struct attribute_group blk_trace_attr_group; | |||
211 | # define blk_trace_startstop(q, start) (-ENOTTY) | 212 | # define blk_trace_startstop(q, start) (-ENOTTY) |
212 | # define blk_trace_remove(q) (-ENOTTY) | 213 | # define blk_trace_remove(q) (-ENOTTY) |
213 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) | 214 | # define blk_add_trace_msg(q, fmt, ...) do { } while (0) |
215 | # define blk_trace_remove_sysfs(dev) do { } while (0) | ||
214 | static inline int blk_trace_init_sysfs(struct device *dev) | 216 | static inline int blk_trace_init_sysfs(struct device *dev) |
215 | { | 217 | { |
216 | return 0; | 218 | return 0; |
diff --git a/include/linux/fs.h b/include/linux/fs.h index a1e6899d4b6c..2620a8c63571 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -300,6 +300,10 @@ struct inodes_stat_t { | |||
300 | #define BLKTRACESTOP _IO(0x12,117) | 300 | #define BLKTRACESTOP _IO(0x12,117) |
301 | #define BLKTRACETEARDOWN _IO(0x12,118) | 301 | #define BLKTRACETEARDOWN _IO(0x12,118) |
302 | #define BLKDISCARD _IO(0x12,119) | 302 | #define BLKDISCARD _IO(0x12,119) |
303 | #define BLKIOMIN _IO(0x12,120) | ||
304 | #define BLKIOOPT _IO(0x12,121) | ||
305 | #define BLKALIGNOFF _IO(0x12,122) | ||
306 | #define BLKPBSZGET _IO(0x12,123) | ||
303 | 307 | ||
304 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ | 308 | #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ |
305 | #define FIBMAP _IO(0x00,1) /* bmap access */ | 309 | #define FIBMAP _IO(0x00,1) /* bmap access */ |
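
The four new ioctls expose the queue topology fields to userspace ("block: Topology ioctls" in this merge). A small userspace probe, assuming headers from a kernel that carries this series; error handling is trimmed for brevity:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    int main(int argc, char **argv)
    {
        unsigned int io_min = 0, io_opt = 0, pbsz = 0;
        int align = 0;
        int fd;

        if (argc < 2)
            return 1;
        fd = open(argv[1], O_RDONLY);
        if (fd < 0)
            return 1;

        ioctl(fd, BLKIOMIN, &io_min);     /* minimum I/O size */
        ioctl(fd, BLKIOOPT, &io_opt);     /* optimal I/O size */
        ioctl(fd, BLKALIGNOFF, &align);   /* alignment offset, -1 if misaligned */
        ioctl(fd, BLKPBSZGET, &pbsz);     /* physical block size */

        printf("io_min=%u io_opt=%u alignment_offset=%d physical_block_size=%u\n",
               io_min, io_opt, align, pbsz);
        close(fd);
        return 0;
    }
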
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 297df45ffd0a..7beaa21b3880 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -98,7 +98,7 @@ struct hd_struct { | |||
98 | int make_it_fail; | 98 | int make_it_fail; |
99 | #endif | 99 | #endif |
100 | unsigned long stamp; | 100 | unsigned long stamp; |
101 | int in_flight[2]; | 101 | int in_flight; |
102 | #ifdef CONFIG_SMP | 102 | #ifdef CONFIG_SMP |
103 | struct disk_stats *dkstats; | 103 | struct disk_stats *dkstats; |
104 | #else | 104 | #else |
@@ -322,23 +322,18 @@ static inline void free_part_stats(struct hd_struct *part) | |||
322 | #define part_stat_sub(cpu, gendiskp, field, subnd) \ | 322 | #define part_stat_sub(cpu, gendiskp, field, subnd) \ |
323 | part_stat_add(cpu, gendiskp, field, -subnd) | 323 | part_stat_add(cpu, gendiskp, field, -subnd) |
324 | 324 | ||
325 | static inline void part_inc_in_flight(struct hd_struct *part, int rw) | 325 | static inline void part_inc_in_flight(struct hd_struct *part) |
326 | { | 326 | { |
327 | part->in_flight[rw]++; | 327 | part->in_flight++; |
328 | if (part->partno) | 328 | if (part->partno) |
329 | part_to_disk(part)->part0.in_flight[rw]++; | 329 | part_to_disk(part)->part0.in_flight++; |
330 | } | 330 | } |
331 | 331 | ||
332 | static inline void part_dec_in_flight(struct hd_struct *part, int rw) | 332 | static inline void part_dec_in_flight(struct hd_struct *part) |
333 | { | 333 | { |
334 | part->in_flight[rw]--; | 334 | part->in_flight--; |
335 | if (part->partno) | 335 | if (part->partno) |
336 | part_to_disk(part)->part0.in_flight[rw]--; | 336 | part_to_disk(part)->part0.in_flight--; |
337 | } | ||
338 | |||
339 | static inline int part_in_flight(struct hd_struct *part) | ||
340 | { | ||
341 | return part->in_flight[0] + part->in_flight[1]; | ||
342 | } | 337 | } |
343 | 338 | ||
344 | /* block/blk-core.c */ | 339 | /* block/blk-core.c */ |
@@ -551,8 +546,6 @@ extern ssize_t part_size_show(struct device *dev, | |||
551 | struct device_attribute *attr, char *buf); | 546 | struct device_attribute *attr, char *buf); |
552 | extern ssize_t part_stat_show(struct device *dev, | 547 | extern ssize_t part_stat_show(struct device *dev, |
553 | struct device_attribute *attr, char *buf); | 548 | struct device_attribute *attr, char *buf); |
554 | extern ssize_t part_inflight_show(struct device *dev, | ||
555 | struct device_attribute *attr, char *buf); | ||
556 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 549 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
557 | extern ssize_t part_fail_show(struct device *dev, | 550 | extern ssize_t part_fail_show(struct device *dev, |
558 | struct device_attribute *attr, char *buf); | 551 | struct device_attribute *attr, char *buf); |
diff --git a/include/trace/events/block.h b/include/trace/events/block.h index d86af94691c2..00405b5f624a 100644 --- a/include/trace/events/block.h +++ b/include/trace/events/block.h | |||
@@ -488,6 +488,39 @@ TRACE_EVENT(block_remap, | |||
488 | (unsigned long long)__entry->old_sector) | 488 | (unsigned long long)__entry->old_sector) |
489 | ); | 489 | ); |
490 | 490 | ||
491 | TRACE_EVENT(block_rq_remap, | ||
492 | |||
493 | TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, | ||
494 | sector_t from), | ||
495 | |||
496 | TP_ARGS(q, rq, dev, from), | ||
497 | |||
498 | TP_STRUCT__entry( | ||
499 | __field( dev_t, dev ) | ||
500 | __field( sector_t, sector ) | ||
501 | __field( unsigned int, nr_sector ) | ||
502 | __field( dev_t, old_dev ) | ||
503 | __field( sector_t, old_sector ) | ||
504 | __array( char, rwbs, 6 ) | ||
505 | ), | ||
506 | |||
507 | TP_fast_assign( | ||
508 | __entry->dev = disk_devt(rq->rq_disk); | ||
509 | __entry->sector = blk_rq_pos(rq); | ||
510 | __entry->nr_sector = blk_rq_sectors(rq); | ||
511 | __entry->old_dev = dev; | ||
512 | __entry->old_sector = from; | ||
513 | blk_fill_rwbs_rq(__entry->rwbs, rq); | ||
514 | ), | ||
515 | |||
516 | TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", | ||
517 | MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, | ||
518 | (unsigned long long)__entry->sector, | ||
519 | __entry->nr_sector, | ||
520 | MAJOR(__entry->old_dev), MINOR(__entry->old_dev), | ||
521 | (unsigned long long)__entry->old_sector) | ||
522 | ); | ||
523 | |||
491 | #endif /* _TRACE_BLOCK_H */ | 524 | #endif /* _TRACE_BLOCK_H */ |
492 | 525 | ||
493 | /* This part must be outside protection */ | 526 | /* This part must be outside protection */ |
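
The new block_rq_remap event mirrors the existing bio-level block_remap for request-based remapping ("Add a tracepoint for block request remapping"). Going by the TP_PROTO above, a request-based stacking driver would fire it roughly like this before handing the cloned request to the lower device; clone, orig and md_disk are placeholders, not names from the series:

    #include <linux/blkdev.h>
    #include <trace/events/block.h>

    static void example_remap(struct request *clone, struct request *orig,
                              struct gendisk *md_disk)
    {
        /* record: clone now targets clone->q, originally at blk_rq_pos(orig)
         * on the device represented by md_disk */
        trace_block_rq_remap(clone->q, clone, disk_devt(md_disk),
                             blk_rq_pos(orig));
    }
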
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c index 3eb159c277c8..d9d6206e0b14 100644 --- a/kernel/trace/blktrace.c +++ b/kernel/trace/blktrace.c | |||
@@ -856,6 +856,37 @@ static void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | |||
856 | } | 856 | } |
857 | 857 | ||
858 | /** | 858 | /** |
859 | * blk_add_trace_rq_remap - Add a trace for a request-remap operation | ||
860 | * @q: queue the io is for | ||
861 | * @rq: the source request | ||
862 | * @dev: target device | ||
863 | * @from: source sector | ||
864 | * | ||
865 | * Description: | ||
866 | * Device mapper remaps request to other devices. | ||
867 | * Add a trace for that action. | ||
868 | * | ||
869 | **/ | ||
870 | static void blk_add_trace_rq_remap(struct request_queue *q, | ||
871 | struct request *rq, dev_t dev, | ||
872 | sector_t from) | ||
873 | { | ||
874 | struct blk_trace *bt = q->blk_trace; | ||
875 | struct blk_io_trace_remap r; | ||
876 | |||
877 | if (likely(!bt)) | ||
878 | return; | ||
879 | |||
880 | r.device_from = cpu_to_be32(dev); | ||
881 | r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); | ||
882 | r.sector_from = cpu_to_be64(from); | ||
883 | |||
884 | __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), | ||
885 | rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, | ||
886 | sizeof(r), &r); | ||
887 | } | ||
888 | |||
889 | /** | ||
859 | * blk_add_driver_data - Add binary message with driver-specific data | 890 | * blk_add_driver_data - Add binary message with driver-specific data |
860 | * @q: queue the io is for | 891 | * @q: queue the io is for |
861 | * @rq: io request | 892 | * @rq: io request |
@@ -922,10 +953,13 @@ static void blk_register_tracepoints(void) | |||
922 | WARN_ON(ret); | 953 | WARN_ON(ret); |
923 | ret = register_trace_block_remap(blk_add_trace_remap); | 954 | ret = register_trace_block_remap(blk_add_trace_remap); |
924 | WARN_ON(ret); | 955 | WARN_ON(ret); |
956 | ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
957 | WARN_ON(ret); | ||
925 | } | 958 | } |
926 | 959 | ||
927 | static void blk_unregister_tracepoints(void) | 960 | static void blk_unregister_tracepoints(void) |
928 | { | 961 | { |
962 | unregister_trace_block_rq_remap(blk_add_trace_rq_remap); | ||
929 | unregister_trace_block_remap(blk_add_trace_remap); | 963 | unregister_trace_block_remap(blk_add_trace_remap); |
930 | unregister_trace_block_split(blk_add_trace_split); | 964 | unregister_trace_block_split(blk_add_trace_split); |
931 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); | 965 | unregister_trace_block_unplug_io(blk_add_trace_unplug_io); |
@@ -1657,6 +1691,11 @@ int blk_trace_init_sysfs(struct device *dev) | |||
1657 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); | 1691 | return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); |
1658 | } | 1692 | } |
1659 | 1693 | ||
1694 | void blk_trace_remove_sysfs(struct device *dev) | ||
1695 | { | ||
1696 | sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); | ||
1697 | } | ||
1698 | |||
1660 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ | 1699 | #endif /* CONFIG_BLK_DEV_IO_TRACE */ |
1661 | 1700 | ||
1662 | #ifdef CONFIG_EVENT_TRACING | 1701 | #ifdef CONFIG_EVENT_TRACING |
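
blk_trace_remove_sysfs() exists so that the attribute group created by blk_trace_init_sysfs() is torn down symmetrically ("Add missing blk_trace_remove_sysfs to be in pair with blk_trace_init_sysfs"). The pairing reduced to its outline; the real call sites live in the block-layer sysfs registration paths, which are outside this excerpt:

    #include <linux/device.h>
    #include <linux/blktrace_api.h>

    static int example_register_queue_sysfs(struct device *dev)
    {
        /* creates the blktrace attribute group (stubbed when !CONFIG_BLK_DEV_IO_TRACE) */
        return blk_trace_init_sysfs(dev);
    }

    static void example_unregister_queue_sysfs(struct device *dev)
    {
        /* new counterpart: remove the same group on the way out */
        blk_trace_remove_sysfs(dev);
    }
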
diff --git a/mm/swapfile.c b/mm/swapfile.c index 4de7f02f820b..a1bc6b9af9a2 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -1974,12 +1974,14 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) | |||
1974 | goto bad_swap; | 1974 | goto bad_swap; |
1975 | } | 1975 | } |
1976 | 1976 | ||
1977 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { | 1977 | if (p->bdev) { |
1978 | p->flags |= SWP_SOLIDSTATE; | 1978 | if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { |
1979 | p->cluster_next = 1 + (random32() % p->highest_bit); | 1979 | p->flags |= SWP_SOLIDSTATE; |
1980 | p->cluster_next = 1 + (random32() % p->highest_bit); | ||
1981 | } | ||
1982 | if (discard_swap(p) == 0) | ||
1983 | p->flags |= SWP_DISCARDABLE; | ||
1980 | } | 1984 | } |
1981 | if (discard_swap(p) == 0) | ||
1982 | p->flags |= SWP_DISCARDABLE; | ||
1983 | 1985 | ||
1984 | mutex_lock(&swapon_mutex); | 1986 | mutex_lock(&swapon_mutex); |
1985 | spin_lock(&swap_lock); | 1987 | spin_lock(&swap_lock); |