author		Linus Torvalds <torvalds@linux-foundation.org>	2011-06-03 19:11:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-06-03 19:11:26 -0400
commit		4f1ba49efafccbc73017f824efa2505c81b247cd (patch)
tree		db072bbccffd1f1c6b1269ac7a752cb30af3a726
parent		39b4a46f19295b4876fba6655f27d67232dc6a1f (diff)
parent		e3a57b3ccf5c04934ac43b5b80e32ba51b817288 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Use hlist_entry() for io_context.cic_list.first
  cfq-iosched: Remove bogus check in queue_fail path
  xen/blkback: potential null dereference in error handling
  xen/blkback: don't call vbd_size() if bd_disk is NULL
  block: blkdev_get() should access ->bd_disk only after success
  CFQ: Fix typo and remove unnecessary semicolon
  block: remove unwanted semicolons
  Revert "block: Remove extra discard_alignment from hd_struct."
  nbd: adjust 'max_part' according to part_shift
  nbd: limit module parameters to a sane value
  nbd: pass MSG_* flags to kernel_recvmsg()
  block: improve the bio_add_page() and bio_add_pc_page() descriptions
-rw-r--r--  block/blk-ioc.c                       4
-rw-r--r--  block/cfq-iosched.c                  11
-rw-r--r--  drivers/block/nbd.c                  22
-rw-r--r--  drivers/block/xen-blkback/blkback.c  10
-rw-r--r--  drivers/block/xen-blkback/xenbus.c    3
-rw-r--r--  fs/block_dev.c                        4
-rw-r--r--  fs/partitions/check.c                10
-rw-r--r--  include/linux/blkdev.h                4
-rw-r--r--  include/linux/genhd.h                 1
9 files changed, 41 insertions(+), 28 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index c898049dafd5..342eae9b0d3c 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
 	if (!hlist_empty(&ioc->cic_list)) {
 		struct cfq_io_context *cic;
 
-		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 								cic_list);
 		cic->dtor(ioc);
 	}
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
 	if (!hlist_empty(&ioc->cic_list)) {
 		struct cfq_io_context *cic;
 
-		cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
+		cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
 								cic_list);
 		cic->exit(ioc);
 	}
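
Annotation: both list_entry() and hlist_entry() reduce to container_of(), so the old code computed the right address by accident; the fix uses the accessor whose name and expected type match cic_list.first, which is a struct hlist_node pointer. A minimal userspace sketch of that pointer arithmetic, with illustrative struct names standing in for the kernel's:

#include <stddef.h>
#include <stdio.h>

/* container_of(): recover the enclosing struct from a pointer to a member. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define hlist_entry(ptr, type, member) container_of(ptr, type, member)

struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

/* Illustrative stand-in for struct cfq_io_context. */
struct io_ctx {
        int id;
        struct hlist_node node;   /* linked into a struct hlist_head */
};

int main(void)
{
        struct io_ctx ctx = { .id = 42 };
        struct hlist_head head = { .first = &ctx.node };

        /* Same arithmetic the patched code performs on ioc->cic_list.first. */
        struct io_ctx *back = hlist_entry(head.first, struct io_ctx, node);
        printf("%d\n", back->id);   /* prints 42 */
        return 0;
}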
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 7c52d6888924..3c7b537bf908 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -185,7 +185,7 @@ struct cfq_group {
 	int nr_cfqq;
 
 	/*
-	 * Per group busy queus average. Useful for workload slice calc. We
+	 * Per group busy queues average. Useful for workload slice calc. We
 	 * create the array for each prio class but at run time it is used
 	 * only for RT and BE class and slot for IDLE class remains unused.
 	 * This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
 			cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
-			blkg_path(&(cfqq)->cfqg->blkg), ##args);
+			blkg_path(&(cfqq)->cfqg->blkg), ##args)
 
 #define cfq_log_cfqg(cfqd, cfqg, fmt, args...)				\
 	blk_add_trace_msg((cfqd)->queue, "%s " fmt,			\
-				blkg_path(&(cfqg)->blkg), ##args);	\
+				blkg_path(&(cfqg)->blkg), ##args)	\
 
 #else
 #define cfq_log_cfqq(cfqd, cfqq, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
-#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0);
+#define cfq_log_cfqg(cfqd, cfqg, fmt, args...)		do {} while (0)
 #endif
 #define cfq_log(cfqd, fmt, args...)	\
 	blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
@@ -3786,9 +3786,6 @@ new_queue:
 	return 0;
 
 queue_fail:
-	if (cic)
-		put_io_context(cic->ioc);
-
 	cfq_schedule_dispatch(cfqd);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 	cfq_log(cfqd, "set_request fail");
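
Annotation: the dropped semicolons above (and in the blkdev.h hunk below) are real fixes, not style. A statement-like macro must expand to something that still needs the caller's ';' — the do {} while (0) idiom — otherwise an if/else around it breaks. A self-contained illustration with made-up names:

#include <stdio.h>

/* Broken: the trailing ';' inside the macro terminates the if-statement
 * early, leaving a following 'else' with nothing to attach to. */
#define log_bad(msg)    do { puts(msg); } while (0);

/* Fixed: the caller supplies the ';', so if/else nesting stays intact. */
#define log_good(msg)   do { puts(msg); } while (0)

static void demo(int err)
{
        if (err)
                log_good("set_request fail");  /* log_bad() here would not compile */
        else
                log_good("set_request ok");
}

int main(void) { demo(0); return 0; }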
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index e6fc716aca45..f533f3375e24 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
 		if (lo->xmit_timeout)
 			del_timer_sync(&ti);
 	} else
-		result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
+		result = kernel_recvmsg(sock, &msg, &iov, 1, size,
+					msg.msg_flags);
 
 	if (signal_pending(current)) {
 		siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
 		return -ENOMEM;
 
 	part_shift = 0;
-	if (max_part > 0)
+	if (max_part > 0) {
 		part_shift = fls(max_part);
 
+		/*
+		 * Adjust max_part according to part_shift as it is exported
+		 * to user space so that user can know the max number of
+		 * partition kernel should be able to manage.
+		 *
+		 * Note that -1 is required because partition 0 is reserved
+		 * for the whole disk.
+		 */
+		max_part = (1UL << part_shift) - 1;
+	}
+
+	if ((1UL << part_shift) > DISK_MAX_PARTS)
+		return -EINVAL;
+
+	if (nbds_max > 1UL << (MINORBITS - part_shift))
+		return -EINVAL;
+
 	for (i = 0; i < nbds_max; i++) {
 		struct gendisk *disk = alloc_disk(1 << part_shift);
 		if (!disk)
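
Annotation: the second hunk rounds max_part up to one below a power of two so the minor-number space divides evenly per device, then bounds both module parameters. A userspace re-run of the same arithmetic (fls_() mimics the kernel's fls(); parameter values are illustrative):

#include <stdio.h>

/* fls(x): position of the highest set bit, 1-based, as in the kernel. */
static int fls_(unsigned long x)
{
        int r = 0;
        while (x) { x >>= 1; r++; }
        return r;
}

int main(void)
{
        const unsigned long MINORBITS = 20, DISK_MAX_PARTS = 256; /* kernel constants */
        unsigned long max_part = 10, nbds_max = 16;               /* example parameters */
        int part_shift = 0;

        if (max_part > 0) {
                part_shift = fls_(max_part);
                /* -1 because partition 0 is reserved for the whole disk. */
                max_part = (1UL << part_shift) - 1;
        }
        if ((1UL << part_shift) > DISK_MAX_PARTS ||
            nbds_max > 1UL << (MINORBITS - part_shift))
                return 1;  /* nbd_init() would return -EINVAL */

        /* max_part=10 -> part_shift=4, max_part=15, at most 65536 devices. */
        printf("part_shift=%d max_part=%lu device_limit=%lu\n",
               part_shift, max_part, 1UL << (MINORBITS - part_shift));
        return 0;
}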
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index c73910cc28c9..5cf2993a8338 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
 failed_init:
 	kfree(blkbk->pending_reqs);
 	kfree(blkbk->pending_grant_handles);
-	for (i = 0; i < mmap_pages; i++) {
-		if (blkbk->pending_pages[i])
-			__free_page(blkbk->pending_pages[i]);
+	if (blkbk->pending_pages) {
+		for (i = 0; i < mmap_pages; i++) {
+			if (blkbk->pending_pages[i])
+				__free_page(blkbk->pending_pages[i]);
+		}
+		kfree(blkbk->pending_pages);
 	}
-	kfree(blkbk->pending_pages);
 	kfree(blkbk);
 	blkbk = NULL;
 	return rc;
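
Annotation: failed_init can be reached before pending_pages was ever allocated; kfree(NULL) is a safe no-op, but indexing a NULL array is not, so the loop needs the guard. A userspace sketch of the same cleanup pattern (names are illustrative, not the driver's):

#include <stdlib.h>

struct pool {
        char *reqs;
        char **pages;   /* may still be NULL if setup failed before alloc */
        int nr_pages;
};

static void pool_destroy(struct pool *p)
{
        free(p->reqs);          /* free(NULL), like kfree(NULL), is a no-op */
        if (p->pages) {         /* ...but indexing a NULL array is an oops */
                for (int i = 0; i < p->nr_pages; i++)
                        free(p->pages[i]);
                free(p->pages);
        }
        free(p);
}

int main(void)
{
        struct pool *p = calloc(1, sizeof(*p)); /* pages NULL, as on the error path */
        pool_destroy(p);                        /* safe: the guard skips the loop */
        return 0;
}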
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 34570823355b..6cc0db1bf522 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
 	}
 
 	vbd->bdev = bdev;
-	vbd->size = vbd_sz(vbd);
-
 	if (vbd->bdev->bd_disk == NULL) {
 		DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
 			vbd->pdevice);
 		xen_vbd_free(vbd);
 		return -ENOENT;
 	}
+	vbd->size = vbd_sz(vbd);
 
 	if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
 		vbd->type |= VDISK_CDROM;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 1f2b19978333..1a2421f908f0 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 	 * individual writeable reference is too fragile given the
 	 * way @mode is used in blkdev_get/put().
 	 */
-	if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
-	    !res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
+	if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
+	    (disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
 		bdev->bd_write_holder = true;
 		disk_block_events(disk);
 	}
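
Annotation: the reorder makes the success check (!res) the first operand, so the disk->flags dereference is never reached on the failure path, where ->bd_disk is no longer trustworthy. A hedged, generic illustration of relying on &&'s left-to-right short-circuit (names are made up):

#include <stdbool.h>
#include <stddef.h>

struct disk_s { unsigned int flags; };

/* && stops at the first false operand, so cheap validity checks must come
 * before any operand that dereferences state invalidated on failure. */
static bool should_block_events(int res, unsigned int write_mode,
                                const struct disk_s *disk, unsigned int flag)
{
        /* If res != 0 the open failed: disk->flags is never evaluated. */
        return res == 0 && write_mode && disk != NULL && (disk->flags & flag);
}

int main(void)
{
        return should_block_events(-1, 1, NULL, 1) ? 1 : 0;  /* no deref, returns 0 */
}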
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index f82e762eeca2..d545e97d99c3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
 					 struct device_attribute *attr, char *buf)
 {
 	struct hd_struct *p = dev_to_part(dev);
-	struct gendisk *disk = dev_to_disk(dev);
-	unsigned int alignment = 0;
-
-	if (disk->queue)
-		alignment = queue_limit_discard_alignment(&disk->queue->limits,
-							p->start_sect);
-	return sprintf(buf, "%u\n", alignment);
+	return sprintf(buf, "%u\n", p->discard_alignment);
 }
 
 ssize_t part_stat_show(struct device *dev,
@@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
 	p->start_sect = start;
 	p->alignment_offset =
 		queue_limit_alignment_offset(&disk->queue->limits, start);
+	p->discard_alignment =
+		queue_limit_discard_alignment(&disk->queue->limits, start);
 	p->nr_sects = len;
 	p->partno = partno;
 	p->policy = get_disk_ro(disk);
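
Annotation: the revert returns to computing the discard alignment once, in add_partition(), and caching it in hd_struct (the field is restored in the genhd.h hunk below); the sysfs show method then just prints the cached value instead of reaching back to the disk's queue from a partition device. A sketch of that compute-once-at-creation design, with illustrative names and a hypothetical stand-in for queue_limit_discard_alignment():

struct part_info {
        unsigned long long start_sect;
        unsigned int discard_alignment;  /* cached at partition creation */
};

/* Hypothetical stand-in: the real math lives in the block layer. */
static unsigned int discard_alignment_of(unsigned int granularity,
                                         unsigned long long start)
{
        return granularity ? (unsigned int)(start % granularity) : 0;
}

static void part_create(struct part_info *p, unsigned long long start,
                        unsigned int granularity)
{
        p->start_sect = start;
        p->discard_alignment = discard_alignment_of(granularity, start);
}

/* The show() path then reduces to printing p->discard_alignment and needs
 * no pointer from the partition back to the owning disk or its queue. */
int main(void)
{
        struct part_info p;
        part_create(&p, 63, 8);
        return p.discard_alignment == 7 ? 0 : 1;
}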
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index ae9091a68480..1a23722e8878 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
 #define blk_get_integrity(a) (0)
 #define blk_integrity_compare(a, b) (0)
 #define blk_integrity_register(a, b) (0)
-#define blk_integrity_unregister(a) do { } while (0);
-#define blk_queue_max_integrity_segments(a, b) do { } while (0);
+#define blk_integrity_unregister(a) do { } while (0)
+#define blk_queue_max_integrity_segments(a, b) do { } while (0)
 #define queue_max_integrity_segments(a) (0)
 #define blk_integrity_merge_rq(a, b, c) (0)
 #define blk_integrity_merge_bio(a, b, c) (0)
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index b78956b3c2e7..300d7582006e 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -100,6 +100,7 @@ struct hd_struct {
 	sector_t start_sect;
 	sector_t nr_sects;
 	sector_t alignment_offset;
+	unsigned int discard_alignment;
 	struct device __dev;
 	struct kobject *holder_dir;
 	int policy, partno;