author     David Vrabel <david.vrabel@csr.com>    2008-12-08 11:18:47 -0500
committer  David Vrabel <david.vrabel@csr.com>    2008-12-08 11:18:47 -0500
commit     c35fa3ea1ae8198bd65c2c6e59d9ebd68c115a59 (patch)
tree       878768b69df25459b448aa890352342c4a3c6e2d /block
parent     dcc7461eef7341e84e2f7274f904ce01a43b2506 (diff)
parent     218d11a8b071b23b76c484fd5f72a4fe3306801e (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6 into for-upstream
Diffstat (limited to 'block')
-rw-r--r--   block/blk-barrier.c     4
-rw-r--r--   block/blk-core.c       26
-rw-r--r--   block/blk-map.c         2
-rw-r--r--   block/blk-settings.c    4
-rw-r--r--   block/bsg.c             2
-rw-r--r--   block/compat_ioctl.c   31
-rw-r--r--   block/elevator.c        7
-rw-r--r--   block/genhd.c           2
-rw-r--r--   block/scsi_ioctl.c      2
9 files changed, 66 insertions, 14 deletions
diff --git a/block/blk-barrier.c b/block/blk-barrier.c
index 5c99ff8d2db8..6e72d661ae42 100644
--- a/block/blk-barrier.c
+++ b/block/blk-barrier.c
@@ -161,7 +161,7 @@ static inline struct request *start_ordered(struct request_queue *q,
         /*
          * Prep proxy barrier request.
          */
-        blkdev_dequeue_request(rq);
+        elv_dequeue_request(q, rq);
         q->orig_bar_rq = rq;
         rq = &q->bar_rq;
         blk_rq_init(q, rq);
@@ -219,7 +219,7 @@ int blk_do_ordered(struct request_queue *q, struct request **rqp)
                         * This can happen when the queue switches to
                         * ORDERED_NONE while this request is on it.
                         */
-                        blkdev_dequeue_request(rq);
+                        elv_dequeue_request(q, rq);
                         if (__blk_end_request(rq, -EOPNOTSUPP,
                                               blk_rq_bytes(rq)))
                                 BUG();
diff --git a/block/blk-core.c b/block/blk-core.c
index 10e8a64a5a5b..c36aa98fafa3 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -592,7 +592,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
                            1 << QUEUE_FLAG_STACKABLE);
         q->queue_lock = lock;
 
-        blk_queue_segment_boundary(q, 0xffffffff);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
 
         blk_queue_make_request(q, __make_request);
         blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
@@ -1637,6 +1637,28 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
 
 /**
+ * blkdev_dequeue_request - dequeue request and start timeout timer
+ * @req: request to dequeue
+ *
+ * Dequeue @req and start timeout timer on it. This hands off the
+ * request to the driver.
+ *
+ * Block internal functions which don't want to start timer should
+ * call elv_dequeue_request().
+ */
+void blkdev_dequeue_request(struct request *req)
+{
+        elv_dequeue_request(req->q, req);
+
+        /*
+         * We are now handing the request to the hardware, add the
+         * timeout handler.
+         */
+        blk_add_timer(req);
+}
+EXPORT_SYMBOL(blkdev_dequeue_request);
+
+/**
  * __end_that_request_first - end I/O on a request
  * @req: the request being processed
  * @error: %0 for success, < %0 for error
@@ -1774,7 +1796,7 @@ static void end_that_request_last(struct request *req, int error)
                 blk_queue_end_tag(req->q, req);
 
         if (blk_queued_rq(req))
-                blkdev_dequeue_request(req);
+                elv_dequeue_request(req->q, req);
 
         if (unlikely(laptop_mode) && blk_fs_request(req))
                 laptop_io_completion();
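
The blk-core.c hunk above adds the driver-facing half of the dequeue split: blkdev_dequeue_request() dequeues the request and arms its timeout timer, while block-internal paths keep using elv_dequeue_request() and leave the timer alone. Below is a minimal sketch of the intended driver-side usage in a request_fn of that kernel era, assuming an elevator-managed queue (elv_next_request()); example_request_fn and the hardware-submission step are hypothetical, not part of the patch.

#include <linux/blkdev.h>

/* Hypothetical request_fn illustrating the hand-off point. */
static void example_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = elv_next_request(q)) != NULL) {
                /* Take ownership of the request and arm its timeout timer. */
                blkdev_dequeue_request(rq);

                /* ... submit rq to the hardware here (hypothetical) ... */
        }
}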
diff --git a/block/blk-map.c b/block/blk-map.c
index 0f4b4b881811..2990447f45e9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -224,7 +224,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
                 */
                bio_get(bio);
                bio_endio(bio, 0);
-               bio_unmap_user(bio);
+               __blk_rq_unmap_user(bio);
                return -EINVAL;
        }
 
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 41392fbe19ff..afa55e14e278 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -125,6 +125,9 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
         q->nr_requests = BLKDEV_MAX_RQ;
         blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
         blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
+        blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
+        blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
+
         q->make_request_fn = mfn;
         q->backing_dev_info.ra_pages =
                         (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
@@ -314,6 +317,7 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
         /* zero is "infinity" */
         t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
         t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
+        t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask);
 
         t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments);
         t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments);
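
With seg_boundary_mask now folded into blk_queue_stack_limits(), a stacked queue inherits the strictest segment boundary of its underlying devices, alongside the sector and segment limits it already combined. A minimal sketch of how a stacking driver might apply this, assuming a hypothetical driver that simply merges in every bottom queue's limits (example_stack_all_limits is not from the patch):

#include <linux/blkdev.h>

/* Hypothetical helper: propagate limits from each underlying device's
 * queue into the top-level queue, including seg_boundary_mask. */
static void example_stack_all_limits(struct request_queue *top,
                                     struct request_queue **bottom,
                                     unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                blk_queue_stack_limits(top, bottom[i]);
}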
diff --git a/block/bsg.c b/block/bsg.c
index e8bd2475682a..e73e50daf3d0 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -202,6 +202,8 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
         rq->timeout = q->sg_timeout;
         if (!rq->timeout)
                 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+                rq->timeout = BLK_MIN_SG_TIMEOUT;
 
         return 0;
 }
diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
index 3d3e7a46f38c..67eb93cff699 100644
--- a/block/compat_ioctl.c
+++ b/block/compat_ioctl.c
@@ -677,6 +677,29 @@ static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode,
         case DVD_WRITE_STRUCT:
         case DVD_AUTH:
                 arg = (unsigned long)compat_ptr(arg);
+                /* These intepret arg as an unsigned long, not as a pointer,
+                 * so we must not do compat_ptr() conversion. */
+        case HDIO_SET_MULTCOUNT:
+        case HDIO_SET_UNMASKINTR:
+        case HDIO_SET_KEEPSETTINGS:
+        case HDIO_SET_32BIT:
+        case HDIO_SET_NOWERR:
+        case HDIO_SET_DMA:
+        case HDIO_SET_PIO_MODE:
+        case HDIO_SET_NICE:
+        case HDIO_SET_WCACHE:
+        case HDIO_SET_ACOUSTIC:
+        case HDIO_SET_BUSSTATE:
+        case HDIO_SET_ADDRESS:
+        case CDROMEJECT_SW:
+        case CDROM_SET_OPTIONS:
+        case CDROM_CLEAR_OPTIONS:
+        case CDROM_SELECT_SPEED:
+        case CDROM_SELECT_DISC:
+        case CDROM_MEDIA_CHANGED:
+        case CDROM_DRIVE_STATUS:
+        case CDROM_LOCKDOOR:
+        case CDROM_DEBUG:
                 break;
         default:
                 /* unknown ioctl number */
@@ -699,8 +722,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
         struct backing_dev_info *bdi;
         loff_t size;
 
+        /*
+         * O_NDELAY can be altered using fcntl(.., F_SETFL, ..), so we have
+         * to updated it before every ioctl.
+         */
         if (file->f_flags & O_NDELAY)
-                mode |= FMODE_NDELAY_NOW;
+                mode |= FMODE_NDELAY;
+        else
+                mode &= ~FMODE_NDELAY;
 
         switch (cmd) {
         case HDIO_GETGEO:
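
The comment added in the first compat_ioctl.c hunk carries the key rule: the newly listed HDIO_*/CDROM_* commands take their argument by value, so compat_ptr() must not be applied to them; only pointer-taking commands need the 32-bit user pointer widened. A small illustrative sketch of that distinction, not part of the patch (example_fixup_arg is a hypothetical helper):

#include <linux/compat.h>
#include <linux/cdrom.h>
#include <linux/hdreg.h>

/* Hypothetical helper: widen arg only for commands that pass a pointer. */
static unsigned long example_fixup_arg(unsigned int cmd, unsigned long arg)
{
        switch (cmd) {
        case DVD_AUTH:                  /* argument is a 32-bit user pointer */
                return (unsigned long)compat_ptr(arg);
        case HDIO_SET_32BIT:            /* argument is a plain value */
        default:
                return arg;
        }
}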
diff --git a/block/elevator.c b/block/elevator.c
index 9ac82dde99dd..a6951f76ba0c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -844,14 +844,7 @@ void elv_dequeue_request(struct request_queue *q, struct request *rq)
          */
         if (blk_account_rq(rq))
                 q->in_flight++;
-
-        /*
-         * We are now handing the request to the hardware, add the
-         * timeout handler.
-         */
-        blk_add_timer(rq);
 }
-EXPORT_SYMBOL(elv_dequeue_request);
 
 int elv_queue_empty(struct request_queue *q)
 {
diff --git a/block/genhd.c b/block/genhd.c
index 27549e470da5..2f7feda61e35 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -1102,6 +1102,7 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
                         kfree(disk);
                         return NULL;
                 }
+                disk->node_id = node_id;
                 if (disk_expand_part_tbl(disk, 0)) {
                         free_part_stats(&disk->part0);
                         kfree(disk);
@@ -1116,7 +1117,6 @@ struct gendisk *alloc_disk_node(int minors, int node_id)
                 device_initialize(disk_to_dev(disk));
                 INIT_WORK(&disk->async_notify,
                           media_change_notify_thread);
-                disk->node_id = node_id;
         }
         return disk;
 }
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 5963cf91a3a0..d0bb92cbefb9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -208,6 +208,8 @@ static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
         rq->timeout = q->sg_timeout;
         if (!rq->timeout)
                 rq->timeout = BLK_DEFAULT_SG_TIMEOUT;
+        if (rq->timeout < BLK_MIN_SG_TIMEOUT)
+                rq->timeout = BLK_MIN_SG_TIMEOUT;
 
         return 0;
 }