path: root/block
commit    292dd876ee765c478b27c93cc51e93a558ed58bf
tree      5b740e93253295baee2a9c414a6c66d03d44a9ef
parent    d4ec6c7cc9a15a7a529719bc3b84f46812f9842e
parent    9fdb62af92c741addbea15545f214a6e89460865
author    Len Brown <len.brown@intel.com>  2006-01-27 17:18:29 -0500
committer Len Brown <len.brown@intel.com>  2006-01-27 17:18:29 -0500

    Pull release into acpica branch
Diffstat (limited to 'block')

 block/elevator.c   |  34
 block/genhd.c      | 106
 block/ioctl.c      |  24
 block/ll_rw_blk.c  | 131
 block/scsi_ioctl.c |  14
 5 files changed, 244 insertions(+), 65 deletions(-)
diff --git a/block/elevator.c b/block/elevator.c
index 39dcccc82ada..c9f424d5399c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -64,7 +64,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 {
         int ret = ELEVATOR_NO_MERGE;
 
@@ -80,7 +80,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
         return ret;
 }
-EXPORT_SYMBOL(elv_try_merge);
 
 static struct elevator_type *elevator_find(const char *name)
 {
@@ -150,13 +149,20 @@ static void elevator_setup_default(void)
         if (!chosen_elevator[0])
                 strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
 
+        /*
+         * Be backwards-compatible with previous kernels, so users
+         * won't get the wrong elevator.
+         */
+        if (!strcmp(chosen_elevator, "as"))
+                strcpy(chosen_elevator, "anticipatory");
+
         /*
-         * If the given scheduler is not available, fall back to no-op.
+         * If the given scheduler is not available, fall back to the default
          */
         if ((e = elevator_find(chosen_elevator)))
                 elevator_put(e);
         else
-                strcpy(chosen_elevator, "noop");
+                strcpy(chosen_elevator, CONFIG_DEFAULT_IOSCHED);
 }
 
 static int __init elevator_setup(char *str)
@@ -611,23 +617,23 @@ void elv_completed_request(request_queue_t *q, struct request *rq)
          * request is released from the driver, io must be done
          */
         if (blk_account_rq(rq)) {
-                struct request *first_rq = list_entry_rq(q->queue_head.next);
-
                 q->in_flight--;
+                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
+                        e->ops->elevator_completed_req_fn(q, rq);
+        }
 
         /*
          * Check if the queue is waiting for fs requests to be
          * drained for flush sequence.
          */
-        if (q->ordseq && q->in_flight == 0 &&
+        if (unlikely(q->ordseq)) {
+                struct request *first_rq = list_entry_rq(q->queue_head.next);
+                if (q->in_flight == 0 &&
             blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
             blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                 q->request_fn(q);
         }
-
-        if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
-                e->ops->elevator_completed_req_fn(q, rq);
         }
 }
 
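
The interleaved hunk above is easier to follow as the function it produces. Reconstructed from the context and "+" lines, elv_completed_request() now reads as below; the elevator_t *e declaration is assumed from the unchanged top of the function, which the hunk does not show:

void elv_completed_request(request_queue_t *q, struct request *rq)
{
        elevator_t *e = q->elevator;        /* assumed; not visible in the hunk */

        /*
         * request is released from the driver, io must be done
         */
        if (blk_account_rq(rq)) {
                q->in_flight--;
                if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn)
                        e->ops->elevator_completed_req_fn(q, rq);
        }

        /*
         * Check if the queue is waiting for fs requests to be
         * drained for flush sequence.
         */
        if (unlikely(q->ordseq)) {
                struct request *first_rq = list_entry_rq(q->queue_head.next);

                if (q->in_flight == 0 &&
                    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
                    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
                        blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
                        q->request_fn(q);
                }
        }
}

The point of the reordering: the queue head is no longer dereferenced on every completion, only under the unlikely(q->ordseq) branch, and the elevator's completed_req hook now runs before the flush-drain check rather than after it.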
diff --git a/block/genhd.c b/block/genhd.c
index f1ed83f3f083..db57546a709d 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -38,34 +38,100 @@ static inline int major_to_index(int major)
         return major % MAX_PROBE_HASH;
 }
 
-#ifdef CONFIG_PROC_FS
-/* get block device names in somewhat random order */
-int get_blkdev_list(char *p, int used)
+struct blkdev_info {
+        int index;
+        struct blk_major_name *bd;
+};
+
+/*
+ * iterate over a list of blkdev_info structures. allows
+ * the major_names array to be iterated over from outside this file
+ * must be called with the block_subsys_sem held
+ */
+void *get_next_blkdev(void *dev)
+{
+        struct blkdev_info *info;
+
+        if (dev == NULL) {
+                info = kmalloc(sizeof(*info), GFP_KERNEL);
+                if (!info)
+                        goto out;
+                info->index = 0;
+                info->bd = major_names[info->index];
+                if (info->bd)
+                        goto out;
+        } else {
+                info = dev;
+        }
+
+        while (info->index < ARRAY_SIZE(major_names)) {
+                if (info->bd)
+                        info->bd = info->bd->next;
+                if (info->bd)
+                        goto out;
+                /*
+                 * No devices on this chain, move to the next
+                 */
+                info->index++;
+                info->bd = (info->index < ARRAY_SIZE(major_names)) ?
+                        major_names[info->index] : NULL;
+                if (info->bd)
+                        goto out;
+        }
+
+out:
+        return info;
+}
+
+void *acquire_blkdev_list(void)
+{
+        down(&block_subsys_sem);
+        return get_next_blkdev(NULL);
+}
+
+void release_blkdev_list(void *dev)
+{
+        up(&block_subsys_sem);
+        kfree(dev);
+}
+
+
+/*
+ * Count the number of records in the blkdev_list.
+ * must be called with the block_subsys_sem held
+ */
+int count_blkdev_list(void)
 {
         struct blk_major_name *n;
-        int i, len;
+        int i, count;
 
-        len = snprintf(p, (PAGE_SIZE-used), "\nBlock devices:\n");
+        count = 0;
 
-        down(&block_subsys_sem);
         for (i = 0; i < ARRAY_SIZE(major_names); i++) {
-                for (n = major_names[i]; n; n = n->next) {
-                        /*
-                         * If the current string plus the 5 extra characters
-                         * in the line would run us off the page, then we're done
-                         */
-                        if ((len + used + strlen(n->name) + 5) >= PAGE_SIZE)
-                                goto page_full;
-                        len += sprintf(p+len, "%3d %s\n",
-                                       n->major, n->name);
-                }
+                for (n = major_names[i]; n; n = n->next)
+                        count++;
         }
-page_full:
-        up(&block_subsys_sem);
 
-        return len;
+        return count;
 }
-#endif
+
+/*
+ * extract the major and name values from a blkdev_info struct
+ * passed in as a void *dev. Must be called with
+ * block_subsys_sem held
+ */
+int get_blkdev_info(void *dev, int *major, char **name)
+{
+        struct blkdev_info *info = dev;
+
+        if (info->bd == NULL)
+                return 1;
+
+        *major = info->bd->major;
+        *name = info->bd->name;
+        return 0;
+}
+
 
 int register_blkdev(unsigned int major, const char *name)
 {
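
The functions added above form a cursor-style iterator over major_names for code outside this file (the deleted #ifdef CONFIG_PROC_FS block suggests the /proc "Block devices" listing is the intended consumer). A minimal sketch of a caller, assuming only the API introduced here; the printk loop is illustrative, not part of the patch:

/* hypothetical consumer of the new iterator API */
static void dump_blkdev_majors(void)
{
        void *dev;
        int major;
        char *name;

        dev = acquire_blkdev_list();        /* takes block_subsys_sem */
        while (dev && get_blkdev_info(dev, &major, &name) == 0) {
                printk("%3d %s\n", major, name);
                dev = get_next_blkdev(dev);        /* advances the cursor in place */
        }
        release_blkdev_list(dev);        /* drops the sem, frees the cursor */
}

get_next_blkdev() mutates and returns the same blkdev_info cursor; exhaustion is signalled by info->bd going NULL, which get_blkdev_info() reports as 1. Note that release_blkdev_list() must run even when acquire_blkdev_list() returned NULL (kmalloc failure), since the semaphore is already held and kfree(NULL) is safe.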
diff --git a/block/ioctl.c b/block/ioctl.c
index 6e278474f9a8..e1109491c234 100644
--- a/block/ioctl.c
+++ b/block/ioctl.c
@@ -1,6 +1,7 @@
-#include <linux/sched.h>                /* for capable() */
+#include <linux/capability.h>
 #include <linux/blkdev.h>
 #include <linux/blkpg.h>
+#include <linux/hdreg.h>
 #include <linux/backing-dev.h>
 #include <linux/buffer_head.h>
 #include <linux/smp_lock.h>
@@ -245,6 +246,27 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
                 set_device_ro(bdev, n);
                 unlock_kernel();
                 return 0;
+        case HDIO_GETGEO: {
+                struct hd_geometry geo;
+
+                if (!arg)
+                        return -EINVAL;
+                if (!disk->fops->getgeo)
+                        return -ENOTTY;
+
+                /*
+                 * We need to set the startsect first, the driver may
+                 * want to override it.
+                 */
+                geo.start = get_start_sect(bdev);
+                ret = disk->fops->getgeo(bdev, &geo);
+                if (ret)
+                        return ret;
+                if (copy_to_user((struct hd_geometry __user *)arg, &geo,
+                                        sizeof(geo)))
+                        return -EFAULT;
+                return 0;
+        }
         }
 
         lock_kernel();
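
This hunk moves HDIO_GETGEO into generic code: blkdev_ioctl() pre-fills geo.start and performs the copy_to_user(), and a driver only reports geometry through the new fops->getgeo hook. A sketch of the driver side under that assumption (the mydisk_* names and geometry values are hypothetical):

/* hypothetical driver implementing the new ->getgeo() hook */
static int mydisk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        /*
         * geo->start was pre-filled by blkdev_ioctl() from
         * get_start_sect(bdev); override it only if needed.
         */
        geo->heads = 4;
        geo->sectors = 16;
        geo->cylinders = get_capacity(bdev->bd_disk) >> 6;        /* 4 * 16 = 64 */
        return 0;
}

static struct block_device_operations mydisk_fops = {
        .owner        = THIS_MODULE,
        .getgeo        = mydisk_getgeo,
        /* open/release/ioctl as before */
};

The payoff is that individual drivers no longer duplicate the start-sector and copy_to_user() handling in their own ioctl methods.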
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 91d3b4828c49..8e27d0ab0d7c 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -26,7 +26,8 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
 
 /*
  * for max sense size
@@ -62,13 +63,15 @@ static wait_queue_head_t congestion_wqh[2] = {
 /*
  * Controlling structure to kblockd
  */
 static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME        (HZ/50UL)
 
@@ -207,6 +210,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+        q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q:  the request queue for the device to be affected
@@ -270,6 +280,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 static inline void rq_init(request_queue_t *q, struct request *rq)
 {
         INIT_LIST_HEAD(&rq->queuelist);
+        INIT_LIST_HEAD(&rq->donelist);
 
         rq->errors = 0;
         rq->rq_status = RQ_ACTIVE;
@@ -286,6 +297,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
         rq->sense = NULL;
         rq->end_io = NULL;
         rq->end_io_data = NULL;
+        rq->completion_data = NULL;
 }
 
 /**
@@ -2735,30 +2747,6 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
         return 0;
 }
 
-/**
- * blk_attempt_remerge  - attempt to remerge active head with next request
- * @q:    The &request_queue_t belonging to the device
- * @rq:   The head request (usually)
- *
- * Description:
- *    For head-active devices, the queue can easily be unplugged so quickly
- *    that proper merging is not done on the front request. This may hurt
- *    performance greatly for some devices. The block layer cannot safely
- *    do merging on that first request for these queues, but the driver can
- *    call this function and make it happen any way. Only the driver knows
- *    when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-        unsigned long flags;
-
-        spin_lock_irqsave(q->queue_lock, flags);
-        attempt_back_merge(q, rq);
-        spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_attempt_remerge);
-
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
         req->flags |= REQ_CMD;
@@ -3287,6 +3275,87 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 EXPORT_SYMBOL(end_that_request_chunk);
 
 /*
+ * splice the completion data to a local structure and hand off to
+ * process_completion_queue() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+        struct list_head *cpu_list;
+        LIST_HEAD(local_list);
+
+        local_irq_disable();
+        cpu_list = &__get_cpu_var(blk_cpu_done);
+        list_splice_init(cpu_list, &local_list);
+        local_irq_enable();
+
+        while (!list_empty(&local_list)) {
+                struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+                list_del_init(&rq->donelist);
+                rq->q->softirq_done_fn(rq);
+        }
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+                          void *hcpu)
+{
+        /*
+         * If a CPU goes away, splice its entries to the current CPU
+         * and trigger a run of the softirq
+         */
+        if (action == CPU_DEAD) {
+                int cpu = (unsigned long) hcpu;
+
+                local_irq_disable();
+                list_splice_init(&per_cpu(blk_cpu_done, cpu),
+                                 &__get_cpu_var(blk_cpu_done));
+                raise_softirq_irqoff(BLOCK_SOFTIRQ);
+                local_irq_enable();
+        }
+
+        return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+        .notifier_call        = blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+        struct list_head *cpu_list;
+        unsigned long flags;
+
+        BUG_ON(!req->q->softirq_done_fn);
+
+        local_irq_save(flags);
+
+        cpu_list = &__get_cpu_var(blk_cpu_done);
+        list_add_tail(&req->donelist, cpu_list);
+        raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+        local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+
+/*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req, int uptodate)
@@ -3364,6 +3433,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+        int i;
+
         kblockd_workqueue = create_workqueue("kblockd");
         if (!kblockd_workqueue)
                 panic("Failed to create kblockd\n");
@@ -3377,6 +3448,14 @@ int __init blk_dev_init(void)
         iocontext_cachep = kmem_cache_create("blkdev_ioc",
                         sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+        for (i = 0; i < NR_CPUS; i++)
+                INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+        open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+        register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
         blk_max_low_pfn = max_low_pfn;
         blk_max_pfn = max_pfn;
 
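
Taken together, the ll_rw_blk.c hunks add a per-CPU softirq completion path: a driver registers a softirq_done_fn once, calls blk_complete_request() from its interrupt handler, and the expensive end-of-request work runs later in BLOCK_SOFTIRQ context (with entries migrated off dead CPUs by the notifier above). A sketch of a driver adopting it; the mydrv_* names are hypothetical, and the request is passed via dev_id only to keep the example self-contained:

/* hypothetical driver using the new softirq completion path */
static void mydrv_softirq_done(struct request *rq)
{
        request_queue_t *q = rq->q;
        int uptodate = !rq->errors;

        /* runs in BLOCK_SOFTIRQ context, not in the hardware IRQ */
        end_that_request_chunk(rq, uptodate, rq->hard_nr_sectors << 9);

        spin_lock_irq(q->queue_lock);
        end_that_request_last(rq, uptodate);        /* queue lock must be held */
        spin_unlock_irq(q->queue_lock);
}

static irqreturn_t mydrv_isr(int irq, void *dev_id, struct pt_regs *regs)
{
        struct request *rq = dev_id;        /* real drivers look up the finished rq */

        /* cheap IRQ-time work: queue rq on this CPU's blk_cpu_done list */
        blk_complete_request(rq);
        return IRQ_HANDLED;
}

static int __init mydrv_setup_queue(request_queue_t *q)
{
        /* once at init time; mandatory for any queue completed this way,
         * since blk_complete_request() BUG()s without a registered handler */
        blk_queue_softirq_done(q, mydrv_softirq_done);
        return 0;
}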
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c2ac36dfe4f3..cc72210687eb 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -21,6 +21,7 @@
 #include <linux/string.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
+#include <linux/capability.h>
 #include <linux/completion.h>
 #include <linux/cdrom.h>
 #include <linux/slab.h>
@@ -190,16 +191,21 @@ static int verify_command(struct file *file, unsigned char *cmd)
         safe_for_write(GPCMD_SET_STREAMING),
         };
         unsigned char type = cmd_type[cmd[0]];
+        int has_write_perm = 0;
 
         /* Anybody who can open the device can do a read-safe command */
         if (type & CMD_READ_SAFE)
                 return 0;
 
+        /*
+         * file can be NULL from ioctl_by_bdev()...
+         */
+        if (file)
+                has_write_perm = file->f_mode & FMODE_WRITE;
+
         /* Write-safe commands just require a writable open.. */
-        if (type & CMD_WRITE_SAFE) {
-                if (file->f_mode & FMODE_WRITE)
-                        return 0;
-        }
+        if ((type & CMD_WRITE_SAFE) && has_write_perm)
+                return 0;
 
         /* And root can do any command.. */
         if (capable(CAP_SYS_RAWIO))
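
The has_write_perm indirection exists because verify_command() can be reached with file == NULL from in-kernel callers, as the new comment notes; previously a CMD_WRITE_SAFE command on that path dereferenced the NULL pointer. A sketch of such a caller (ioctl_by_bdev() is the helper named in the comment; the surrounding function and command choice are illustrative):

/* hypothetical in-kernel caller reaching verify_command() with file == NULL */
static int mymod_send_scsi_cmd(struct block_device *bdev, void __user *cmd_buf)
{
        /*
         * ioctl_by_bdev() has no struct file to pass down, so
         * has_write_perm stays 0 and a write-safe command now falls
         * through to the capable(CAP_SYS_RAWIO) check instead of
         * oopsing on file->f_mode.
         */
        return ioctl_by_bdev(bdev, SCSI_IOCTL_SEND_COMMAND,
                             (unsigned long)cmd_buf);
}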