author		Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 12:29:55 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-10-17 12:29:55 -0400
commit		c53dbf54863e7f3b0b8810dda2bdd0290006bdac
tree		f783074f1bec1112bf1148a077e0114a38403ad4
parent		b73b636e8987f8728c6c700377615757691b9a55
parent		f73e2d13a16cc88c4faa4729967f92bfeec8a142
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  block: remove __generic_unplug_device() from exports
  block: move q->unplug_work initialization
  blktrace: pass zfcp driver data
  blktrace: add support for driver data
  block: fix current kernel-doc warnings
  block: only call ->request_fn when the queue is not stopped
  block: simplify string handling in elv_iosched_store()
  block: fix kernel-doc for blk_alloc_devt()
  block: fix nr_phys_segments miscalculation bug
  block: add partition attribute for partition number
  block: add BIG FAT WARNING to CONFIG_DEBUG_BLOCK_EXT_DEVT
  softirq: Add support for triggering softirq work on softirqs.
-rw-r--r--	block/blk-core.c		26
-rw-r--r--	block/blk-merge.c		20
-rw-r--r--	block/blk-settings.c		2
-rw-r--r--	block/blk.h			1
-rw-r--r--	block/elevator.c		16
-rw-r--r--	block/genhd.c			3
-rw-r--r--	drivers/ide/ide-io.c		4
-rw-r--r--	drivers/s390/scsi/zfcp_def.h	2
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.c	34
-rw-r--r--	drivers/s390/scsi/zfcp_fsf.h	12
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	1
-rw-r--r--	fs/block_dev.c			2
-rw-r--r--	fs/partitions/check.c		10
-rw-r--r--	include/linux/bio.h		7
-rw-r--r--	include/linux/blkdev.h		1
-rw-r--r--	include/linux/blktrace_api.h	32
-rw-r--r--	include/linux/interrupt.h	21
-rw-r--r--	include/linux/smp.h		4
-rw-r--r--	kernel/softirq.c		129
-rw-r--r--	lib/Kconfig.debug		5
20 files changed, 305 insertions, 27 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 9e79a485e4f3..c3df30cfb3fc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -257,7 +257,6 @@ void __generic_unplug_device(struct request_queue *q)
 
 	q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -325,6 +324,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
@@ -399,8 +401,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -418,6 +425,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -501,6 +514,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -884,7 +898,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
@@ -1003,8 +1018,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats() - Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 908d3e11ac52..8681cd6f9911 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -77,12 +77,20 @@ void blk_recalc_rq_segments(struct request *rq)
 			continue;
 		}
 new_segment:
+		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+			rq->bio->bi_seg_front_size = seg_size;
+
 		nr_phys_segs++;
 		bvprv = bv;
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
 
+	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+		rq->bio->bi_seg_front_size = seg_size;
+	if (seg_size > rq->biotail->bi_seg_back_size)
+		rq->biotail->bi_seg_back_size = seg_size;
+
 	rq->nr_phys_segments = nr_phys_segs;
 }
 
88 96
@@ -106,7 +114,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
-	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+	    q->max_segment_size)
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -309,6 +318,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
+	unsigned int seg_size =
+		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
 	/*
 	 * First check if the either of the requests are re-queued
@@ -324,8 +335,13 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (blk_phys_contig_segment(q, req->biotail, next->bio))
+	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+		if (req->nr_phys_segments == 1)
+			req->bio->bi_seg_front_size = seg_size;
+		if (next->nr_phys_segments == 1)
+			next->biotail->bi_seg_back_size = seg_size;
 		total_phys_segments--;
+	}
 
 	if (total_phys_segments > q->max_phys_segments)
 		return 0;
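
A note on the nr_phys_segments fix above: a physical segment can span the boundary between two bios, so when deciding whether two queued requests may merge into one segment, only the back segment of the front request and the front segment of the back request coalesce. The old check summed the bios' total byte counts (bi_size) against max_segment_size, which is the wrong quantity. A standalone toy model of the corrected boundary check (simplified types and values, not kernel code):

/* Toy model of the corrected segment-size check; illustrative only. */
#include <assert.h>

struct toy_bio {
	unsigned int total_size;	/* all bytes in the bio (bi_size) */
	unsigned int seg_front_size;	/* first mergeable segment */
	unsigned int seg_back_size;	/* last mergeable segment */
};

/* Only the two boundary segments join into one physical segment, so
 * only their sizes are summed against the queue limit.
 */
static int boundary_merge_ok(const struct toy_bio *bio,
			     const struct toy_bio *nxt,
			     unsigned int max_segment_size)
{
	return bio->seg_back_size + nxt->seg_front_size <= max_segment_size;
}

int main(void)
{
	/* Two 64k bios whose boundary segments are 4k each: the joined
	 * segment is 8k, well under a 64k limit, even though the old
	 * bi_size-based sum (64k + 64k) would have looked oversized.
	 */
	struct toy_bio a = { 65536, 4096, 4096 };
	struct toy_bio b = { 65536, 4096, 4096 };

	assert(boundary_merge_ok(&a, &b, 65536));
	return 0;
}
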
diff --git a/block/blk-settings.c b/block/blk-settings.c
index b21dcdb64151..41392fbe19ff 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -141,8 +141,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
-
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
 
diff --git a/block/blk.h b/block/blk.h
index e5c579769963..d2e49af90db5 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -20,6 +20,7 @@ void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
 
 /*
  * Internal atomic flags for request handling
diff --git a/block/elevator.c b/block/elevator.c
index 04518921db31..59173a69ebdf 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -612,7 +612,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 *   processing.
 		 */
 		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -950,7 +950,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			q->request_fn(q);
+			blk_start_queueing(q);
 		}
 	}
 }
@@ -1109,8 +1109,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	elv_drain_elevator(q);
 
 	while (q->rq.elvpriv) {
-		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -1166,15 +1165,10 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 			  size_t count)
 {
 	char elevator_name[ELV_NAME_MAX];
-	size_t len;
 	struct elevator_type *e;
 
-	elevator_name[sizeof(elevator_name) - 1] = '\0';
-	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
-	len = strlen(elevator_name);
-
-	if (len && elevator_name[len - 1] == '\n')
-		elevator_name[len - 1] = '\0';
+	strlcpy(elevator_name, name, sizeof(elevator_name));
+	strstrip(elevator_name);
 
 	e = elevator_get(elevator_name);
 	if (!e) {
diff --git a/block/genhd.c b/block/genhd.c
index 4cd3433c99ac..646e1d2507c7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -358,7 +358,6 @@ static int blk_mangle_minor(int minor)
 /**
  * blk_alloc_devt - allocate a dev_t for a partition
  * @part: partition to allocate dev_t for
- * @gfp_mask: memory allocation flag
  * @devt: out parameter for resulting dev_t
  *
  * Allocate a dev_t for block device.
@@ -535,7 +534,7 @@ void unlink_gendisk(struct gendisk *disk)
 /**
  * get_gendisk - get partitioning information for a given device
  * @devt: device to get partitioning information for
- * @part: returned partition index
+ * @partno: returned partition index
  *
  * This function gets the structure containing partitioning
  * information for the given device @devt.
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index 77c6eaeacefa..7162d67562af 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -1493,8 +1493,8 @@ void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq)
 
 	spin_lock_irqsave(&ide_lock, flags);
 	hwgroup->rq = NULL;
-	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 1);
-	__generic_unplug_device(drive->queue);
+	__elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0);
+	blk_start_queueing(drive->queue);
 	spin_unlock_irqrestore(&ide_lock, flags);
 }
 
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 8a13071c444c..9ce4c75bd190 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -583,6 +583,8 @@ struct zfcp_fsf_req {
 	unsigned long long	issued;		/* request sent time (STCK) */
 	struct zfcp_unit	*unit;
 	void			(*handler)(struct zfcp_fsf_req *);
+	u16			qdio_outb_usage;/* usage of outbound queue */
+	u16			qdio_inb_usage;	/* usage of inbound queue */
 };
 
 /* driver data */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 739356a5c123..5ae1d497e5ed 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -6,6 +6,7 @@
  * Copyright IBM Corporation 2002, 2008
  */
 
+#include <linux/blktrace_api.h>
 #include "zfcp_ext.h"
 
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
@@ -777,6 +778,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 	list_add_tail(&req->list, &adapter->req_list[idx]);
 	spin_unlock(&adapter->req_list_lock);
 
+	req->qdio_outb_usage = atomic_read(&req_q->count);
 	req->issued = get_clock();
 	if (zfcp_qdio_send(req)) {
 		/* Queues are down..... */
@@ -2082,6 +2084,36 @@ static void zfcp_fsf_req_latency(struct zfcp_fsf_req *req)
 	spin_unlock_irqrestore(&unit->latencies.lock, flags);
 }
 
+#ifdef CONFIG_BLK_DEV_IO_TRACE
+static void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
+{
+	struct fsf_qual_latency_info *lat_inf;
+	struct scsi_cmnd *scsi_cmnd = (struct scsi_cmnd *)fsf_req->data;
+	struct request *req = scsi_cmnd->request;
+	struct zfcp_blk_drv_data trace;
+	int ticks = fsf_req->adapter->timer_ticks;
+
+	trace.flags = 0;
+	trace.magic = ZFCP_BLK_DRV_DATA_MAGIC;
+	if (fsf_req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA) {
+		trace.flags |= ZFCP_BLK_LAT_VALID;
+		lat_inf = &fsf_req->qtcb->prefix.prot_status_qual.latency_info;
+		trace.channel_lat = lat_inf->channel_lat * ticks;
+		trace.fabric_lat = lat_inf->fabric_lat * ticks;
+	}
+	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
+		trace.flags |= ZFCP_BLK_REQ_ERROR;
+	trace.inb_usage = fsf_req->qdio_inb_usage;
+	trace.outb_usage = fsf_req->qdio_outb_usage;
+
+	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
+}
+#else
+static inline void zfcp_fsf_trace_latency(struct zfcp_fsf_req *fsf_req)
+{
+}
+#endif
+
 static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 {
 	struct scsi_cmnd *scpnt = req->data;
@@ -2114,6 +2146,8 @@ static void zfcp_fsf_send_fcp_command_task_handler(struct zfcp_fsf_req *req)
 	if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA)
 		zfcp_fsf_req_latency(req);
 
+	zfcp_fsf_trace_latency(req);
+
 	if (unlikely(fcp_rsp_iu->validity.bits.fcp_rsp_len_valid)) {
 		if (fcp_rsp_info[3] == RSP_CODE_GOOD)
 			set_host_byte(scpnt, DID_OK);
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index fd3a88777ac8..fa2a31780611 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -439,4 +439,16 @@ struct fsf_qtcb {
 	u8 log[FSF_QTCB_LOG_SIZE];
 } __attribute__ ((packed));
 
+struct zfcp_blk_drv_data {
+#define ZFCP_BLK_DRV_DATA_MAGIC		0x1
+	u32 magic;
+#define ZFCP_BLK_LAT_VALID		0x1
+#define ZFCP_BLK_REQ_ERROR		0x2
+	u16 flags;
+	u8 inb_usage;
+	u8 outb_usage;
+	u64 channel_lat;
+	u64 fabric_lat;
+} __attribute__ ((packed));
+
 #endif /* FSF_H */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 3e05080e62d4..664752f90b20 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -115,6 +115,7 @@ static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
 	fsf_req->sbal_response = sbal_idx;
+	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
 	zfcp_fsf_req_complete(fsf_req);
 }
 
diff --git a/fs/block_dev.c b/fs/block_dev.c
index d84f0469a016..218408eed1bb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1262,7 +1262,7 @@ EXPORT_SYMBOL(ioctl_by_bdev);
 
 /**
  * lookup_bdev - lookup a struct block_device by name
- * @pathname: special file representing the block device
+ * @path: special file representing the block device
  *
  * Get a reference to the blockdevice at @pathname in the current
  * namespace if possible and return it. Return ERR_PTR(error)
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index fbeb2f372a93..cfb0c80690aa 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -195,6 +195,14 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
 	return ERR_PTR(res);
 }
 
+static ssize_t part_partition_show(struct device *dev,
+				   struct device_attribute *attr, char *buf)
+{
+	struct hd_struct *p = dev_to_part(dev);
+
+	return sprintf(buf, "%d\n", p->partno);
+}
+
 static ssize_t part_start_show(struct device *dev,
 			       struct device_attribute *attr, char *buf)
 {
@@ -260,6 +268,7 @@ ssize_t part_fail_store(struct device *dev,
 }
 #endif
 
+static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL);
 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL);
 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL);
 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL);
@@ -269,6 +278,7 @@ static struct device_attribute dev_attr_fail =
 #endif
 
 static struct attribute *part_attrs[] = {
+	&dev_attr_partition.attr,
 	&dev_attr_start.attr,
 	&dev_attr_size.attr,
 	&dev_attr_stat.attr,
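
With the hunks above, each partition gains a read-only 'partition' attribute alongside start/size/stat, containing the partition number followed by a newline (see part_partition_show()). A hypothetical userspace reader; the sysfs path below is an example device, not something this patch guarantees to exist:

/* Hypothetical userspace sketch: read the new 'partition' attribute. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/sda1/partition", "r");
	int partno;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%d", &partno) == 1)
		printf("sda1 is partition %d of sda\n", partno);
	fclose(f);
	return 0;
}
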
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 1beda208cbfb..1c91a176b9ae 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -79,6 +79,13 @@ struct bio {
 
 	unsigned int		bi_size;	/* residual I/O count */
 
+	/*
+	 * To keep track of the max segment size, we account for the
+	 * sizes of the first and last mergeable segments in this bio.
+	 */
+	unsigned int		bi_seg_front_size;
+	unsigned int		bi_seg_back_size;
+
 	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	unsigned int		bi_comp_cpu;	/* completion CPU */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3491d225268..b4fe68fe3a57 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -865,7 +865,6 @@ extern void blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
 extern void generic_unplug_device(struct request_queue *);
-extern void __generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 3a31eb506164..bdf505d33e77 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -24,6 +24,7 @@ enum blktrace_cat {
 	BLK_TC_AHEAD	= 1 << 11,	/* readahead */
 	BLK_TC_META	= 1 << 12,	/* metadata */
 	BLK_TC_DISCARD	= 1 << 13,	/* discard requests */
+	BLK_TC_DRV_DATA	= 1 << 14,	/* binary per-driver data */
 
 	BLK_TC_END	= 1 << 15,	/* only 16-bits, reminder */
 };
@@ -51,6 +52,7 @@ enum blktrace_act {
 	__BLK_TA_BOUNCE,		/* bio was bounced */
 	__BLK_TA_REMAP,			/* bio was remapped */
 	__BLK_TA_ABORT,			/* request aborted */
+	__BLK_TA_DRV_DATA,		/* driver-specific binary data */
 };
 
 /*
@@ -82,6 +84,7 @@ enum blktrace_notify {
 #define BLK_TA_BOUNCE		(__BLK_TA_BOUNCE)
 #define BLK_TA_REMAP		(__BLK_TA_REMAP | BLK_TC_ACT(BLK_TC_QUEUE))
 #define BLK_TA_ABORT		(__BLK_TA_ABORT | BLK_TC_ACT(BLK_TC_QUEUE))
+#define BLK_TA_DRV_DATA		(__BLK_TA_DRV_DATA | BLK_TC_ACT(BLK_TC_DRV_DATA))
 
 #define BLK_TN_PROCESS		(__BLK_TN_PROCESS | BLK_TC_ACT(BLK_TC_NOTIFY))
 #define BLK_TN_TIMESTAMP	(__BLK_TN_TIMESTAMP | BLK_TC_ACT(BLK_TC_NOTIFY))
@@ -317,6 +320,34 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio,
 	__blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
+/**
+ * blk_add_driver_data - Add binary message with driver-specific data
+ * @q:		queue the io is for
+ * @rq:		io request
+ * @data:	driver-specific data
+ * @len:	length of driver-specific data
+ *
+ * Description:
+ *     Some drivers might want to write driver-specific data per request.
+ *
+ **/
+static inline void blk_add_driver_data(struct request_queue *q,
+				       struct request *rq,
+				       void *data, size_t len)
+{
+	struct blk_trace *bt = q->blk_trace;
+
+	if (likely(!bt))
+		return;
+
+	if (blk_pc_request(rq))
+		__blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA,
+				rq->errors, len, data);
+	else
+		__blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9,
+				0, BLK_TA_DRV_DATA, rq->errors, len, data);
+}
+
 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
 			   char __user *arg);
 extern int blk_trace_startstop(struct request_queue *q, int start);
@@ -330,6 +361,7 @@ extern int blk_trace_remove(struct request_queue *q);
 #define blk_add_trace_generic(q, rq, rw, what)		do { } while (0)
 #define blk_add_trace_pdu_int(q, what, bio, pdu)	do { } while (0)
 #define blk_add_trace_remap(q, bio, dev, f, t)		do {} while (0)
+#define blk_add_driver_data(q, rq, data, len)		do {} while (0)
 #define do_blk_trace_setup(q, name, dev, buts)		(-ENOTTY)
 #define blk_trace_setup(q, name, dev, arg)		(-ENOTTY)
 #define blk_trace_startstop(q, start)			(-ENOTTY)
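
The helper added above is meant to be called from a driver's I/O path, typically once per request on completion; the zfcp changes in this merge are the first in-tree user. A minimal sketch of the calling pattern, with hypothetical mydrv_* names (only blk_add_driver_data() and its arguments come from this patch):

/* Hypothetical driver-side sketch; mydrv_* names are illustrative.
 * blk_add_driver_data() compiles away to a no-op when
 * CONFIG_BLK_DEV_IO_TRACE is not set, so no #ifdef is needed here.
 */
#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

struct mydrv_trace {
	u32 magic;	/* lets a trace decoder recognize the payload */
	u16 flags;
	u64 hw_latency;	/* hypothetical device-reported latency */
} __attribute__ ((packed));

static void mydrv_trace_completion(struct request *rq, u64 hw_latency)
{
	struct mydrv_trace t = {
		.magic = 0x4d594456,	/* arbitrary driver magic */
		.flags = 0,
		.hw_latency = hw_latency,
	};

	/* Emits a BLK_TA_DRV_DATA event carrying the struct verbatim;
	 * the decoder must know the layout (hence the magic field).
	 */
	blk_add_driver_data(rq->q, rq, &t, sizeof(t));
}
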
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 54b3623434ec..35a61dc60d51 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -11,6 +11,8 @@
 #include <linux/hardirq.h>
 #include <linux/sched.h>
 #include <linux/irqflags.h>
+#include <linux/smp.h>
+#include <linux/percpu.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -273,6 +275,25 @@ extern void softirq_init(void);
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
 
+/* This is the worklist that queues up per-cpu softirq work.
+ *
+ * send_remote_softirq() adds work to these lists, and
+ * the softirq handler itself dequeues from them. The queues
+ * are protected by disabling local cpu interrupts and they must
+ * only be accessed by the local cpu that they are for.
+ */
+DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+
+/* Try to send a softirq to a remote cpu. If this cannot be done, the
+ * work will be queued to the local cpu.
+ */
+extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);
+
+/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
+ * and compute the current cpu, passed in as 'this_cpu'.
+ */
+extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
+				  int this_cpu, int softirq);
 
 /* Tasklets --- multithreaded analogue of BHs.
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 66484d4a8459..2e4d58b26c06 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -7,6 +7,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/types.h>
 #include <linux/list.h>
 #include <linux/cpumask.h>
 
@@ -16,7 +17,8 @@ struct call_single_data {
 	struct list_head list;
 	void (*func) (void *info);
 	void *info;
-	unsigned int flags;
+	u16 flags;
+	u16 priv;
 };
 
 #ifdef CONFIG_SMP
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 37d67aa2d56f..83ba21a13bd4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
  *	Distribute under GPLv2.
  *
  *	Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
+ *
+ *	Remote softirq infrastructure is by Jens Axboe.
  */
 
 #include <linux/module.h>
@@ -474,17 +476,144 @@ void tasklet_kill(struct tasklet_struct *t)
 
 EXPORT_SYMBOL(tasklet_kill);
 
+DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
+EXPORT_PER_CPU_SYMBOL(softirq_work_list);
+
+static void __local_trigger(struct call_single_data *cp, int softirq)
+{
+	struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
+
+	list_add_tail(&cp->list, head);
+
+	/* Trigger the softirq only if the list was previously empty. */
+	if (head->next == &cp->list)
+		raise_softirq_irqoff(softirq);
+}
+
+#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
+static void remote_softirq_receive(void *data)
+{
+	struct call_single_data *cp = data;
+	unsigned long flags;
+	int softirq;
+
+	softirq = cp->priv;
+
+	local_irq_save(flags);
+	__local_trigger(cp, softirq);
+	local_irq_restore(flags);
+}
+
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	if (cpu_online(cpu)) {
+		cp->func = remote_softirq_receive;
+		cp->info = cp;
+		cp->flags = 0;
+		cp->priv = softirq;
+
+		__smp_call_function_single(cpu, cp);
+		return 0;
+	}
+	return 1;
+}
+#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
+static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	return 1;
+}
+#endif
+
+/**
+ * __send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @this_cpu: the currently executing cpu
+ * @softirq: the softirq for the work
+ *
+ * Attempt to schedule softirq work on a remote cpu. If this cannot be
+ * done, the work is instead queued up on the local cpu.
+ *
+ * Interrupts must be disabled.
+ */
+void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
+{
+	if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
+		__local_trigger(cp, softirq);
+}
+EXPORT_SYMBOL(__send_remote_softirq);
+
+/**
+ * send_remote_softirq - try to schedule softirq work on a remote cpu
+ * @cp: private SMP call function data area
+ * @cpu: the remote cpu
+ * @softirq: the softirq for the work
+ *
+ * Like __send_remote_softirq except that disabling interrupts and
+ * computing the current cpu is done for the caller.
+ */
+void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
+{
+	unsigned long flags;
+	int this_cpu;
+
+	local_irq_save(flags);
+	this_cpu = smp_processor_id();
+	__send_remote_softirq(cp, cpu, this_cpu, softirq);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(send_remote_softirq);
+
+static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+					       unsigned long action, void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
+		int cpu = (unsigned long) hcpu;
+		int i;
+
+		local_irq_disable();
+		for (i = 0; i < NR_SOFTIRQS; i++) {
+			struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
+			struct list_head *local_head;
+
+			if (list_empty(head))
+				continue;
+
+			local_head = &__get_cpu_var(softirq_work_list[i]);
+			list_splice_init(head, local_head);
+			raise_softirq_irqoff(i);
+		}
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+	.notifier_call = remote_softirq_cpu_notify,
+};
+
 void __init softirq_init(void)
 {
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
+		int i;
+
 		per_cpu(tasklet_vec, cpu).tail =
 			&per_cpu(tasklet_vec, cpu).head;
 		per_cpu(tasklet_hi_vec, cpu).tail =
 			&per_cpu(tasklet_hi_vec, cpu).head;
+		for (i = 0; i < NR_SOFTIRQS; i++)
+			INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
 	}
 
+	register_hotcpu_notifier(&remote_softirq_cpu_notifier);
+
 	open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 	open_softirq(HI_SOFTIRQ, tasklet_hi_action);
 }
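
To make the new interface concrete: a subsystem embeds a call_single_data in its per-request structure, and send_remote_softirq() either queues it on the target cpu's softirq_work_list (raising the softirq via an IPI) or falls back to the local cpu if the target is offline or is the current cpu. A hypothetical sketch, assuming the chosen softirq's action handler drains its softirq_work_list entry; the mydev_* names are illustrative, not from this merge:

/* Hypothetical sketch of the send_remote_softirq() calling pattern.
 * Assumes the handler for the softirq used here dequeues from its
 * per-cpu softirq_work_list entry.
 */
#include <linux/interrupt.h>
#include <linux/smp.h>

struct mydev_request {
	struct call_single_data csd;	/* must stay live until dequeued */
	int submit_cpu;			/* cpu that issued the request */
	/* ... driver-private completion state ... */
};

static void mydev_raise_completion(struct mydev_request *rq)
{
	/* If submit_cpu is this cpu (or offline), the work is queued
	 * locally; otherwise an IPI runs remote_softirq_receive() on
	 * the target cpu, which queues it there and raises the softirq.
	 * cp->priv carries the softirq number across the IPI.
	 */
	send_remote_softirq(&rq->csd, rq->submit_cpu, BLOCK_SOFTIRQ);
}
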
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 31d784dd80d0..b0f239e443bc 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -652,6 +652,11 @@ config DEBUG_BLOCK_EXT_DEVT
 	depends on BLOCK
 	default n
 	help
+	  BIG FAT WARNING: ENABLING THIS OPTION MIGHT BREAK BOOTING ON
+	  SOME DISTRIBUTIONS. DO NOT ENABLE THIS UNLESS YOU KNOW WHAT
+	  YOU ARE DOING. Distros, please enable this and fix whatever
+	  is broken.
+
 	  Conventionally, block device numbers are allocated from
 	  predetermined contiguous area. However, extended block area
 	  may introduce non-contiguous block device numbers. This