-rw-r--r--   block/elevator.c            |   3
-rw-r--r--   block/ll_rw_blk.c           | 131
-rw-r--r--   block/scsi_ioctl.c          |  13
-rw-r--r--   drivers/block/cciss.c       |  72
-rw-r--r--   drivers/ide/ide-cd.c        |  10
-rw-r--r--   drivers/ide/ide-io.c        |  42
-rw-r--r--   drivers/ide/ide-probe.c     |   2
-rw-r--r--   drivers/scsi/scsi.c         | 109
-rw-r--r--   drivers/scsi/scsi_lib.c     |  36
-rw-r--r--   drivers/scsi/scsi_priv.h    |   1
-rw-r--r--   fs/bio.c                    |   1
-rw-r--r--   include/linux/blkdev.h      |  22
-rw-r--r--   include/linux/elevator.h    |   2
-rw-r--r--   include/linux/ide.h         |   1
-rw-r--r--   include/linux/interrupt.h   |   2
-rw-r--r--   include/linux/rcupdate.h    |  31
-rw-r--r--   kernel/rcupdate.c           |  30
-rw-r--r--   net/netlink/af_netlink.c    |   2
18 files changed, 296 insertions, 214 deletions
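Taken together, the patch retires the SCSI-private SCSI_SOFTIRQ completion path and adds a generic block-layer equivalent: a driver registers a softirq_done_fn on its request queue, its interrupt handler stashes per-command state in rq->completion_data and calls blk_complete_request(), and the expensive end-of-request work then runs in BLOCK_SOFTIRQ context with interrupts enabled. A minimal sketch of the resulting driver-side pattern follows; the mydrv_* names and struct are hypothetical stand-ins, while the blk_* and end_that_request_*() calls are the interface this patch provides.

/*
 * Minimal sketch of a driver adopting the new interface. The mydrv_*
 * names are hypothetical; blk_queue_softirq_done(),
 * blk_complete_request(), rq->completion_data and rq->errors are the
 * pieces introduced below.
 */
#include <linux/blkdev.h>

struct mydrv_cmd {				/* hypothetical per-command state */
	struct request *rq;
	/* ... DMA mappings, controller handle, ... */
};

static void mydrv_unmap_dma(struct mydrv_cmd *cmd)
{
	/* hypothetical: tear down scatter/gather mappings */
}

/* Softirq half: runs later in BLOCK_SOFTIRQ context, interrupts enabled. */
static void mydrv_softirq_done(struct request *rq)
{
	struct mydrv_cmd *cmd = rq->completion_data;
	request_queue_t *q = rq->q;

	mydrv_unmap_dma(cmd);
	end_that_request_chunk(rq, rq->errors, rq->data_len);

	spin_lock_irq(q->queue_lock);	/* end_that_request_last() wants the queue lock */
	end_that_request_last(rq, rq->errors);
	spin_unlock_irq(q->queue_lock);
}

/* Hard-irq half: record the outcome and hand the request off. */
static void mydrv_intr_complete(struct mydrv_cmd *cmd, int uptodate)
{
	cmd->rq->completion_data = cmd;
	cmd->rq->errors = uptodate;
	blk_complete_request(cmd->rq);		/* raises BLOCK_SOFTIRQ */
}

/* Queue setup: register the softirq half once. */
static void mydrv_init_queue(request_queue_t *q)
{
	blk_queue_softirq_done(q, mydrv_softirq_done);
}

The cciss, IDE, and SCSI conversions below all follow this shape: cciss and IDE defer the DMA unmap and final request teardown, and SCSI moves its whole disposition state machine (formerly scsi_softirq()) into the callback.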
diff --git a/block/elevator.c b/block/elevator.c
index 39dcccc82a..99a4d7b2f8 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -64,7 +64,7 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
 }
 EXPORT_SYMBOL(elv_rq_merge_ok);
 
-inline int elv_try_merge(struct request *__rq, struct bio *bio)
+static inline int elv_try_merge(struct request *__rq, struct bio *bio)
 {
 	int ret = ELEVATOR_NO_MERGE;
 
@@ -80,7 +80,6 @@ inline int elv_try_merge(struct request *__rq, struct bio *bio)
 
 	return ret;
 }
-EXPORT_SYMBOL(elv_try_merge);
 
 static struct elevator_type *elevator_find(const char *name)
 {
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 91d3b4828c..8e27d0ab0d 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -26,7 +26,8 @@
 #include <linux/slab.h>
 #include <linux/swap.h>
 #include <linux/writeback.h>
-#include <linux/blkdev.h>
+#include <linux/interrupt.h>
+#include <linux/cpu.h>
 
 /*
  * for max sense size
@@ -62,13 +63,15 @@ static wait_queue_head_t congestion_wqh[2] = {
 /*
  * Controlling structure to kblockd
  */
 static struct workqueue_struct *kblockd_workqueue;
 
 unsigned long blk_max_low_pfn, blk_max_pfn;
 
 EXPORT_SYMBOL(blk_max_low_pfn);
 EXPORT_SYMBOL(blk_max_pfn);
 
+static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
+
 /* Amount of time in which a process may batch requests */
 #define BLK_BATCH_TIME	(HZ/50UL)
 
@@ -207,6 +210,13 @@ void blk_queue_merge_bvec(request_queue_t *q, merge_bvec_fn *mbfn)
 
 EXPORT_SYMBOL(blk_queue_merge_bvec);
 
+void blk_queue_softirq_done(request_queue_t *q, softirq_done_fn *fn)
+{
+	q->softirq_done_fn = fn;
+}
+
+EXPORT_SYMBOL(blk_queue_softirq_done);
+
 /**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q: the request queue for the device to be affected
@@ -270,6 +280,7 @@ EXPORT_SYMBOL(blk_queue_make_request);
 static inline void rq_init(request_queue_t *q, struct request *rq)
 {
 	INIT_LIST_HEAD(&rq->queuelist);
+	INIT_LIST_HEAD(&rq->donelist);
 
 	rq->errors = 0;
 	rq->rq_status = RQ_ACTIVE;
@@ -286,6 +297,7 @@ static inline void rq_init(request_queue_t *q, struct request *rq)
 	rq->sense = NULL;
 	rq->end_io = NULL;
 	rq->end_io_data = NULL;
+	rq->completion_data = NULL;
 }
 
 /**
@@ -2735,30 +2747,6 @@ static inline int attempt_front_merge(request_queue_t *q, struct request *rq)
 	return 0;
 }
 
-/**
- * blk_attempt_remerge - attempt to remerge active head with next request
- * @q:  The &request_queue_t belonging to the device
- * @rq: The head request (usually)
- *
- * Description:
- *    For head-active devices, the queue can easily be unplugged so quickly
- *    that proper merging is not done on the front request. This may hurt
- *    performance greatly for some devices. The block layer cannot safely
- *    do merging on that first request for these queues, but the driver can
- *    call this function and make it happen any way. Only the driver knows
- *    when it is safe to do so.
- **/
-void blk_attempt_remerge(request_queue_t *q, struct request *rq)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(q->queue_lock, flags);
-	attempt_back_merge(q, rq);
-	spin_unlock_irqrestore(q->queue_lock, flags);
-}
-
-EXPORT_SYMBOL(blk_attempt_remerge);
-
 static void init_request_from_bio(struct request *req, struct bio *bio)
 {
 	req->flags |= REQ_CMD;
@@ -3287,6 +3275,87 @@ int end_that_request_chunk(struct request *req, int uptodate, int nr_bytes)
 EXPORT_SYMBOL(end_that_request_chunk);
 
 /*
+ * splice the completion data to a local structure and hand off to
+ * process_completion_queue() to complete the requests
+ */
+static void blk_done_softirq(struct softirq_action *h)
+{
+	struct list_head *cpu_list;
+	LIST_HEAD(local_list);
+
+	local_irq_disable();
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_splice_init(cpu_list, &local_list);
+	local_irq_enable();
+
+	while (!list_empty(&local_list)) {
+		struct request *rq = list_entry(local_list.next, struct request, donelist);
+
+		list_del_init(&rq->donelist);
+		rq->q->softirq_done_fn(rq);
+	}
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int blk_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
+{
+	/*
+	 * If a CPU goes away, splice its entries to the current CPU
+	 * and trigger a run of the softirq
+	 */
+	if (action == CPU_DEAD) {
+		int cpu = (unsigned long) hcpu;
+
+		local_irq_disable();
+		list_splice_init(&per_cpu(blk_cpu_done, cpu),
+				 &__get_cpu_var(blk_cpu_done));
+		raise_softirq_irqoff(BLOCK_SOFTIRQ);
+		local_irq_enable();
+	}
+
+	return NOTIFY_OK;
+}
+
+
+static struct notifier_block __devinitdata blk_cpu_notifier = {
+	.notifier_call	= blk_cpu_notify,
+};
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+/**
+ * blk_complete_request - end I/O on a request
+ * @req:      the request being processed
+ *
+ * Description:
+ *     Ends all I/O on a request. It does not handle partial completions,
+ *     unless the driver actually implements this in its completion callback
+ *     through requeueing. The actual completion happens out-of-order,
+ *     through a softirq handler. The user must have registered a completion
+ *     callback through blk_queue_softirq_done().
+ **/
+
+void blk_complete_request(struct request *req)
+{
+	struct list_head *cpu_list;
+	unsigned long flags;
+
+	BUG_ON(!req->q->softirq_done_fn);
+
+	local_irq_save(flags);
+
+	cpu_list = &__get_cpu_var(blk_cpu_done);
+	list_add_tail(&req->donelist, cpu_list);
+	raise_softirq_irqoff(BLOCK_SOFTIRQ);
+
+	local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL(blk_complete_request);
+
+/*
  * queue lock must be held
  */
 void end_that_request_last(struct request *req, int uptodate)
@@ -3364,6 +3433,8 @@ EXPORT_SYMBOL(kblockd_flush);
 
 int __init blk_dev_init(void)
 {
+	int i;
+
 	kblockd_workqueue = create_workqueue("kblockd");
 	if (!kblockd_workqueue)
 		panic("Failed to create kblockd\n");
@@ -3377,6 +3448,14 @@ int __init blk_dev_init(void)
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
 			sizeof(struct io_context), 0, SLAB_PANIC, NULL, NULL);
 
+	for (i = 0; i < NR_CPUS; i++)
+		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));
+
+	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq, NULL);
+#ifdef CONFIG_HOTPLUG_CPU
+	register_cpu_notifier(&blk_cpu_notifier);
+#endif
+
 	blk_max_low_pfn = max_low_pfn;
 	blk_max_pfn = max_pfn;
 
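The handoff above needs no spinlock: blk_complete_request() only ever touches the local CPU's blk_cpu_done list with interrupts off, and blk_done_softirq() splices that list onto a private head before walking it, so producer and consumer never contend while requests are being processed. A userspace model of the same splice-and-drain idea, with a mutex standing in for local_irq_disable() and a plain singly linked list for list_head (illustrative sketch only; the kernel version is FIFO via list_add_tail(), this one is LIFO for brevity):

/* Userspace model of the blk_cpu_done splice-and-drain pattern:
 * producers enqueue under a brief critical section, the consumer
 * detaches the whole list in O(1) and drains it outside it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static struct node *pending;		/* stands in for per-CPU blk_cpu_done */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;	/* ~ local_irq_disable() */

/* "Interrupt" side, cf. blk_complete_request(). */
static void complete_request(int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	pthread_mutex_lock(&lock);
	n->next = pending;
	pending = n;
	pthread_mutex_unlock(&lock);
}

/* "Softirq" side, cf. blk_done_softirq(). */
static void done_softirq(void)
{
	pthread_mutex_lock(&lock);
	struct node *local = pending;	/* ~ list_splice_init() */
	pending = NULL;
	pthread_mutex_unlock(&lock);

	while (local) {			/* drain with no lock held */
		struct node *n = local;

		local = n->next;
		printf("completing request %d\n", n->id);
		free(n);
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		complete_request(i);
	done_softirq();
	return 0;
}

The CPU-hotplug notifier exists for the same reason the SCSI version had one: a dead CPU's per-CPU list would otherwise strand completed requests, so its entries are spliced onto the surviving CPU and the softirq is raised again there.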
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index c2ac36dfe4..18de84c8cc 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -190,16 +190,21 @@ static int verify_command(struct file *file, unsigned char *cmd)
 		safe_for_write(GPCMD_SET_STREAMING),
 	};
 	unsigned char type = cmd_type[cmd[0]];
+	int has_write_perm = 0;
 
 	/* Anybody who can open the device can do a read-safe command */
 	if (type & CMD_READ_SAFE)
 		return 0;
 
+	/*
+	 * file can be NULL from ioctl_by_bdev()...
+	 */
+	if (file)
+		has_write_perm = file->f_mode & FMODE_WRITE;
+
 	/* Write-safe commands just require a writable open.. */
-	if (type & CMD_WRITE_SAFE) {
-		if (file->f_mode & FMODE_WRITE)
-			return 0;
-	}
+	if ((type & CMD_WRITE_SAFE) && has_write_perm)
+		return 0;
 
 	/* And root can do any command.. */
 	if (capable(CAP_SYS_RAWIO))
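Two things change in verify_command(): the write-permission test is hoisted into has_write_perm, so a NULL file pointer — possible when the ioctl arrives via ioctl_by_bdev(), i.e. from inside the kernel with no struct file — is no longer dereferenced, and the CMD_WRITE_SAFE branch collapses to a single condition that still falls through to the capable() check when the open is not writable. The resulting permission ladder, modeled in isolation (stand-in parameters and flag values, not the kernel function):

#include <errno.h>
#include <stdio.h>

#define CMD_READ_SAFE	0x01	/* flag values are stand-ins for the model */
#define CMD_WRITE_SAFE	0x02

/*
 * type: the cmd_type[] classification of the SCSI opcode;
 * has_file / file_writable: whether a struct file exists and was
 * opened for writing; capable_rawio: the CAP_SYS_RAWIO check.
 */
static int verify_command_model(int type, int has_file,
				int file_writable, int capable_rawio)
{
	int has_write_perm = has_file && file_writable;

	if (type & CMD_READ_SAFE)
		return 0;			/* any opener may issue it */
	if ((type & CMD_WRITE_SAFE) && has_write_perm)
		return 0;			/* a writable open suffices */
	if (capable_rawio)
		return 0;			/* root escape hatch */
	return -EPERM;				/* otherwise refused */
}

int main(void)
{
	/* write-safe command, kernel-internal caller (no file), no raw-I/O cap:
	 * now degrades safely to -EPERM instead of dereferencing NULL */
	printf("%d\n", verify_command_model(CMD_WRITE_SAFE, 0, 0, 0));
	/* same command with CAP_SYS_RAWIO: allowed */
	printf("%d\n", verify_command_model(CMD_WRITE_SAFE, 0, 0, 1));
	return 0;
}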
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 88452c79fb..e4e9f255bd 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -2178,16 +2178,48 @@ static inline void resend_cciss_cmd( ctlr_info_t *h, CommandList_struct *c)
 
 	start_io(h);
 }
+
+static void cciss_softirq_done(struct request *rq)
+{
+	CommandList_struct *cmd = rq->completion_data;
+	ctlr_info_t *h = hba[cmd->ctlr];
+	u64bit temp64;
+	int i, ddir;
+
+	if (cmd->Request.Type.Direction == XFER_READ)
+		ddir = PCI_DMA_FROMDEVICE;
+	else
+		ddir = PCI_DMA_TODEVICE;
+
+	/* command did not need to be retried */
+	/* unmap the DMA mapping for all the scatter gather elements */
+	for(i=0; i<cmd->Header.SGList; i++) {
+		temp64.val32.lower = cmd->SG[i].Addr.lower;
+		temp64.val32.upper = cmd->SG[i].Addr.upper;
+		pci_unmap_page(h->pdev, temp64.val, cmd->SG[i].Len, ddir);
+	}
+
+	complete_buffers(rq->bio, rq->errors);
+
+#ifdef CCISS_DEBUG
+	printk("Done with %p\n", rq);
+#endif /* CCISS_DEBUG */
+
+	spin_lock_irq(&h->lock);
+	end_that_request_last(rq, rq->errors);
+	cmd_free(h, cmd,1);
+	spin_unlock_irq(&h->lock);
+}
+
 /* checks the status of the job and calls complete buffers to mark all
- * buffers for the completed job.
+ * buffers for the completed job. Note that this function does not need
+ * to hold the hba/queue lock.
  */
 static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 	int timeout)
 {
 	int status = 1;
-	int i;
 	int retry_cmd = 0;
-	u64bit temp64;
 
 	if (timeout)
 		status = 0;
@@ -2295,24 +2327,10 @@ static inline void complete_command( ctlr_info_t *h, CommandList_struct *cmd,
 		resend_cciss_cmd(h,cmd);
 		return;
 	}
-	/* command did not need to be retried */
-	/* unmap the DMA mapping for all the scatter gather elements */
-	for(i=0; i<cmd->Header.SGList; i++) {
-		temp64.val32.lower = cmd->SG[i].Addr.lower;
-		temp64.val32.upper = cmd->SG[i].Addr.upper;
-		pci_unmap_page(hba[cmd->ctlr]->pdev,
-			temp64.val, cmd->SG[i].Len,
-			(cmd->Request.Type.Direction == XFER_READ) ?
-				PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
-	}
-	complete_buffers(cmd->rq->bio, status);
-
-#ifdef CCISS_DEBUG
-	printk("Done with %p\n", cmd->rq);
-#endif /* CCISS_DEBUG */
 
-	end_that_request_last(cmd->rq, status ? 1 : -EIO);
-	cmd_free(h,cmd,1);
+	cmd->rq->completion_data = cmd;
+	cmd->rq->errors = status;
+	blk_complete_request(cmd->rq);
 }
 
 /*
@@ -3199,15 +3217,17 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	drv->queue = q;
 
 	q->backing_dev_info.ra_pages = READ_AHEAD;
 	blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask);
+
+	/* This is a hardware imposed limit. */
+	blk_queue_max_hw_segments(q, MAXSGENTRIES);
 
-	/* This is a hardware imposed limit. */
-	blk_queue_max_hw_segments(q, MAXSGENTRIES);
+	/* This is a limit in the driver and could be eliminated. */
+	blk_queue_max_phys_segments(q, MAXSGENTRIES);
 
-	/* This is a limit in the driver and could be eliminated. */
-	blk_queue_max_phys_segments(q, MAXSGENTRIES);
+	blk_queue_max_sectors(q, 512);
 
-	blk_queue_max_sectors(q, 512);
+	blk_queue_softirq_done(q, cciss_softirq_done);
 
 	q->queuedata = hba[i];
 	sprintf(disk->disk_name, "cciss/c%dd%d", i, j);
diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
index d31117eb95..e4d55ad32d 100644
--- a/drivers/ide/ide-cd.c
+++ b/drivers/ide/ide-cd.c
@@ -1332,8 +1332,6 @@ static ide_startstop_t cdrom_start_read (ide_drive_t *drive, unsigned int block)
 	if (cdrom_read_from_buffer(drive))
 		return ide_stopped;
 
-	blk_attempt_remerge(drive->queue, rq);
-
 	/* Clear the local sector buffer. */
 	info->nsectors_buffered = 0;
 
@@ -1874,14 +1872,6 @@ static ide_startstop_t cdrom_start_write(ide_drive_t *drive, struct request *rq)
 		return ide_stopped;
 	}
 
-	/*
-	 * for dvd-ram and such media, it's a really big deal to get
-	 * big writes all the time. so scour the queue and attempt to
-	 * remerge requests, often the plugging will not have had time
-	 * to do this properly
-	 */
-	blk_attempt_remerge(drive->queue, rq);
-
 	info->nsectors_buffered = 0;
 
 	/* use dma, if possible. we don't need to check more, since we
diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c
index b5dc6df8e6..dea2d4dcc6 100644
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -55,9 +55,22 @@
 #include <asm/io.h>
 #include <asm/bitops.h>
 
+void ide_softirq_done(struct request *rq)
+{
+	request_queue_t *q = rq->q;
+
+	add_disk_randomness(rq->rq_disk);
+	end_that_request_chunk(rq, rq->errors, rq->data_len);
+
+	spin_lock_irq(q->queue_lock);
+	end_that_request_last(rq, rq->errors);
+	spin_unlock_irq(q->queue_lock);
+}
+
 int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 		      int nr_sectors)
 {
+	unsigned int nbytes;
 	int ret = 1;
 
 	BUG_ON(!(rq->flags & REQ_STARTED));
@@ -81,17 +94,28 @@ int __ide_end_request(ide_drive_t *drive, struct request *rq, int uptodate,
 			HWGROUP(drive)->hwif->ide_dma_on(drive);
 	}
 
-	if (!end_that_request_first(rq, uptodate, nr_sectors)) {
-		add_disk_randomness(rq->rq_disk);
-
-		if (blk_rq_tagged(rq))
-			blk_queue_end_tag(drive->queue, rq);
-
+	/*
+	 * For partial completions (or non fs/pc requests), use the regular
+	 * direct completion path.
+	 */
+	nbytes = nr_sectors << 9;
+	if (rq_all_done(rq, nbytes)) {
+		rq->errors = uptodate;
+		rq->data_len = nbytes;
 		blkdev_dequeue_request(rq);
 		HWGROUP(drive)->rq = NULL;
-		end_that_request_last(rq, uptodate);
+		blk_complete_request(rq);
 		ret = 0;
+	} else {
+		if (!end_that_request_first(rq, uptodate, nr_sectors)) {
+			add_disk_randomness(rq->rq_disk);
+			blkdev_dequeue_request(rq);
+			HWGROUP(drive)->rq = NULL;
+			end_that_request_last(rq, uptodate);
+			ret = 0;
+		}
 	}
+
 	return ret;
 }
 EXPORT_SYMBOL(__ide_end_request);
@@ -113,6 +137,10 @@ int ide_end_request (ide_drive_t *drive, int uptodate, int nr_sectors)
 	unsigned long flags;
 	int ret = 1;
 
+	/*
+	 * room for locking improvements here, the calls below don't
+	 * need the queue lock held at all
+	 */
 	spin_lock_irqsave(&ide_lock, flags);
 	rq = HWGROUP(drive)->rq;
 
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index 02167a5b75..1ddaa71a8f 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1011,6 +1011,8 @@ static int ide_init_queue(ide_drive_t *drive)
 	blk_queue_max_hw_segments(q, max_sg_entries);
 	blk_queue_max_phys_segments(q, max_sg_entries);
 
+	blk_queue_softirq_done(q, ide_softirq_done);
+
 	/* assign drive queue */
 	drive->queue = q;
 
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 180676d711..ee5f4dfdab 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -69,7 +69,6 @@
 #include "scsi_logging.h"
 
 static void scsi_done(struct scsi_cmnd *cmd);
-static int scsi_retry_command(struct scsi_cmnd *cmd);
 
 /*
  * Definitions and constants.
@@ -752,7 +751,7 @@ static void scsi_done(struct scsi_cmnd *cmd)
  * isn't running --- used by scsi_times_out */
 void __scsi_done(struct scsi_cmnd *cmd)
 {
-	unsigned long flags;
+	struct request *rq = cmd->request;
 
 	/*
 	 * Set the serial numbers back to zero
@@ -763,71 +762,14 @@ void __scsi_done(struct scsi_cmnd *cmd)
 	if (cmd->result)
 		atomic_inc(&cmd->device->ioerr_cnt);
 
+	BUG_ON(!rq);
+
 	/*
-	 * Next, enqueue the command into the done queue.
-	 * It is a per-CPU queue, so we just disable local interrupts
-	 * and need no spinlock.
+	 * The uptodate/nbytes values don't matter, as we allow partial
+	 * completes and thus will check this in the softirq callback
 	 */
-	local_irq_save(flags);
-	list_add_tail(&cmd->eh_entry, &__get_cpu_var(scsi_done_q));
-	raise_softirq_irqoff(SCSI_SOFTIRQ);
-	local_irq_restore(flags);
-}
-
-/**
- * scsi_softirq - Perform post-interrupt processing of finished SCSI commands.
- *
- * This is the consumer of the done queue.
- *
- * This is called with all interrupts enabled. This should reduce
- * interrupt latency, stack depth, and reentrancy of the low-level
- * drivers.
- */
-static void scsi_softirq(struct softirq_action *h)
-{
-	int disposition;
-	LIST_HEAD(local_q);
-
-	local_irq_disable();
-	list_splice_init(&__get_cpu_var(scsi_done_q), &local_q);
-	local_irq_enable();
-
-	while (!list_empty(&local_q)) {
-		struct scsi_cmnd *cmd = list_entry(local_q.next,
-						   struct scsi_cmnd, eh_entry);
-		/* The longest time any command should be outstanding is the
-		 * per command timeout multiplied by the number of retries.
-		 *
-		 * For a typical command, this is 2.5 minutes */
-		unsigned long wait_for
-			= cmd->allowed * cmd->timeout_per_command;
-		list_del_init(&cmd->eh_entry);
-
-		disposition = scsi_decide_disposition(cmd);
-		if (disposition != SUCCESS &&
-		    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
-			sdev_printk(KERN_ERR, cmd->device,
-				    "timing out command, waited %lus\n",
-				    wait_for/HZ);
-			disposition = SUCCESS;
-		}
-
-		scsi_log_completion(cmd, disposition);
-		switch (disposition) {
-		case SUCCESS:
-			scsi_finish_command(cmd);
-			break;
-		case NEEDS_RETRY:
-			scsi_retry_command(cmd);
-			break;
-		case ADD_TO_MLQUEUE:
-			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
-			break;
-		default:
-			if (!scsi_eh_scmd_add(cmd, 0))
-				scsi_finish_command(cmd);
-		}
-	}
+	rq->completion_data = cmd;
+	blk_complete_request(rq);
 }
 
 /*
@@ -840,7 +782,7 @@ static void scsi_softirq(struct softirq_action *h)
  * level drivers should not become re-entrant as a result of
  * this.
  */
-static int scsi_retry_command(struct scsi_cmnd *cmd)
+int scsi_retry_command(struct scsi_cmnd *cmd)
 {
 	/*
 	 * Restore the SCSI command state.
@@ -1273,38 +1215,6 @@ int scsi_device_cancel(struct scsi_device *sdev, int recovery)
 }
 EXPORT_SYMBOL(scsi_device_cancel);
 
-#ifdef CONFIG_HOTPLUG_CPU
-static int scsi_cpu_notify(struct notifier_block *self,
-			   unsigned long action, void *hcpu)
-{
-	int cpu = (unsigned long)hcpu;
-
-	switch(action) {
-	case CPU_DEAD:
-		/* Drain scsi_done_q. */
-		local_irq_disable();
-		list_splice_init(&per_cpu(scsi_done_q, cpu),
-				 &__get_cpu_var(scsi_done_q));
-		raise_softirq_irqoff(SCSI_SOFTIRQ);
-		local_irq_enable();
-		break;
-	default:
-		break;
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block __devinitdata scsi_cpu_nb = {
-	.notifier_call	= scsi_cpu_notify,
-};
-
-#define register_scsi_cpu() register_cpu_notifier(&scsi_cpu_nb)
-#define unregister_scsi_cpu() unregister_cpu_notifier(&scsi_cpu_nb)
-#else
-#define register_scsi_cpu()
-#define unregister_scsi_cpu()
-#endif /* CONFIG_HOTPLUG_CPU */
-
 MODULE_DESCRIPTION("SCSI core");
 MODULE_LICENSE("GPL");
 
@@ -1338,8 +1248,6 @@ static int __init init_scsi(void)
 		INIT_LIST_HEAD(&per_cpu(scsi_done_q, i));
 
 	devfs_mk_dir("scsi");
-	open_softirq(SCSI_SOFTIRQ, scsi_softirq, NULL);
-	register_scsi_cpu();
 	printk(KERN_NOTICE "SCSI subsystem initialized\n");
 	return 0;
 
@@ -1367,7 +1275,6 @@ static void __exit exit_scsi(void)
 	devfs_remove("scsi");
 	scsi_exit_procfs();
 	scsi_exit_queue();
-	unregister_scsi_cpu();
 }
 
 subsys_initcall(init_scsi);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ba93d6e66d..00c9bf383e 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1493,6 +1493,41 @@ static void scsi_kill_request(struct request *req, request_queue_t *q)
 	__scsi_done(cmd);
 }
 
+static void scsi_softirq_done(struct request *rq)
+{
+	struct scsi_cmnd *cmd = rq->completion_data;
+	unsigned long wait_for = cmd->allowed * cmd->timeout_per_command;
+	int disposition;
+
+	INIT_LIST_HEAD(&cmd->eh_entry);
+
+	disposition = scsi_decide_disposition(cmd);
+	if (disposition != SUCCESS &&
+	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
+		sdev_printk(KERN_ERR, cmd->device,
+			    "timing out command, waited %lus\n",
+			    wait_for/HZ);
+		disposition = SUCCESS;
+	}
+
+	scsi_log_completion(cmd, disposition);
+
+	switch (disposition) {
+		case SUCCESS:
+			scsi_finish_command(cmd);
+			break;
+		case NEEDS_RETRY:
+			scsi_retry_command(cmd);
+			break;
+		case ADD_TO_MLQUEUE:
+			scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
+			break;
+		default:
+			if (!scsi_eh_scmd_add(cmd, 0))
+				scsi_finish_command(cmd);
+	}
+}
+
 /*
  * Function:    scsi_request_fn()
  *
@@ -1667,6 +1702,7 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
 	blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
 	blk_queue_segment_boundary(q, shost->dma_boundary);
 	blk_queue_issue_flush_fn(q, scsi_issue_flush_fn);
+	blk_queue_softirq_done(q, scsi_softirq_done);
 
 	if (!shost->use_clustering)
 		clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
index f04e7e11f5..14a6198cb8 100644
--- a/drivers/scsi/scsi_priv.h
+++ b/drivers/scsi/scsi_priv.h
@@ -44,6 +44,7 @@ extern void scsi_init_cmd_from_req(struct scsi_cmnd *cmd,
 		struct scsi_request *sreq);
 extern void __scsi_release_request(struct scsi_request *sreq);
 extern void __scsi_done(struct scsi_cmnd *cmd);
+extern int scsi_retry_command(struct scsi_cmnd *cmd);
 #ifdef CONFIG_SCSI_LOGGING
 void scsi_log_send(struct scsi_cmnd *cmd);
 void scsi_log_completion(struct scsi_cmnd *cmd, int disposition);
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -126,6 +126,7 @@ static void bio_fs_destructor(struct bio *bio)
 inline void bio_init(struct bio *bio)
 {
 	bio->bi_next = NULL;
+	bio->bi_bdev = NULL;
 	bio->bi_flags = 1 << BIO_UPTODATE;
 	bio->bi_rw = 0;
 	bio->bi_vcnt = 0;
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index fb09853774..02a585faa6 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -118,9 +118,9 @@ struct request_list {
  * try to put the fields that are referenced together in the same cacheline
  */
 struct request {
-	struct list_head queuelist; /* looking for ->queue? you must _not_
-				     * access it directly, use
-				     * blkdev_dequeue_request! */
+	struct list_head queuelist;
+	struct list_head donelist;
+
 	unsigned long flags;		/* see REQ_ bits below */
 
 	/* Maintain bio traversal state for part by part I/O submission.
@@ -141,6 +141,7 @@ struct request {
 	struct bio *biotail;
 
 	void *elevator_private;
+	void *completion_data;
 
 	unsigned short ioprio;
 
@@ -291,6 +292,7 @@ typedef int (merge_bvec_fn) (request_queue_t *, struct bio *, struct bio_vec *);
 typedef void (activity_fn) (void *data, int rw);
 typedef int (issue_flush_fn) (request_queue_t *, struct gendisk *, sector_t *);
 typedef void (prepare_flush_fn) (request_queue_t *, struct request *);
+typedef void (softirq_done_fn)(struct request *);
 
 enum blk_queue_state {
 	Queue_down,
@@ -332,6 +334,7 @@ struct request_queue
 	activity_fn		*activity_fn;
 	issue_flush_fn		*issue_flush_fn;
 	prepare_flush_fn	*prepare_flush_fn;
+	softirq_done_fn		*softirq_done_fn;
 
 	/*
 	 * Dispatch queue sorting
@@ -592,7 +595,6 @@ extern void generic_make_request(struct bio *bio);
 extern void blk_put_request(struct request *);
 extern void __blk_put_request(request_queue_t *, struct request *);
 extern void blk_end_sync_rq(struct request *rq, int error);
-extern void blk_attempt_remerge(request_queue_t *, struct request *);
 extern struct request *blk_get_request(request_queue_t *, int, gfp_t);
 extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 extern void blk_requeue_request(request_queue_t *, struct request *);
@@ -646,6 +648,17 @@ extern int end_that_request_first(struct request *, int, int);
 extern int end_that_request_chunk(struct request *, int, int);
 extern void end_that_request_last(struct request *, int);
 extern void end_request(struct request *req, int uptodate);
+extern void blk_complete_request(struct request *);
+
+static inline int rq_all_done(struct request *rq, unsigned int nr_bytes)
+{
+	if (blk_fs_request(rq))
+		return (nr_bytes >= (rq->hard_nr_sectors << 9));
+	else if (blk_pc_request(rq))
+		return nr_bytes >= rq->data_len;
+
+	return 0;
+}
 
 /*
  * end_that_request_first/chunk() takes an uptodate argument. we account
@@ -694,6 +707,7 @@ extern void blk_queue_segment_boundary(request_queue_t *, unsigned long);
 extern void blk_queue_prep_rq(request_queue_t *, prep_rq_fn *pfn);
 extern void blk_queue_merge_bvec(request_queue_t *, merge_bvec_fn *);
 extern void blk_queue_dma_alignment(request_queue_t *, int);
+extern void blk_queue_softirq_done(request_queue_t *, softirq_done_fn *);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 extern int blk_queue_ordered(request_queue_t *, unsigned, prepare_flush_fn *);
 extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
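rq_all_done() is the predicate that lets __ide_end_request() above take the new softirq path: a filesystem request is wholly done once the completed byte count covers hard_nr_sectors (sectors are 512 bytes, hence the << 9), a packet-command request once it covers data_len, and any other request type never qualifies, so it stays on the old direct-completion path. A standalone model of the arithmetic:

#include <stdio.h>

/* Model of rq_all_done() for the filesystem-request case. */
static int fs_rq_all_done(unsigned int hard_nr_sectors, unsigned int nr_bytes)
{
	return nr_bytes >= (hard_nr_sectors << 9);	/* 1 sector = 512 bytes */
}

int main(void)
{
	/* An 8-sector (4 KiB) request: */
	printf("%d\n", fs_rq_all_done(8, 8 << 9));	/* 1: whole request -> softirq path */
	printf("%d\n", fs_rq_all_done(8, 4 << 9));	/* 0: partial -> end_that_request_first() */
	return 0;
}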
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index fb80fa44c4..4a6f50e31c 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -114,8 +114,6 @@ extern ssize_t elv_iosched_store(request_queue_t *, const char *, size_t);
 extern int elevator_init(request_queue_t *, char *);
 extern void elevator_exit(elevator_t *);
 extern int elv_rq_merge_ok(struct request *, struct bio *);
-extern int elv_try_merge(struct request *, struct bio *);
-extern int elv_try_last_merge(request_queue_t *, struct bio *);
 
 /*
  * Return values from elevator merger
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 4dd6694963..ef8d0cbb83 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -1001,6 +1001,7 @@ extern int noautodma;
 
 extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs);
 extern int __ide_end_request (ide_drive_t *drive, struct request *rq, int uptodate, int nrsecs);
+extern void ide_softirq_done(struct request *rq);
 
 /*
  * This is used on exit from the driver to designate the next irq handler
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index e50a95fbeb..2c08fdc2bd 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -112,7 +112,7 @@ enum
 	TIMER_SOFTIRQ,
 	NET_TX_SOFTIRQ,
 	NET_RX_SOFTIRQ,
-	SCSI_SOFTIRQ,
+	BLOCK_SOFTIRQ,
 	TASKLET_SOFTIRQ
 };
 
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 51747cd88d..a1d26cb289 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -125,36 +125,7 @@ static inline void rcu_bh_qsctr_inc(int cpu)
 	rdp->passed_quiesc = 1;
 }
 
-static inline int __rcu_pending(struct rcu_ctrlblk *rcp,
-						struct rcu_data *rdp)
-{
-	/* This cpu has pending rcu entries and the grace period
-	 * for them has completed.
-	 */
-	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
-		return 1;
-
-	/* This cpu has no pending entries, but there are new entries */
-	if (!rdp->curlist && rdp->nxtlist)
-		return 1;
-
-	/* This cpu has finished callbacks to invoke */
-	if (rdp->donelist)
-		return 1;
-
-	/* The rcu core waits for a quiescent state from the cpu */
-	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
-		return 1;
-
-	/* nothing to do */
-	return 0;
-}
-
-static inline int rcu_pending(int cpu)
-{
-	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
-		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
-}
+extern int rcu_pending(int cpu);
 
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 30b0bba038..ccc45d49ce 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -429,6 +429,36 @@ static void rcu_process_callbacks(unsigned long unused)
 				&__get_cpu_var(rcu_bh_data));
 }
 
+static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
+{
+	/* This cpu has pending rcu entries and the grace period
+	 * for them has completed.
+	 */
+	if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
+		return 1;
+
+	/* This cpu has no pending entries, but there are new entries */
+	if (!rdp->curlist && rdp->nxtlist)
+		return 1;
+
+	/* This cpu has finished callbacks to invoke */
+	if (rdp->donelist)
+		return 1;
+
+	/* The rcu core waits for a quiescent state from the cpu */
+	if (rdp->quiescbatch != rcp->cur || rdp->qs_pending)
+		return 1;
+
+	/* nothing to do */
+	return 0;
+}
+
+int rcu_pending(int cpu)
+{
+	return __rcu_pending(&rcu_ctrlblk, &per_cpu(rcu_data, cpu)) ||
+		__rcu_pending(&rcu_bh_ctrlblk, &per_cpu(rcu_bh_data, cpu));
+}
+
 void rcu_check_callbacks(int cpu, int user)
 {
 	if (user ||
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7849cac14d..a67f1b44c9 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -402,7 +402,7 @@ static int netlink_create(struct socket *sock, int protocol)
 	groups = nl_table[protocol].groups;
 	netlink_unlock_table();
 
-	if ((err = __netlink_create(sock, protocol) < 0))
+	if ((err = __netlink_create(sock, protocol)) < 0)
 		goto out_module;
 
 	nlk = nlk_sk(sock->sk);
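The af_netlink.c hunk is a one-character operator-precedence fix, independent of the completion-path work: in the old code the comparison bound tighter than the assignment, so err received the boolean result of __netlink_create(sock, protocol) < 0 (0 or 1) rather than the call's return value, and a failing create would propagate err == 1 instead of the negative errno. Demonstrated in isolation:

#include <stdio.h>

static int netlink_create_stub(void)	/* stands in for __netlink_create() */
{
	return -22;			/* pretend it failed with -EINVAL */
}

int main(void)
{
	int err;

	/* Buggy form: "<" binds tighter than "=", so err = (call < 0) = 1. */
	if ((err = netlink_create_stub() < 0))
		printf("buggy: err = %d (error code lost)\n", err);

	/* Fixed form: assign first, then compare what was assigned. */
	if ((err = netlink_create_stub()) < 0)
		printf("fixed: err = %d\n", err);

	return 0;
}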