Diffstat (limited to 'block')

 -rw-r--r--  block/as-iosched.c  |   5
 -rw-r--r--  block/cfq-iosched.c |  53
 -rw-r--r--  block/elevator.c    |   2
 -rw-r--r--  block/ll_rw_blk.c   |   4
 -rw-r--r--  block/scsi_ioctl.c  | 101

 5 files changed, 117 insertions, 48 deletions
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 296708ceceb2..e25a5d79ab27 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1844,9 +1844,10 @@ static void __exit as_exit(void)
 	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_as);
 	ioc_gone = &all_gone;
-	barrier();
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
 	if (atomic_read(&ioc_count))
-		complete(ioc_gone);
+		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	kmem_cache_destroy(arq_pool);
 }
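The hunk above (and the matching cfq_exit() hunk below) turns the exit path into one side of a completion handshake with the io_context release path: the old code used only a compiler barrier() and called complete() itself, so nothing actually waited for outstanding io_contexts before the slab cache was destroyed. A minimal sketch of the handshake, with the freeing side paraphrased from the scheduler's io_context teardown rather than taken from this diff:

/* exit side, as in as_exit()/cfq_exit() above: publish the completion,
 * then wait only if io_contexts are still outstanding */
ioc_gone = &all_gone;
smp_wmb();			/* order the store before the read below */
if (atomic_read(&ioc_count))
	wait_for_completion(ioc_gone);

/* freeing side (paraphrased, not part of this diff): the last io_context
 * to be released signals the waiter */
if (atomic_dec_and_test(&ioc_count) && ioc_gone)
	complete(ioc_gone);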
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 67d446de0227..2540dfaa3e38 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1472,19 +1472,37 @@ out:
 	return cfqq;
 }
 
+static void
+cfq_drop_dead_cic(struct io_context *ioc, struct cfq_io_context *cic)
+{
+	read_lock(&cfq_exit_lock);
+	rb_erase(&cic->rb_node, &ioc->cic_root);
+	read_unlock(&cfq_exit_lock);
+	kmem_cache_free(cfq_ioc_pool, cic);
+	atomic_dec(&ioc_count);
+}
+
 static struct cfq_io_context *
 cfq_cic_rb_lookup(struct cfq_data *cfqd, struct io_context *ioc)
 {
-	struct rb_node *n = ioc->cic_root.rb_node;
+	struct rb_node *n;
 	struct cfq_io_context *cic;
-	void *key = cfqd;
+	void *k, *key = cfqd;
 
+restart:
+	n = ioc->cic_root.rb_node;
 	while (n) {
 		cic = rb_entry(n, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, cic);
+			goto restart;
+		}
 
-		if (key < cic->key)
+		if (key < k)
 			n = n->rb_left;
-		else if (key > cic->key)
+		else if (key > k)
 			n = n->rb_right;
 		else
 			return cic;
@@ -1497,29 +1515,37 @@ static inline void
 cfq_cic_link(struct cfq_data *cfqd, struct io_context *ioc,
 	     struct cfq_io_context *cic)
 {
-	struct rb_node **p = &ioc->cic_root.rb_node;
-	struct rb_node *parent = NULL;
+	struct rb_node **p;
+	struct rb_node *parent;
 	struct cfq_io_context *__cic;
-
-	read_lock(&cfq_exit_lock);
+	void *k;
 
 	cic->ioc = ioc;
 	cic->key = cfqd;
 
 	ioc->set_ioprio = cfq_ioc_set_ioprio;
-
+restart:
+	parent = NULL;
+	p = &ioc->cic_root.rb_node;
 	while (*p) {
 		parent = *p;
 		__cic = rb_entry(parent, struct cfq_io_context, rb_node);
+		/* ->key must be copied to avoid race with cfq_exit_queue() */
+		k = __cic->key;
+		if (unlikely(!k)) {
+			cfq_drop_dead_cic(ioc, cic);
+			goto restart;
+		}
 
-		if (cic->key < __cic->key)
+		if (cic->key < k)
 			p = &(*p)->rb_left;
-		else if (cic->key > __cic->key)
+		else if (cic->key > k)
 			p = &(*p)->rb_right;
 		else
 			BUG();
 	}
 
+	read_lock(&cfq_exit_lock);
 	rb_link_node(&cic->rb_node, parent, p);
 	rb_insert_color(&cic->rb_node, &ioc->cic_root);
 	list_add(&cic->queue_list, &cfqd->cic_list);
@@ -2439,9 +2465,10 @@ static void __exit cfq_exit(void)
 	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_cfq);
 	ioc_gone = &all_gone;
-	barrier();
+	/* ioc_gone's update must be visible before reading ioc_count */
+	smp_wmb();
 	if (atomic_read(&ioc_count))
-		complete(ioc_gone);
+		wait_for_completion(ioc_gone);
 	synchronize_rcu();
 	cfq_slab_kill();
 }
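The two rbtree hunks above replace a fully locked traversal with a lock-free walk that snapshots ->key once per node. For orientation, a rough paraphrase (assumed, not part of this diff) of the racing side in cfq_exit_queue(), which marks every cic dead by clearing its key under the cfq_exit_lock write lock:

/* paraphrased sketch of the counterpart in cfq_exit_queue() */
write_lock(&cfq_exit_lock);
while (!list_empty(&cfqd->cic_list)) {
	struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
						struct cfq_io_context,
						queue_list);
	cic->key = NULL;		/* mark dead: lookup/link will drop it */
	list_del_init(&cic->queue_list);
}
write_unlock(&cfq_exit_lock);

Because the walk no longer holds the lock, cic->key can go NULL at any point; copying it into the local k keeps the two comparisons consistent, and a NULL snapshot routes the node through cfq_drop_dead_cic(), which takes cfq_exit_lock only around the rb_erase() before the walk restarts.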
diff --git a/block/elevator.c b/block/elevator.c
index 0d6be03d929e..29825792cbd5 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -895,10 +895,8 @@ ssize_t elv_iosched_show(request_queue_t *q, char *name)
 EXPORT_SYMBOL(elv_dispatch_sort);
 EXPORT_SYMBOL(elv_add_request);
 EXPORT_SYMBOL(__elv_add_request);
-EXPORT_SYMBOL(elv_requeue_request);
 EXPORT_SYMBOL(elv_next_request);
 EXPORT_SYMBOL(elv_dequeue_request);
 EXPORT_SYMBOL(elv_queue_empty);
-EXPORT_SYMBOL(elv_completed_request);
 EXPORT_SYMBOL(elevator_exit);
 EXPORT_SYMBOL(elevator_init);
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index e112d1a5dab6..1755c053fd68 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1554,7 +1554,7 @@ void blk_plug_device(request_queue_t *q)
 	 * don't plug a stopped queue, it must be paired with blk_start_queue()
 	 * which will restart the queueing
 	 */
-	if (test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags))
+	if (blk_queue_stopped(q))
 		return;
 
 	if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) {
@@ -1587,7 +1587,7 @@ EXPORT_SYMBOL(blk_remove_plug);
  */
 void __generic_unplug_device(request_queue_t *q)
 {
-	if (unlikely(test_bit(QUEUE_FLAG_STOPPED, &q->queue_flags)))
+	if (unlikely(blk_queue_stopped(q)))
 		return;
 
 	if (!blk_remove_plug(q))
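Both ll_rw_blk.c hunks swap the open-coded test_bit() for the blk_queue_stopped() helper; for reference, the helper in include/linux/blkdev.h of this vintage is essentially the same test wrapped in a macro:

#define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)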
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index 24f7af9d0abc..b33eda26e205 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -350,16 +350,51 @@ out:
 	return ret;
 }
 
+/**
+ * sg_scsi_ioctl -- handle deprecated SCSI_IOCTL_SEND_COMMAND ioctl
+ * @file:	file this ioctl operates on (optional)
+ * @q:		request queue to send scsi commands down
+ * @disk:	gendisk to operate on (option)
+ * @sic:	userspace structure describing the command to perform
+ *
+ * Send down the scsi command described by @sic to the device below
+ * the request queue @q.  If @file is non-NULL it's used to perform
+ * fine-grained permission checks that allow users to send down
+ * non-destructive SCSI commands.  If the caller has a struct gendisk
+ * available it should be passed in as @disk to allow the low level
+ * driver to use the information contained in it.  A non-NULL @disk
+ * is only allowed if the caller knows that the low level driver doesn't
+ * need it (e.g. in the scsi subsystem).
+ *
+ * Notes:
+ *   -  This interface is deprecated - users should use the SG_IO
+ *      interface instead, as this is a more flexible approach to
+ *      performing SCSI commands on a device.
+ *   -  The SCSI command length is determined by examining the 1st byte
+ *      of the given command. There is no way to override this.
+ *   -  Data transfers are limited to PAGE_SIZE
+ *   -  The length (x + y) must be at least OMAX_SB_LEN bytes long to
+ *      accommodate the sense buffer when an error occurs.
+ *      The sense buffer is truncated to OMAX_SB_LEN (16) bytes so that
+ *      old code will not be surprised.
+ *   -  If a Unix error occurs (e.g. ENOMEM) then the user will receive
+ *      a negative return and the Unix error code in 'errno'.
+ *      If the SCSI command succeeds then 0 is returned.
+ *      Positive numbers returned are the compacted SCSI error codes (4
+ *      bytes in one int) where the lowest byte is the SCSI status.
+ */
 #define OMAX_SB_LEN 16          /* For backward compatibility */
-
-static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
-			  struct gendisk *bd_disk, Scsi_Ioctl_Command __user *sic)
+int sg_scsi_ioctl(struct file *file, struct request_queue *q,
+		  struct gendisk *disk, struct scsi_ioctl_command __user *sic)
 {
 	struct request *rq;
 	int err;
 	unsigned int in_len, out_len, bytes, opcode, cmdlen;
 	char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
 
+	if (!sic)
+		return -EINVAL;
+
 	/*
 	 * get in an out lengths, verify they don't exceed a page worth of data
 	 */
@@ -393,45 +428,53 @@ static int sg_scsi_ioctl(struct file *file, request_queue_t *q,
 	if (copy_from_user(rq->cmd, sic->data, cmdlen))
 		goto error;
 
-	if (copy_from_user(buffer, sic->data + cmdlen, in_len))
+	if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
 		goto error;
 
 	err = verify_command(file, rq->cmd);
 	if (err)
 		goto error;
 
+	/* default. possible overriden later */
+	rq->retries = 5;
+
 	switch (opcode) {
-	case SEND_DIAGNOSTIC:
-	case FORMAT_UNIT:
-		rq->timeout = FORMAT_UNIT_TIMEOUT;
-		break;
-	case START_STOP:
-		rq->timeout = START_STOP_TIMEOUT;
-		break;
-	case MOVE_MEDIUM:
-		rq->timeout = MOVE_MEDIUM_TIMEOUT;
-		break;
-	case READ_ELEMENT_STATUS:
-		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
-		break;
-	case READ_DEFECT_DATA:
-		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
-		break;
-	default:
-		rq->timeout = BLK_DEFAULT_TIMEOUT;
-		break;
+	case SEND_DIAGNOSTIC:
+	case FORMAT_UNIT:
+		rq->timeout = FORMAT_UNIT_TIMEOUT;
+		rq->retries = 1;
+		break;
+	case START_STOP:
+		rq->timeout = START_STOP_TIMEOUT;
+		break;
+	case MOVE_MEDIUM:
+		rq->timeout = MOVE_MEDIUM_TIMEOUT;
+		break;
+	case READ_ELEMENT_STATUS:
+		rq->timeout = READ_ELEMENT_STATUS_TIMEOUT;
+		break;
+	case READ_DEFECT_DATA:
+		rq->timeout = READ_DEFECT_DATA_TIMEOUT;
+		rq->retries = 1;
+		break;
+	default:
+		rq->timeout = BLK_DEFAULT_TIMEOUT;
+		break;
+	}
+
+	if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
+		err = DRIVER_ERROR << 24;
+		goto out;
 	}
 
 	memset(sense, 0, sizeof(sense));
 	rq->sense = sense;
 	rq->sense_len = 0;
-
-	rq->data = buffer;
-	rq->data_len = bytes;
 	rq->flags |= REQ_BLOCK_PC;
-	rq->retries = 0;
 
-	blk_execute_rq(q, bd_disk, rq, 0);
+	blk_execute_rq(q, disk, rq, 0);
+
+out:
 	err = rq->errors & 0xff; /* only 8 bit SCSI status */
 	if (err) {
 		if (rq->sense_len && rq->sense) {
@@ -450,7 +493,7 @@ error:
 	blk_put_request(rq);
 	return err;
 }
-
+EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
 
 /* Send basic block requests */
 static int __blk_send_generic(request_queue_t *q, struct gendisk *bd_disk, int cmd, int data)
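To make the newly documented (and now exported) sg_scsi_ioctl() semantics concrete, here is a hedged userspace sketch of the deprecated SCSI_IOCTL_SEND_COMMAND interface it services: a 6-byte INQUIRY whose response overwrites the data area on return. The device path and the inline struct layout (mirroring the inlen/outlen/data layout of scsi/scsi_ioctl.h) are illustrative assumptions, not taken from this patch; new code should use SG_IO instead, as the kernel-doc above notes.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/scsi_ioctl.h>	/* SCSI_IOCTL_SEND_COMMAND */

int main(void)
{
	struct {
		unsigned int inlen;	/* bytes sent to the device after the CDB */
		unsigned int outlen;	/* bytes expected back (>= OMAX_SB_LEN) */
		unsigned char data[96];	/* CDB on entry, response/sense on return */
	} sic;
	int fd = open("/dev/sda", O_RDONLY);	/* example device, adjust to taste */

	if (fd < 0)
		return 1;

	memset(&sic, 0, sizeof(sic));
	sic.inlen = 0;
	sic.outlen = sizeof(sic.data);
	sic.data[0] = 0x12;			/* INQUIRY: length taken from 1st byte */
	sic.data[4] = sizeof(sic.data);		/* allocation length */

	/* 0 on success, negative with errno on a Unix error, positive = packed
	 * SCSI result with the status in the lowest byte */
	if (ioctl(fd, SCSI_IOCTL_SEND_COMMAND, &sic) == 0)
		printf("vendor: %.8s\n", (char *)&sic.data[8]);
	return 0;
}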