Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 30
 1 file changed, 7 insertions(+), 23 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8963d9149b5f..69103e053c92 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -70,11 +70,6 @@ enum rq_cmd_type_bits {
 	REQ_TYPE_ATA_PC,
 };
 
-enum {
-	BLK_RW_ASYNC	= 0,
-	BLK_RW_SYNC	= 1,
-};
-
 /*
  * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being
  * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a
@@ -301,12 +296,6 @@ struct blk_queue_tag {
 #define BLK_SCSI_MAX_CMDS	(256)
 #define BLK_SCSI_CMD_PER_LONG	(BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
 
-struct blk_cmd_filter {
-	unsigned long read_ok[BLK_SCSI_CMD_PER_LONG];
-	unsigned long write_ok[BLK_SCSI_CMD_PER_LONG];
-	struct kobject kobj;
-};
-
 struct queue_limits {
 	unsigned long		bounce_pfn;
 	unsigned long		seg_boundary_mask;
@@ -445,7 +434,6 @@ struct request_queue
 #if defined(CONFIG_BLK_DEV_BSG)
 	struct bsg_class_device bsg_dev;
 #endif
-	struct blk_cmd_filter cmd_filter;
 };
 
 #define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
@@ -730,6 +718,7 @@ struct rq_map_data {
 	int nr_entries;
 	unsigned long offset;
 	int null_mapped;
+	int from_user;
 };
 
 struct req_iterator {
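Annotation: a rough sketch of how a caller might fill the extended rq_map_data before handing it to blk_rq_map_user(). The pages/page_order fields and the precise meaning of from_user (copy the caller-supplied user buffer even when null_mapped is set) are assumptions from the surrounding API, not part of this hunk.

	/*
	 * Illustrative only: map a user buffer through a preallocated
	 * page array.  Field names other than nr_entries/offset/
	 * null_mapped/from_user are assumed, not shown in this patch.
	 */
	struct rq_map_data mdata = {
		.pages       = reserved_pages,	/* assumed: driver-owned pages */
		.page_order  = 0,
		.nr_entries  = nr_pages,
		.offset      = 0,
		.null_mapped = 0,
		.from_user   = 1,		/* also copy in the user data */
	};

	ret = blk_rq_map_user(q, rq, &mdata, ubuf, len, GFP_KERNEL);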
@@ -786,18 +775,18 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
  * congested queues, and wake up anyone who was waiting for requests to be
  * put back.
  */
-static inline void blk_clear_queue_congested(struct request_queue *q, int rw)
+static inline void blk_clear_queue_congested(struct request_queue *q, int sync)
 {
-	clear_bdi_congested(&q->backing_dev_info, rw);
+	clear_bdi_congested(&q->backing_dev_info, sync);
 }
 
 /*
  * A queue has just entered congestion.  Flag that in the queue's VM-visible
  * state flags and increment the global gounter of congested queues.
  */
-static inline void blk_set_queue_congested(struct request_queue *q, int rw)
+static inline void blk_set_queue_congested(struct request_queue *q, int sync)
 {
-	set_bdi_congested(&q->backing_dev_info, rw);
+	set_bdi_congested(&q->backing_dev_info, sync);
 }
 
 extern void blk_start_queue(struct request_queue *q);
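Annotation: the helpers above now key congestion state on sync vs. async rather than read vs. write. A minimal sketch of how the request allocation/free paths could use them, assuming the BLK_RW_SYNC/BLK_RW_ASYNC constants removed earlier in this patch are now provided by a shared header (e.g. backing-dev.h); rl->count and the threshold helpers mirror the request-list code in blk-core.c and are shown purely for illustration.

	/* Sketch only: mark the sync side congested when its request
	 * pool runs low, and clear it again as requests are freed.
	 */
	if (rl->count[BLK_RW_SYNC] >= queue_congestion_on_threshold(q))
		blk_set_queue_congested(q, BLK_RW_SYNC);

	if (rl->count[BLK_RW_SYNC] < queue_congestion_off_threshold(q))
		blk_clear_queue_congested(q, BLK_RW_SYNC);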
@@ -924,6 +913,7 @@ extern void blk_queue_logical_block_size(struct request_queue *, unsigned short)
 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
 extern void blk_queue_alignment_offset(struct request_queue *q,
 				       unsigned int alignment);
+extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
 extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
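Annotation: blk_limits_io_min() operates on a bare struct queue_limits, which suits code (e.g. a stacking driver) that builds up limits before, or without, a request_queue; blk_queue_io_min() remains the wrapper for drivers that already own a queue. A hypothetical caller, not taken from this patch:

	/* Driver with a queue: advertise a 4KiB preferred minimum I/O size. */
	blk_queue_io_min(q, 4096);

	/* Stacking driver composing limits before it has a queue (sketch);
	 * "lim" is a local struct queue_limits introduced for illustration.
	 */
	struct queue_limits lim;

	blk_set_default_limits(&lim);
	blk_limits_io_min(&lim, 4096);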
@@ -998,13 +988,7 @@ static inline int sb_issue_discard(struct super_block *sb,
 	return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL);
 }
 
-/*
- * command filter functions
- */
-extern int blk_verify_command(struct blk_cmd_filter *filter,
-			      unsigned char *cmd, fmode_t has_write_perm);
-extern void blk_unregister_filter(struct gendisk *disk);
-extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
+extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
 
 #define MAX_PHYS_SEGMENTS	128
 #define MAX_HW_SEGMENTS	128
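Annotation: with the per-queue command filter gone, blk_verify_command() takes only the CDB and the caller's write permission. A sketch of how an ioctl path might call the new form; the variable names are illustrative, not from this patch.

	/* Illustrative only: verify a user-supplied SCSI CDB before issuing
	 * it.  "cdb" is the command buffer, "mode" the opener's file mode.
	 */
	int err = blk_verify_command(cdb, mode & FMODE_WRITE);
	if (err)
		return err;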
