Diffstat (limited to 'include/linux')
 -rw-r--r--  include/linux/blkdev.h    | 83
 -rw-r--r--  include/linux/init_task.h |  1
 -rw-r--r--  include/linux/iocontext.h | 95
 -rw-r--r--  include/linux/ioprio.h    | 13
 -rw-r--r--  include/linux/sched.h     |  2
 5 files changed, 111 insertions, 83 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 49b7a4c31a6d..2483a05231c7 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -34,83 +34,10 @@ struct sg_io_hdr;
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic);	/* destructor */
-	void (*exit)(struct as_io_context *aic);	/* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued;	/* queued reads & sync writes */
-	atomic_t nr_dispatched;	/* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
-struct cfq_queue;
-struct cfq_io_context {
-	struct rb_node rb_node;
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	unsigned long last_end_request;
-	sector_t last_request_pos;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-
-	unsigned int seek_samples;
-	u64 seek_total;
-	sector_t seek_mean;
-
-	struct list_head queue_list;
-
-	void (*dtor)(struct io_context *);	/* destructor */
-	void (*exit)(struct io_context *);	/* called on task exit */
-};
-
-/*
- * This is the per-process I/O subsystem state. It is refcounted and
- * kmalloc'ed. Currently all fields are modified in process io context
- * (apart from the atomic refcount), so require no locking.
- */
-struct io_context {
-	atomic_t refcount;
-	struct task_struct *task;
-
-	unsigned int ioprio_changed;
-
-	/*
-	 * For request batching
-	 */
-	unsigned long last_waited;	/* Time last woken after wait for request */
-	int nr_batch_requests;		/* Number of requests left in the batch */
-
-	struct as_io_context *aic;
-	struct rb_root cic_root;
-	void *ioc_data;
-};
-
-void put_io_context(struct io_context *ioc);
+int put_io_context(struct io_context *ioc);
 void exit_io_context(void);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
+struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
 void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 void swap_io_context(struct io_context **ioc1, struct io_context **ioc2);
 
@@ -894,6 +821,12 @@ static inline void exit_io_context(void)
 {
 }
 
+static inline int put_io_context(struct io_context *ioc)
+{
+	return 1;
+}
+
+
 #endif /* CONFIG_BLOCK */
 
 #endif
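The io_context family of definitions leaves blkdev.h for the new <linux/iocontext.h> below; what remains here are the prototypes, with put_io_context() now returning int instead of void, plus a new alloc_io_context() prototype. Judging by the !CONFIG_BLOCK stub, which unconditionally returns 1, the return value presumably tells the caller whether the final reference was dropped. A minimal hedged sketch of a caller acting on that assumption (the function name and debug message are illustrative, not from this diff):

/*
 * Hedged sketch, not code from this commit: drop our reference and
 * treat a return of 1 as "the context was freed" -- the semantics we
 * infer from the !CONFIG_BLOCK stub above.
 */
static void drop_io_context_sketch(struct task_struct *task)
{
	struct io_context *ioc = task->io_context;

	task->io_context = NULL;
	if (ioc && put_io_context(ioc))
		pr_debug("io_context for pid %d was freed\n", task->pid);
}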
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 796019b22b6f..e6b3f7080679 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -137,7 +137,6 @@ extern struct group_info init_groups;
 	.time_slice	= HZ,						\
 	.nr_cpus_allowed = NR_CPUS,					\
 	},								\
-	.ioprio		= 0,						\
 	.tasks		= LIST_HEAD_INIT(tsk.tasks),			\
 	.ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children),		\
 	.ptrace_list	= LIST_HEAD_INIT(tsk.ptrace_list),		\
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
new file mode 100644
index 000000000000..593b222d9dcc
--- /dev/null
+++ b/include/linux/iocontext.h
@@ -0,0 +1,95 @@
+#ifndef IOCONTEXT_H
+#define IOCONTEXT_H
+
+#include <linux/radix-tree.h>
+
+/*
+ * This is the per-process anticipatory I/O scheduler state.
+ */
+struct as_io_context {
+	spinlock_t lock;
+
+	void (*dtor)(struct as_io_context *aic);	/* destructor */
+	void (*exit)(struct as_io_context *aic);	/* called on task exit */
+
+	unsigned long state;
+	atomic_t nr_queued;	/* queued reads & sync writes */
+	atomic_t nr_dispatched;	/* number of requests gone to the drivers */
+
+	/* IO History tracking */
+	/* Thinktime */
+	unsigned long last_end_request;
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+	/* Layout pattern */
+	unsigned int seek_samples;
+	sector_t last_request_pos;
+	u64 seek_total;
+	sector_t seek_mean;
+};
+
+struct cfq_queue;
+struct cfq_io_context {
+	void *key;
+	unsigned long dead_key;
+
+	struct cfq_queue *cfqq[2];
+
+	struct io_context *ioc;
+
+	unsigned long last_end_request;
+	sector_t last_request_pos;
+
+	unsigned long ttime_total;
+	unsigned long ttime_samples;
+	unsigned long ttime_mean;
+
+	unsigned int seek_samples;
+	u64 seek_total;
+	sector_t seek_mean;
+
+	struct list_head queue_list;
+
+	void (*dtor)(struct io_context *);	/* destructor */
+	void (*exit)(struct io_context *);	/* called on task exit */
+};
+
+/*
+ * I/O subsystem state of the associated processes. It is refcounted
+ * and kmalloc'ed. These could be shared between processes.
+ */
+struct io_context {
+	atomic_t refcount;
+	atomic_t nr_tasks;
+
+	/* all the fields below are protected by this lock */
+	spinlock_t lock;
+
+	unsigned short ioprio;
+	unsigned short ioprio_changed;
+
+	/*
+	 * For request batching
+	 */
+	unsigned long last_waited;	/* Time last woken after wait for request */
+	int nr_batch_requests;		/* Number of requests left in the batch */
+
+	struct as_io_context *aic;
+	struct radix_tree_root radix_root;
+	void *ioc_data;
+};
+
+static inline struct io_context *ioc_task_link(struct io_context *ioc)
+{
+	/*
+	 * if ref count is zero, don't allow sharing (ioc is going away, it's
+	 * a race).
+	 */
+	if (ioc && atomic_inc_not_zero(&ioc->refcount))
+		return ioc;
+
+	return NULL;
+}
+
+#endif
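ioc_task_link() is what makes sharing the context safe: atomic_inc_not_zero() refuses to take a reference once the count has already hit zero, closing the race with a concurrent final put, and the caller must then fall back to allocating a fresh context. A hedged sketch of how a fork path might combine it with the CLONE_IO flag added in sched.h further down; the name copy_io_sketch and the placement of the nr_tasks increment are assumptions for illustration, not code from this diff:

/*
 * Illustrative sketch: share the parent's io_context when CLONE_IO is
 * set, otherwise inherit only the io priority into a freshly allocated
 * context.  Assumes kernel/fork.c context (current, the task_struct
 * io_context field, GFP_KERNEL).
 */
static int copy_io_sketch(unsigned long clone_flags, struct task_struct *tsk)
{
	struct io_context *ioc = current->io_context;

	if (!ioc)
		return 0;

	if (clone_flags & CLONE_IO) {
		/* NULL here means we lost the race with the last put. */
		tsk->io_context = ioc_task_link(ioc);
		if (!tsk->io_context)
			return -ENOMEM;
		/*
		 * ioc_task_link() only bumps refcount; we assume the caller
		 * accounts the extra task in nr_tasks.
		 */
		atomic_inc(&tsk->io_context->nr_tasks);
	} else if (ioprio_valid(ioc->ioprio)) {
		tsk->io_context = alloc_io_context(GFP_KERNEL, -1);
		if (!tsk->io_context)
			return -ENOMEM;
		tsk->io_context->ioprio = ioc->ioprio;
	}

	return 0;
}

The split between refcount and nr_tasks makes sense once contexts are shared: refcount counts everyone holding a pointer, while nr_tasks counts live tasks attached to the context, so exit paths can tell the two apart.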
diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h
index baf29387cab4..2a3bb1bb7433 100644
--- a/include/linux/ioprio.h
+++ b/include/linux/ioprio.h
@@ -2,6 +2,7 @@
 #define IOPRIO_H
 
 #include <linux/sched.h>
+#include <linux/iocontext.h>
 
 /*
  * Gives us 8 prio classes with 13-bits of data for each class
@@ -45,18 +46,18 @@ enum {
  * the cpu scheduler nice value to an io priority
  */
 #define IOPRIO_NORM	(4)
-static inline int task_ioprio(struct task_struct *task)
+static inline int task_ioprio(struct io_context *ioc)
 {
-	if (ioprio_valid(task->ioprio))
-		return IOPRIO_PRIO_DATA(task->ioprio);
+	if (ioprio_valid(ioc->ioprio))
+		return IOPRIO_PRIO_DATA(ioc->ioprio);
 
 	return IOPRIO_NORM;
 }
 
-static inline int task_ioprio_class(struct task_struct *task)
+static inline int task_ioprio_class(struct io_context *ioc)
 {
-	if (ioprio_valid(task->ioprio))
-		return IOPRIO_PRIO_CLASS(task->ioprio);
+	if (ioprio_valid(ioc->ioprio))
+		return IOPRIO_PRIO_CLASS(ioc->ioprio);
 
 	return IOPRIO_CLASS_BE;
 }
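task_ioprio() and task_ioprio_class() now take the io_context directly rather than the task, since after this series the priority lives in ioc->ioprio instead of task->ioprio. Callers change accordingly; a hedged before/after sketch (the helper name is made up for illustration):

/* Before this change a caller read the priority off the task: */
/*	prio = IOPRIO_PRIO_VALUE(task_ioprio_class(task), task_ioprio(task)); */

/* After it, the caller needs the task's io_context instead: */
static int effective_ioprio_sketch(struct io_context *ioc)
{
	return IOPRIO_PRIO_VALUE(task_ioprio_class(ioc), task_ioprio(ioc));
}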
diff --git a/include/linux/sched.h b/include/linux/sched.h
index df5b24ee80b3..2d0546e884ea 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -27,6 +27,7 @@
 #define CLONE_NEWUSER		0x10000000	/* New user namespace */
 #define CLONE_NEWPID		0x20000000	/* New pid namespace */
 #define CLONE_NEWNET		0x40000000	/* New network namespace */
+#define CLONE_IO		0x80000000	/* Clone io context */
 
 /*
  * Scheduling policies
@@ -975,7 +976,6 @@ struct task_struct {
 	struct hlist_head preempt_notifiers;
 #endif
 
-	unsigned short ioprio;
 	/*
	 * fpu_counter contains the number of consecutive context switches
	 * that the FPU is used. If this is over a threshold, the lazy fpu
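CLONE_IO claims the last free clone-flag bit and lets a new task share its parent's io_context instead of getting its own. A hedged user-space sketch of what that enables, assuming a kernel with this series and glibc's clone(3) wrapper; whether the installed headers expose CLONE_IO varies, hence the fallback define:

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/wait.h>

#ifndef CLONE_IO
#define CLONE_IO 0x80000000	/* value added to sched.h above */
#endif

static int io_worker(void *arg)
{
	/* I/O issued here is attributed to the shared io_context. */
	return 0;
}

int main(void)
{
	const size_t stack_size = 64 * 1024;
	char *stack = malloc(stack_size);
	pid_t pid;

	if (!stack)
		return 1;
	/* CLONE_VM keeps handing the child a malloc'ed stack sane. */
	pid = clone(io_worker, stack + stack_size,
		    CLONE_VM | CLONE_IO | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return 1;
	}
	waitpid(pid, NULL, 0);
	free(stack);
	return 0;
}

With CLONE_IO set, the I/O scheduler sees parent and child as one stream of requests, which is the point of the whole series: anticipatory and CFQ state can follow the shared context rather than each task.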
