author     Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /include/linux/sunrpc/sched.h
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'include/linux/sunrpc/sched.h')
-rw-r--r--   include/linux/sunrpc/sched.h   273
1 files changed, 273 insertions, 0 deletions
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
new file mode 100644
index 000000000000..99d17ed7cebb
--- /dev/null
+++ b/include/linux/sunrpc/sched.h
@@ -0,0 +1,273 @@
/*
 * linux/include/linux/sunrpc/sched.h
 *
 * Scheduling primitives for kernel Sun RPC.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_

#include <linux/timer.h>
#include <linux/sunrpc/types.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>

/*
 * This is the actual RPC procedure call info.
 */
struct rpc_procinfo;
struct rpc_message {
	struct rpc_procinfo *	rpc_proc;	/* Procedure information */
	void *			rpc_argp;	/* Arguments */
	void *			rpc_resp;	/* Result */
	struct rpc_cred *	rpc_cred;	/* Credentials */
};

struct rpc_wait_queue;
struct rpc_wait {
	struct list_head	list;		/* wait queue links */
	struct list_head	links;		/* Links to related tasks */
	wait_queue_head_t	waitq;		/* sync: sleep on this q */
	struct rpc_wait_queue *	rpc_waitq;	/* RPC wait queue we're on */
};

/*
 * This is the RPC task struct
 */
struct rpc_task {
#ifdef RPC_DEBUG
	unsigned long		tk_magic;	/* 0xf00baa */
#endif
	struct list_head	tk_task;	/* global list of tasks */
	struct rpc_clnt *	tk_client;	/* RPC client */
	struct rpc_rqst *	tk_rqstp;	/* RPC request */
	int			tk_status;	/* result of last operation */

	/*
	 * RPC call state
	 */
	struct rpc_message	tk_msg;		/* RPC call info */
	__u32 *			tk_buffer;	/* XDR buffer */
	size_t			tk_bufsize;
	__u8			tk_garb_retry;
	__u8			tk_cred_retry;

	unsigned long		tk_cookie;	/* Cookie for batching tasks */

	/*
	 * timeout_fn   to be executed by timer bottom half
	 * callback     to be executed after waking up
	 * action       next procedure for async tasks
	 * exit         exit async task and report to caller
	 */
	void			(*tk_timeout_fn)(struct rpc_task *);
	void			(*tk_callback)(struct rpc_task *);
	void			(*tk_action)(struct rpc_task *);
	void			(*tk_exit)(struct rpc_task *);
	void			(*tk_release)(struct rpc_task *);
	void *			tk_calldata;

	/*
	 * tk_timer is used for async processing by the RPC scheduling
	 * primitives. You should not access this directly unless
	 * you have a pathological interest in kernel oopses.
	 */
	struct timer_list	tk_timer;	/* kernel timer */
	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
	unsigned short		tk_flags;	/* misc flags */
	unsigned char		tk_active   : 1;/* Task has been activated */
	unsigned char		tk_priority : 2;/* Task priority */
	unsigned long		tk_runstate;	/* Task run status */
	struct workqueue_struct	*tk_workqueue;	/* Normally rpciod, but could
						 * be any workqueue
						 */
	union {
		struct work_struct	tk_work;	/* Async task work queue */
		struct rpc_wait		tk_wait;	/* RPC wait */
	} u;
#ifdef RPC_DEBUG
	unsigned short		tk_pid;		/* debugging aid */
#endif
};
#define tk_auth			tk_client->cl_auth
#define tk_xprt			tk_client->cl_xprt

/* support walking a list of tasks on a wait queue */
#define	task_for_each(task, pos, head) \
	list_for_each(pos, head) \
		if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)

#define	task_for_first(task, head) \
	if (!list_empty(head) && \
		((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))

/* .. and walking list of all tasks */
#define	alltask_for_each(task, pos, head) \
	list_for_each(pos, head) \
		if ((task=list_entry(pos, struct rpc_task, tk_task)),1)

typedef void			(*rpc_action)(struct rpc_task *);

/*
 * RPC task flags
 */
#define RPC_TASK_ASYNC		0x0001		/* is an async task */
#define RPC_TASK_SWAPPER	0x0002		/* is swapping in/out */
#define RPC_TASK_CHILD		0x0008		/* is child of other task */
#define RPC_CALL_MAJORSEEN	0x0020		/* major timeout seen */
#define RPC_TASK_ROOTCREDS	0x0040		/* force root creds */
#define RPC_TASK_DYNAMIC	0x0080		/* task was kmalloc'ed */
#define RPC_TASK_KILLED		0x0100		/* task was killed */
#define RPC_TASK_SOFT		0x0200		/* Use soft timeouts */
#define RPC_TASK_NOINTR		0x0400		/* uninterruptible task */

#define RPC_IS_ASYNC(t)		((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_CHILD(t)		((t)->tk_flags & RPC_TASK_CHILD)
#define RPC_IS_SWAPPER(t)	((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t)	((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t)	((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_IS_ACTIVATED(t)	((t)->tk_active)
#define RPC_DO_CALLBACK(t)	((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t)		((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)

#define RPC_TASK_RUNNING	0
#define RPC_TASK_QUEUED		1
#define RPC_TASK_WAKEUP		2
#define RPC_TASK_HAS_TIMER	3

#define RPC_IS_RUNNING(t)	(test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_set_running(t)	(set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_test_and_set_running(t) \
				(test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate))
#define rpc_clear_running(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define RPC_IS_QUEUED(t)	(test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_set_queued(t)	(set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate))
#define rpc_clear_queued(t)	\
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

#define rpc_start_wakeup(t) \
	(test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
	do { \
		smp_mb__before_clear_bit(); \
		clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
		smp_mb__after_clear_bit(); \
	} while (0)

/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 */
#define RPC_PRIORITY_LOW	0
#define RPC_PRIORITY_NORMAL	1
#define RPC_PRIORITY_HIGH	2
#define RPC_NR_PRIORITY		(RPC_PRIORITY_HIGH+1)

/*
 * RPC synchronization objects
 */
struct rpc_wait_queue {
	spinlock_t		lock;
	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
	unsigned long		cookie;			/* cookie of last task serviced */
	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
	unsigned char		priority;		/* current priority */
	unsigned char		count;			/* # task groups remaining serviced so far */
	unsigned char		nr;			/* # tasks remaining for cookie */
#ifdef RPC_DEBUG
	const char *		name;
#endif
};

/*
 * This is the # requests to send consecutively
 * from a single cookie. The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_BATCH_COUNT			16

#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = SPIN_LOCK_UNLOCKED, \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
			}, \
	}
#else
# define RPC_WAITQ_INIT(var,qname) { \
		.lock = SPIN_LOCK_UNLOCKED, \
		.tasks = { \
			[0] = LIST_HEAD_INIT(var.tasks[0]), \
			[1] = LIST_HEAD_INIT(var.tasks[1]), \
			[2] = LIST_HEAD_INIT(var.tasks[2]), \
			}, \
		.name = qname, \
	}
#endif
# define RPC_WAITQ(var,qname)	struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)

#define RPC_IS_PRIORITY(q)	((q)->maxpriority > 0)

/*
 * Function prototypes
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *, rpc_action, int flags);
struct rpc_task *rpc_new_child(struct rpc_clnt *, struct rpc_task *parent);
void		rpc_init_task(struct rpc_task *, struct rpc_clnt *,
				rpc_action exitfunc, int flags);
void		rpc_release_task(struct rpc_task *);
void		rpc_killall_tasks(struct rpc_clnt *);
int		rpc_execute(struct rpc_task *);
void		rpc_run_child(struct rpc_task *parent, struct rpc_task *child,
				rpc_action action);
void		rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void		rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
				rpc_action action, rpc_action timer);
void		rpc_wake_up_task(struct rpc_task *);
void		rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void		rpc_wake_up_status(struct rpc_wait_queue *, int);
void		rpc_delay(struct rpc_task *, unsigned long);
void *		rpc_malloc(struct rpc_task *, size_t);
int		rpciod_up(void);
void		rpciod_down(void);
void		rpciod_wake_up(void);
#ifdef RPC_DEBUG
void		rpc_show_tasks(void);
#endif
int		rpc_init_mempool(void);
void		rpc_destroy_mempool(void);

static inline void rpc_exit(struct rpc_task *task, int status)
{
	task->tk_status = status;
	task->tk_action = NULL;
}

#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
	return ((q && q->name) ? q->name : "unknown");
}
#endif

#endif /* _LINUX_SUNRPC_SCHED_H_ */
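
As a usage illustration only (not part of the patch): the sketch below shows how a caller of this header might declare a wait queue with RPC_WAITQ(), inspect queued tasks with task_for_each() and the flag/runstate macros, and then drain the queue via rpc_wake_up(). The names demo_waitq and demo_flush_queue are hypothetical, and the locking discipline (holding queue->lock while walking tasks[], letting rpc_wake_up() do its own locking) is an assumption about the conventions in net/sunrpc/sched.c, not something this header guarantees.

/*
 * Illustrative sketch only -- hypothetical caller of the sched.h API above.
 * Assumes queue->lock protects the tasks[] lists and that rpc_wake_up()
 * handles its own locking and list manipulation.
 */
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/sunrpc/sched.h>

/* Statically declare a wait queue; the name string only matters under RPC_DEBUG. */
static RPC_WAITQ(demo_waitq, "demo");

/* Hypothetical helper: count queued async tasks, then wake the whole queue. */
static void demo_flush_queue(void)
{
	struct rpc_task *task;
	struct list_head *pos;
	unsigned int i, nr_async = 0;

	spin_lock_bh(&demo_waitq.lock);
	for (i = 0; i < RPC_NR_PRIORITY; i++) {
		/* task_for_each() walks one priority list of the queue. */
		task_for_each(task, pos, &demo_waitq.tasks[i]) {
			if (RPC_IS_ASYNC(task) && RPC_IS_QUEUED(task))
				nr_async++;
		}
	}
	spin_unlock_bh(&demo_waitq.lock);

	printk(KERN_DEBUG "demo: %u async tasks queued, waking queue\n", nr_async);

	/* Wake everything; the lock is dropped first since rpc_wake_up()
	 * is assumed to take queue->lock itself. */
	rpc_wake_up(&demo_waitq);
}

Because RPC_WAITQ_INIT() only fills in .lock and .tasks[], a statically declared queue has maxpriority == 0 and therefore behaves as a non-priority queue per RPC_IS_PRIORITY(); a priority queue would instead be set up at run time with rpc_init_priority_wait_queue().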