 fs/nfs/direct.c              | 10 ----------
 fs/nfs/read.c                |  2 --
 fs/nfs/write.c               | 12 ++++++------
 include/linux/sunrpc/sched.h | 17 +++++++++--------
 net/sunrpc/sched.c           | 30 +++++++++++++++---------------
 5 files changed, 29 insertions(+), 42 deletions(-)
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index f9f5fc13dc7d..5bcc764e501a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -331,8 +331,6 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->read_setup(data);
 
-	data->task.tk_cookie = (unsigned long) inode;
-
 	rpc_execute(&data->task);
 
 	dprintk("NFS: %5u initiated direct read call "
@@ -465,9 +463,6 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
 
-	data->task.tk_priority = RPC_PRIORITY_NORMAL;
-	data->task.tk_cookie = (unsigned long) inode;
-
 	/*
 	 * We're called via an RPC callback, so BKL is already held.
 	 */
@@ -534,8 +529,6 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(data->inode)->commit_setup(data, 0);
 
-	data->task.tk_priority = RPC_PRIORITY_NORMAL;
-	data->task.tk_cookie = (unsigned long)data->inode;
 	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
 	dreq->commit_data = NULL;
 
@@ -718,9 +711,6 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->write_setup(data, sync);
 
-	data->task.tk_priority = RPC_PRIORITY_NORMAL;
-	data->task.tk_cookie = (unsigned long) inode;
-
 	rpc_execute(&data->task);
 
 	dprintk("NFS: %5u initiated direct write call "
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index c7f0d5ebd451..8f1eb08ccffa 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -189,8 +189,6 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->read_setup(data);
 
-	data->task.tk_cookie = (unsigned long)inode;
-
 	dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
 			data->task.tk_pid,
 			inode->i_sb->s_id,
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c4376606f106..8d90e90ccd47 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -753,7 +753,7 @@ static void nfs_writepage_release(struct nfs_page *req)
 	nfs_clear_page_tag_locked(req);
 }
 
-static inline int flush_task_priority(int how)
+static int flush_task_priority(int how)
 {
 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
 	case FLUSH_HIGHPRI:
@@ -775,11 +775,13 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 {
 	struct inode *inode = req->wb_context->path.dentry->d_inode;
 	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+	int priority = flush_task_priority(how);
 	struct rpc_task_setup task_setup_data = {
 		.rpc_client = NFS_CLIENT(inode),
 		.callback_ops = call_ops,
 		.callback_data = data,
 		.flags = flags,
+		.priority = priority,
 	};
 
 	/* Set up the RPC argument and reply structs
@@ -805,9 +807,6 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->write_setup(data, how);
 
-	data->task.tk_priority = flush_task_priority(how);
-	data->task.tk_cookie = (unsigned long)inode;
-
 	dprintk("NFS: %5u initiated write call "
 		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
 		data->task.tk_pid,
@@ -1152,11 +1151,13 @@ static void nfs_commit_rpcsetup(struct list_head *head,
 	struct nfs_page *first = nfs_list_entry(head->next);
 	struct inode *inode = first->wb_context->path.dentry->d_inode;
 	int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
+	int priority = flush_task_priority(how);
 	struct rpc_task_setup task_setup_data = {
 		.rpc_client = NFS_CLIENT(inode),
 		.callback_ops = &nfs_commit_ops,
 		.callback_data = data,
 		.flags = flags,
+		.priority = priority,
 	};
 
 	/* Set up the RPC argument and reply structs
@@ -1180,9 +1181,6 @@ static void nfs_commit_rpcsetup(struct list_head *head,
 	rpc_init_task(&data->task, &task_setup_data);
 	NFS_PROTO(inode)->commit_setup(data, how);
 
-	data->task.tk_priority = flush_task_priority(how);
-	data->task.tk_cookie = (unsigned long)inode;
-
 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
 }
 
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h
index d974421d7647..c9444fdc23ac 100644
--- a/include/linux/sunrpc/sched.h
+++ b/include/linux/sunrpc/sched.h
@@ -56,8 +56,6 @@ struct rpc_task {
 	__u8			tk_garb_retry;
 	__u8			tk_cred_retry;
 
-	unsigned long		tk_cookie;	/* Cookie for batching tasks */
-
 	/*
 	 * timeout_fn to be executed by timer bottom half
 	 * callback to be executed after waking up
@@ -78,7 +76,6 @@ struct rpc_task {
 	struct timer_list	tk_timer;	/* kernel timer */
 	unsigned long		tk_timeout;	/* timeout for rpc_sleep() */
 	unsigned short		tk_flags;	/* misc flags */
-	unsigned char		tk_priority : 2;/* Task priority */
 	unsigned long		tk_runstate;	/* Task run status */
 	struct workqueue_struct	*tk_workqueue;	/* Normally rpciod, but could
 						 * be any workqueue
@@ -94,6 +91,9 @@ struct rpc_task {
 	unsigned long		tk_start;	/* RPC task init timestamp */
 	long			tk_rtt;		/* round-trip time (jiffies) */
 
+	pid_t			tk_owner;	/* Process id for batching tasks */
+	unsigned char		tk_priority : 2;/* Task priority */
+
 #ifdef RPC_DEBUG
 	unsigned short		tk_pid;		/* debugging aid */
 #endif
@@ -123,6 +123,7 @@ struct rpc_task_setup {
 	const struct rpc_call_ops *callback_ops;
 	void *callback_data;
 	unsigned short flags;
+	signed char priority;
 };
 
 /*
@@ -187,10 +188,10 @@ struct rpc_task_setup {
  * Note: if you change these, you must also change
  * the task initialization definitions below.
  */
-#define RPC_PRIORITY_LOW	0
-#define RPC_PRIORITY_NORMAL	1
-#define RPC_PRIORITY_HIGH	2
-#define RPC_NR_PRIORITY		(RPC_PRIORITY_HIGH+1)
+#define RPC_PRIORITY_LOW	(-1)
+#define RPC_PRIORITY_NORMAL	(0)
+#define RPC_PRIORITY_HIGH	(1)
+#define RPC_NR_PRIORITY		(1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
 
 /*
  * RPC synchronization objects
@@ -198,7 +199,7 @@ struct rpc_task_setup {
 struct rpc_wait_queue {
 	spinlock_t		lock;
 	struct list_head	tasks[RPC_NR_PRIORITY];	/* task queue for each priority level */
-	unsigned long		cookie;			/* cookie of last task serviced */
+	pid_t			owner;			/* process id of last task serviced */
 	unsigned char		maxpriority;		/* maximum priority (0 if queue is not a priority queue) */
 	unsigned char		priority;		/* current priority */
 	unsigned char		count;			/* # task groups remaining serviced so far */
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 10216989309c..b9061bcf6fc1 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -135,7 +135,7 @@ static void __rpc_add_wait_queue_priority(struct rpc_wait_queue *queue, struct r
 	if (unlikely(task->tk_priority > queue->maxpriority))
 		q = &queue->tasks[queue->maxpriority];
 	list_for_each_entry(t, q, u.tk_wait.list) {
-		if (t->tk_cookie == task->tk_cookie) {
+		if (t->tk_owner == task->tk_owner) {
 			list_add_tail(&task->u.tk_wait.list, &t->u.tk_wait.links);
 			return;
 		}
@@ -208,26 +208,26 @@ static inline void rpc_set_waitqueue_priority(struct rpc_wait_queue *queue, int
 	queue->count = 1 << (priority * 2);
 }
 
-static inline void rpc_set_waitqueue_cookie(struct rpc_wait_queue *queue, unsigned long cookie)
+static inline void rpc_set_waitqueue_owner(struct rpc_wait_queue *queue, pid_t pid)
 {
-	queue->cookie = cookie;
+	queue->owner = pid;
 	queue->nr = RPC_BATCH_COUNT;
 }
 
 static inline void rpc_reset_waitqueue_priority(struct rpc_wait_queue *queue)
 {
 	rpc_set_waitqueue_priority(queue, queue->maxpriority);
-	rpc_set_waitqueue_cookie(queue, 0);
+	rpc_set_waitqueue_owner(queue, 0);
 }
 
-static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, int maxprio)
+static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname, unsigned char nr_queues)
 {
 	int i;
 
 	spin_lock_init(&queue->lock);
 	for (i = 0; i < ARRAY_SIZE(queue->tasks); i++)
 		INIT_LIST_HEAD(&queue->tasks[i]);
-	queue->maxpriority = maxprio;
+	queue->maxpriority = nr_queues - 1;
 	rpc_reset_waitqueue_priority(queue);
 #ifdef RPC_DEBUG
 	queue->name = qname;
@@ -236,12 +236,12 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
 
 void rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 {
-	__rpc_init_priority_wait_queue(queue, qname, RPC_PRIORITY_HIGH);
+	__rpc_init_priority_wait_queue(queue, qname, RPC_NR_PRIORITY);
 }
 
 void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
 {
-	__rpc_init_priority_wait_queue(queue, qname, 0);
+	__rpc_init_priority_wait_queue(queue, qname, 1);
 }
 EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
 
@@ -456,12 +456,12 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
 	struct rpc_task *task;
 
 	/*
-	 * Service a batch of tasks from a single cookie.
+	 * Service a batch of tasks from a single owner.
 	 */
 	q = &queue->tasks[queue->priority];
 	if (!list_empty(q)) {
 		task = list_entry(q->next, struct rpc_task, u.tk_wait.list);
-		if (queue->cookie == task->tk_cookie) {
+		if (queue->owner == task->tk_owner) {
 			if (--queue->nr)
 				goto out;
 			list_move_tail(&task->u.tk_wait.list, q);
@@ -470,7 +470,7 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
 		 * Check if we need to switch queues.
 		 */
 		if (--queue->count)
-			goto new_cookie;
+			goto new_owner;
 	}
 
 	/*
@@ -492,8 +492,8 @@ static struct rpc_task * __rpc_wake_up_next_priority(struct rpc_wait_queue *queu
 
 new_queue:
 	rpc_set_waitqueue_priority(queue, (unsigned int)(q - &queue->tasks[0]));
-new_cookie:
-	rpc_set_waitqueue_cookie(queue, task->tk_cookie);
+new_owner:
+	rpc_set_waitqueue_owner(queue, task->tk_owner);
 out:
 	__rpc_wake_up_task(task);
 	return task;
@@ -830,8 +830,8 @@ void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setu
 	task->tk_garb_retry = 2;
 	task->tk_cred_retry = 2;
 
-	task->tk_priority = RPC_PRIORITY_NORMAL;
-	task->tk_cookie = (unsigned long)current;
+	task->tk_priority = task_setup_data->priority - RPC_PRIORITY_LOW;
+	task->tk_owner = current->tgid;
 
 	/* Initialize workqueue for async tasks */
 	task->tk_workqueue = rpciod_workqueue;
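The net effect of the sched.h and sched.c hunks is that callers now request a signed priority (RPC_PRIORITY_LOW = -1 through RPC_PRIORITY_HIGH = 1, with NORMAL = 0 so a zero-filled rpc_task_setup defaults to normal) and rpc_init_task() rebases it to an unsigned index into queue->tasks[] by subtracting RPC_PRIORITY_LOW. The following is a minimal user-space sketch of that mapping only; struct fake_task and init_priority() are stand-ins for illustration, not the kernel structures.

```c
#include <assert.h>
#include <stdio.h>

/* Priority levels as redefined by this patch (signed, NORMAL == 0). */
#define RPC_PRIORITY_LOW	(-1)
#define RPC_PRIORITY_NORMAL	(0)
#define RPC_PRIORITY_HIGH	(1)
#define RPC_NR_PRIORITY		(1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)

/* Stand-in for the relevant field of struct rpc_task (illustrative only). */
struct fake_task {
	unsigned char tk_priority : 2;	/* array index, 0..RPC_NR_PRIORITY-1 */
};

/* Mirrors what rpc_init_task() now does with task_setup_data->priority. */
static void init_priority(struct fake_task *task, signed char priority)
{
	task->tk_priority = priority - RPC_PRIORITY_LOW;
}

int main(void)
{
	struct fake_task t;

	/* A zero-initialized setup struct now means RPC_PRIORITY_NORMAL... */
	init_priority(&t, RPC_PRIORITY_NORMAL);
	assert(t.tk_priority == 1);		/* middle slot of queue->tasks[] */

	/* ...while FLUSH_LOWPRI/FLUSH_HIGHPRI map to the two ends. */
	init_priority(&t, RPC_PRIORITY_LOW);
	assert(t.tk_priority == 0);
	init_priority(&t, RPC_PRIORITY_HIGH);
	assert(t.tk_priority == RPC_NR_PRIORITY - 1);

	printf("RPC_NR_PRIORITY = %d\n", RPC_NR_PRIORITY);
	return 0;
}
```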