path: root/drivers/block/aoe/aoechr.c
author		Ed L. Cashin <ecashin@coraid.com>	2006-09-20 14:36:51 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2006-10-18 15:53:51 -0400
commit		a12c93f08b8fc83b7fcdabaf92b1adcea7489f5e (patch)
tree		493fb94c32f45b5f1c8109c7ce170f653cb5c3d5 /drivers/block/aoe/aoechr.c
parent		086216db1435f44a58c18454acfa59f013510c95 (diff)
aoe: revert printk macros
This patch addresses the concern that the aoe driver should not
introduce unnecessary conventions that must be learned by the reader.
It reverts patch 6.

Signed-off-by: "Ed L. Cashin" <ecashin@coraid.com>
Acked-by: Alan Cox <alan@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
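For context, the reverted macros wrapped printk with an implicit log level and an
"aoe: " prefix. A minimal sketch of that shape, reconstructed from how each call
site expands in the diff below (the exact pre-revert definitions may have differed):

	/* Reconstructed sketch, not the verbatim pre-revert source: the diff
	 * below replaces calls that behave as if defined like this. */
	#define eprintk(fmt, args...) printk(KERN_ERR "aoe: " fmt, ##args)
	#define iprintk(fmt, args...) printk(KERN_INFO "aoe: " fmt, ##args)

After the revert, each call site spells out printk(KERN_ERR "aoe: ...") directly,
so readers see the standard kernel idiom instead of a driver-local convention.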
Diffstat (limited to 'drivers/block/aoe/aoechr.c')
-rw-r--r--	drivers/block/aoe/aoechr.c	11
1 file changed, 6 insertions, 5 deletions
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index f5cab69fbc91..e22b4c9520a9 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -55,7 +55,8 @@ static int
 interfaces(const char __user *str, size_t size)
 {
 	if (set_aoe_iflist(str, size)) {
-		eprintk("could not set interface list: too many interfaces\n");
+		printk(KERN_ERR
+			"aoe: could not set interface list: too many interfaces\n");
 		return -EINVAL;
 	}
 	return 0;
@@ -78,7 +79,7 @@ revalidate(const char __user *str, size_t size)
 	/* should be e%d.%d format */
 	n = sscanf(buf, "e%d.%d", &major, &minor);
 	if (n != 2) {
-		eprintk("invalid device specification\n");
+		printk(KERN_ERR "aoe: invalid device specification\n");
 		return -EINVAL;
 	}
 	d = aoedev_by_aoeaddr(major, minor);
@@ -113,7 +114,7 @@ bail:	spin_unlock_irqrestore(&emsgs_lock, flags);
 
 	mp = kmalloc(n, GFP_ATOMIC);
 	if (mp == NULL) {
-		eprintk("allocation failure, len=%ld\n", n);
+		printk(KERN_ERR "aoe: allocation failure, len=%ld\n", n);
 		goto bail;
 	}
 
@@ -138,7 +139,7 @@ aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp
 
 	switch ((unsigned long) filp->private_data) {
 	default:
-		iprintk("can't write to that file.\n");
+		printk(KERN_INFO "aoe: can't write to that file.\n");
 		break;
 	case MINOR_DISCOVER:
 		ret = discover();
@@ -247,7 +248,7 @@ aoechr_init(void)
 
 	n = register_chrdev(AOE_MAJOR, "aoechr", &aoe_fops);
 	if (n < 0) {
-		eprintk("can't register char device\n");
+		printk(KERN_ERR "aoe: can't register char device\n");
		return n;
 	}
 	sema_init(&emsgs_sema, 0);
="hl opt">.next = load_balance_next_rt; /* pass 'busiest' rq argument into * load_balance_[start|next]_rt iterators */ rt_rq_iterator.arg = busiest; return balance_tasks(this_rq, this_cpu, busiest, max_load_move, sd, idle, all_pinned, this_best_prio, &rt_rq_iterator); } static int move_one_task_rt(struct rq *this_rq, int this_cpu, struct rq *busiest, struct sched_domain *sd, enum cpu_idle_type idle) { struct rq_iterator rt_rq_iterator; rt_rq_iterator.start = load_balance_start_rt; rt_rq_iterator.next = load_balance_next_rt; rt_rq_iterator.arg = busiest; return iter_move_one_task(this_rq, this_cpu, busiest, sd, idle, &rt_rq_iterator); } #endif static void task_tick_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); /* * RR tasks need a special form of timeslice management. * FIFO tasks have no timeslices. */ if (p->policy != SCHED_RR) return; if (--p->time_slice) return; p->time_slice = DEF_TIMESLICE; /* * Requeue to the end of queue if we are not the only element * on the queue: */ if (p->run_list.prev != p->run_list.next) { requeue_task_rt(rq, p); set_tsk_need_resched(p); } } static void set_curr_task_rt(struct rq *rq) { struct task_struct *p = rq->curr; p->se.exec_start = rq->clock; } const struct sched_class rt_sched_class = { .next = &fair_sched_class, .enqueue_task = enqueue_task_rt, .dequeue_task = dequeue_task_rt, .yield_task = yield_task_rt, .check_preempt_curr = check_preempt_curr_rt, .pick_next_task = pick_next_task_rt, .put_prev_task = put_prev_task_rt, #ifdef CONFIG_SMP .load_balance = load_balance_rt, .move_one_task = move_one_task_rt, #endif .set_curr_task = set_curr_task_rt, .task_tick = task_tick_rt, };