Diffstat (limited to 'litmus')
-rw-r--r--   litmus/Kconfig        |  2
-rw-r--r--   litmus/color.c        | 13
-rw-r--r--   litmus/color_queue.c  | 34
-rw-r--r--   litmus/dgl.c          | 88
-rw-r--r--   litmus/lockdown.c     | 34
-rw-r--r--   litmus/sched_mc.c     |  7
-rw-r--r--   litmus/sched_trace.c  | 13
-rw-r--r--   litmus/way_tracker.c  |  3
8 files changed, 153 insertions, 41 deletions
diff --git a/litmus/Kconfig b/litmus/Kconfig
index e71f7a332f21..1ba3a1368f68 100644
--- a/litmus/Kconfig
+++ b/litmus/Kconfig
@@ -39,7 +39,7 @@ config PLUGIN_PFAIR
 config MERGE_TIMERS
 	bool "Timer-merging Support"
 	depends on HIGH_RES_TIMERS
-	default y
+	default n
 	help
 	  Include support for merging timers.
 
diff --git a/litmus/color.c b/litmus/color.c
index 26f800e785ae..c9826dd48ec2 100644
--- a/litmus/color.c
+++ b/litmus/color.c
@@ -16,7 +16,9 @@
 #include <litmus/color.h>
 #include <litmus/litmus.h> /* for in_list(...) */
 #include <litmus/clock.h>
+#ifndef CONFIG_CACHE_PL310
 #include <litmus/color_queue.h>
+#endif
 #include <litmus/way_tracker.h>
 #include <litmus/trace.h>
 
@@ -222,7 +224,13 @@ void reclaim_pages(struct vm_area_struct *vma)
 int color_sched_in_task(struct task_struct *t)
 {
 	color_page_info_take_ways(t);
+
+#ifdef CONFIG_CACHE_PL310
+	return color_cache_task(t);
+#else
 	return color_queue_enqueue_read(t);
+#endif
+
 }
 
 /*
@@ -231,7 +239,12 @@ int color_sched_in_task(struct task_struct *t)
 int color_sched_out_task(struct task_struct *t)
 {
 	color_page_info_release_ways(t);
+
+#ifdef CONFIG_CACHE_PL310
+	return color_uncache_task(t);
+#else
 	return color_queue_enqueue_flush(t);
+#endif
 }
 
 asmlinkage long sys_set_color_page_info(struct color_ctrl_page __user *user_color_ctrl)
diff --git a/litmus/color_queue.c b/litmus/color_queue.c
index 0b87217a931a..913b2f317c16 100644
--- a/litmus/color_queue.c
+++ b/litmus/color_queue.c
@@ -456,6 +456,40 @@ out_free:
 	return ret;
 }
 
+int color_cache_task(struct task_struct *t)
+{
+	struct color_page_info *info;
+	int npages = 0;
+
+	list_for_each_entry(info, &tsk_rt(t)->color_page_info_list, list) {
+		u32 lvalue = unlocked_way[info->way];
+		color_read_in_mem_lock(lvalue, LOCK_ALL,
+				info->vaddr, info->vaddr + PAGE_SIZE);
+		++npages;
+	}
+
+	return npages;
+}
+
+int color_uncache_task(struct task_struct *t)
+{
+	struct color_page_info *info;
+	int npages = 0;
+
+
+	list_for_each_entry(info, &tsk_rt(t)->color_page_info_list, list) {
+		u32 lvalue = unlocked_way[info->way];
+		void *vaddr = flusher_pages[info->way][info->color];
+
+		color_read_in_mem_lock(lvalue, LOCK_ALL,
+				vaddr, vaddr + PAGE_SIZE);
+
+		++npages;
+	}
+
+	return npages;
+}
+
 static int __init init_color_queue(void)
 {
 	struct cpu_entry *cpu_entry;
diff --git a/litmus/dgl.c b/litmus/dgl.c
index f9cede7ef546..3d6d3201928e 100644
--- a/litmus/dgl.c
+++ b/litmus/dgl.c
@@ -9,12 +9,10 @@
 
 #ifdef DEBUG_DGL
 #define TRACE(fmt, args...) STRACE(fmt, ## args)
-#define STRACE2(fmt, args...) STRACE(fmt, ## args)
 #define TRACE_GREQ(greq, fmt, args...) \
 	TRACE("(greq-%s/%llu) " fmt, (greq->task ? greq->task->comm : "greq"), \
 	      (greq->task ? greq->task->pid : (unsigned long long)greq), ## args)
 #else
-#define STRACE2(fmt, args...) printk(KERN_ERR fmt, ## args)
 #define TRACE(fmt, args...)
 #define TRACE_GREQ(greq, fmt, args...)
 #endif
@@ -72,53 +70,62 @@ static int arr_to_bool(struct dgl *dgl, unsigned long *arr)
 	return (ret != 0);
 }
 
-static void print_queue(struct dgl *dgl, struct list_head *list)
+#ifdef DEBUG_DGL
+#define DEBUG_BUF_LEN 10000
+DEFINE_PER_CPU(char *, debug_buf);
+
+static char* print_queue(char *buf, struct dgl *dgl, struct list_head *list)
 {
 	struct dgl_req *pos;
 	struct dgl_group_req *greq;
 
 	list_for_each_entry(pos, list, list) {
 		greq = pos->greq;
-		sched_trace_log_message("(%s-%d:r%d-p%x-b%x)->", greq->task->comm,
+		buf += sprintf(buf, "(%s/%d:r%d-p%x-b%x)->", greq->task->comm,
 			       greq->task->pid, pos->replicas,
-			       greq->need_prio[0],
-			       greq->blocked[0]);
+			       greq->need_prio[0], greq->blocked[0]);
 	}
-	sched_trace_log_message("\n");
+	buf += sprintf(buf, "\n");
+
+	return buf;
 }
 
-static void print_resource(struct dgl *dgl, struct dgl_resource *resource)
+static char* print_resource(char *buf, struct dgl *dgl,
+			    struct dgl_resource *resource)
 {
-	STRACE2("\tResource %d, real_free: %d, goal_free: %d\n",
+	buf += sprintf(buf, "\tResource %d, real_free: %d, goal_free: %d\n",
 		       resource_id(dgl, resource),
 		       resource->real_free,
 		       resource->goal_free);
-	STRACE2("\t acquired: ");
-	print_queue(dgl, &resource->acquired);
-	STRACE2("\t will_acquire:");
-	print_queue(dgl, &resource->will_acquire);
-	STRACE2("\t waiting:");
-	print_queue(dgl, &resource->waiting);
-	STRACE2("\t will_wait:");
-	print_queue(dgl, &resource->will_wait);
+	buf += sprintf(buf, "\t acquired:");
+	buf = print_queue(buf, dgl, &resource->acquired);
+	buf += sprintf(buf, "\t will_acquire:");
+	buf = print_queue(buf, dgl, &resource->will_acquire);
+	buf += sprintf(buf, "\t waiting:");
+	buf = print_queue(buf, dgl, &resource->waiting);
+	buf += sprintf(buf, "\t will_wait:");
+	buf = print_queue(buf, dgl, &resource->will_wait);
+	return buf;
 }
 
+
 /*
  * Print stats and queues of every resource to the trace log.
  */
 static void print_state(struct dgl *dgl)
 {
 	int i;
+	char *buf, *start;
 	struct dgl_resource *resource;
 
-	sched_trace_log_message("\n");
-	STRACE2("\t\tDGL: requests: %d\n", dgl->requests);
+	start = __get_cpu_var(debug_buf);
+	buf = start + sprintf(start, "\n\t\tDGL: requests: %d\n", dgl->requests);
 
 	for (i = 0; i < dgl->num_resources; ++i) {
 		resource = &dgl->resources[i];
 
 		if (!resource) {
-			STRACE2("\tResource %d is null!\n", i);
+			buf += sprintf(buf, "\tResource %d is null!\n", i);
 		}
 
 		if (!list_empty(&resource->waiting) ||
@@ -126,14 +133,16 @@ static void print_state(struct dgl *dgl)
 		    !list_empty(&resource->acquired) ||
 		    !list_empty(&resource->will_acquire)) {
 
-			print_resource(dgl, resource);
+			buf = print_resource(buf, dgl, resource);
 		}
 	}
-	STRACE2("Dump complete\n");
-	sched_trace_log_message("\n");
+
+	buf += sprintf(buf, "Dump complete\n\n");
+
+	BUG_ON(buf - start > DEBUG_BUF_LEN);
+	sched_trace_log_message(start);
 }
 
-#ifdef DEBUG_DGL
 #define BUG_DUMP(dgl, cond) \
 	do { \
 		if (cond) { \
@@ -142,10 +151,9 @@ static void print_state(struct dgl *dgl)
 			BUG(); \
 	}} while(0)
 #else
-#define BUG_DUMP(dgl, cond)
+#define BUG_DUMP(dgl, cond) BUG_ON(cond)
 #endif
 
-
 static int higher_prio(struct dgl_group_req *a, struct dgl_group_req *b)
 {
 	return (a->priority < b->priority ||
@@ -223,14 +231,14 @@ static int get_lp_replicas(struct dgl *dgl, struct dgl_group_req *greq,
  * Add @req to @list in priority order.
  * @reverse Reverse priority
  */
-static void add_request(struct list_head *list, struct dgl_req *req,
-			int reverse)
+static void add_request(struct dgl *dgl, struct list_head *list,
+			struct dgl_req *req, int reverse)
 {
 	struct list_head *last;
 	struct dgl_req *acquired;
 	struct dgl_group_req *greqa, *greqb;
 
-	BUG_ON(in_list(&req->list));
+	BUG_DUMP(dgl, in_list(&req->list));
 
 	last = list;
 	list_for_each_entry(acquired, list, list) {
@@ -255,7 +263,7 @@ static void add_request(struct list_head *list, struct dgl_req *req,
 static void add_waiting(struct dgl *dgl, struct list_head *list, struct dgl_req *req)
 {
 	BUG_DUMP(dgl, !arr_to_bool(dgl, req->greq->need_prio));
-	add_request(list, req, 0);
+	add_request(dgl, list, req, 0);
 }
 
 /*
@@ -264,7 +272,7 @@ static void add_waiting(struct dgl *dgl, struct list_head *list, struct dgl_req
 static void add_acquired(struct dgl *dgl, struct list_head *list, struct dgl_req *req)
 {
 	BUG_DUMP(dgl, arr_to_bool(dgl, req->greq->need_prio));
-	add_request(list, req, 1);
+	add_request(dgl, list, req, 1);
 }
 
 /*
@@ -888,3 +896,21 @@ void dgl_group_req_free(struct dgl_group_req *greq)
 	kfree(greq->need_prio);
 	kfree(greq->requests);
 }
+
+#ifdef DEBUG_DGL
+static int __init init_dgl_debug(void)
+{
+	int cpu;
+	char **buf;
+
+	for_each_online_cpu(cpu) {
+		buf = &per_cpu(debug_buf, cpu);
+		*buf = kmalloc(DEBUG_BUF_LEN * sizeof(char), GFP_ATOMIC);
+	}
+
+	return 0;
+}
+
+module_init(init_dgl_debug);
+
+#endif
diff --git a/litmus/lockdown.c b/litmus/lockdown.c
index bb93690c00c9..09712554c5b9 100644
--- a/litmus/lockdown.c
+++ b/litmus/lockdown.c
@@ -77,6 +77,7 @@ void litmus_setup_lockdown(void __iomem *base, u32 id)
 #else
 
 static void __iomem *cache_base;
+
 static void __iomem *lockreg_d;
 static void __iomem *lockreg_i;
 static raw_spinlock_t prefetch_lock;
@@ -113,7 +114,11 @@ u32 color_read_in_mem(u32 lock_val, u32 unlock_val, void *start, void *end)
 	: [addr] "+r" (start),
 	  [val] "+r" (v)
 	: [end] "r" (end),
+#ifdef CONFIG_CACHE_PL310
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
 	  [cachereg] "r" (lockreg_d),
+#endif
 	  [lockval] "r" (lock_val)
 	: "cc");
 
@@ -137,7 +142,10 @@ u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
 	unsigned long flags;
 	u32 v = 0;
 
+#ifndef CONFIG_CACHE_PL310
 	raw_spin_lock_irqsave(&prefetch_lock, flags);
+#endif
+
 	__asm__ __volatile__ (
 "	.align 5\n"
 "	str	%[lockval], [%[cachereg]]\n"
@@ -148,11 +156,18 @@ u32 color_read_in_mem_lock(u32 lock_val, u32 unlock_val, void *start, void *end)
 	: [addr] "+r" (start),
 	  [val] "+r" (v)
 	: [end] "r" (end),
+#ifdef CONFIG_CACHE_PL310
+	  [cachereg] "r" (ld_d_reg(raw_smp_processor_id())),
+#else
 	  [cachereg] "r" (lockreg_d),
+#endif
 	  [lockval] "r" (lock_val),
 	  [unlockval] "r" (unlock_val)
 	: "cc");
+
+#ifndef CONFIG_CACHE_PL310
 	raw_spin_unlock_irqrestore(&prefetch_lock, flags);
+#endif
 
 	return v;
 }
@@ -330,9 +345,16 @@ int litmus_lockdown_proc_handler(struct ctl_table *table, int write,
 
 #define TRIALS 1000
 
+static void sleep_ns(int ns)
+{
+	int i;
+	lt_t start = litmus_clock();
+	for (i = 0; litmus_clock() - start < ns;i++);
+}
+
 static int test_get_cycles_overhead(void)
 {
-	u64 sum = 0, min = (u64)-1, max = 0;
+	u64 avg = 0, min = (u64)-1, max = 0;
 	unsigned long flags;
 	cycles_t a, b;
 	int i;
@@ -342,6 +364,7 @@ static int test_get_cycles_overhead(void)
 		local_irq_save(flags);
 		preempt_disable();
 		a = litmus_get_cycles();
+		sleep_ns(15000);
 		b = litmus_get_cycles();
 		preempt_enable();
 		local_irq_restore(flags);
@@ -350,10 +373,11 @@ static int test_get_cycles_overhead(void)
 			max = diff;
 		if (diff < min)
 			min = diff;
-		sum += diff;
+		avg += div64_u64(diff, TRIALS);
 	}
-	printk("cycle test: avg: %llu min: %llu max: %llu\n",
-	       div64_u64(sum, TRIALS), min, max);
+	printk("cycle test 15us: avg: %llu min: %llu max: %llu\n",
+	       avg, min, max);
+
 	return 0;
 }
 
@@ -661,7 +685,7 @@ int litmus_test_prefetch_proc_handler(struct ctl_table *table, int write,
 
 	free_page((unsigned long)vaddr);
 
-	//test_get_cycles_overhead();
+	test_get_cycles_overhead();
 	test_read_in();
 
 	return 0;
diff --git a/litmus/sched_mc.c b/litmus/sched_mc.c
index 64de4ef9c466..6edf86935a29 100644
--- a/litmus/sched_mc.c
+++ b/litmus/sched_mc.c
@@ -30,7 +30,7 @@
 #include <litmus/dgl.h>
 #include <litmus/color.h>
 #include <litmus/way_tracker.h>
-
+#warning "MUST ADD CHECK FOR MAX WAYS"
 struct mc_signal {
 	int update:1;
 	int preempt:1;
@@ -58,6 +58,10 @@ struct cpu_entry {
 #endif
 };
 
+#ifndef CONFIG_NP_SECTION
+#error "Need NP section for coloring!"
+#endif
+
 static struct dgl group_lock;
 static raw_spinlock_t dgl_lock;
 
@@ -97,7 +101,6 @@ static int acquire_resources(struct task_struct *t)
 
 	BUG_ON(tsk_rt(t)->linked_on == NO_CPU);
 
-
 	raw_spin_lock(&dgl_lock);
 
 	cpu = tsk_rt(t)->linked_on;
diff --git a/litmus/sched_trace.c b/litmus/sched_trace.c
index f4171fddbbb1..5acb4a48fb6b 100644
--- a/litmus/sched_trace.c
+++ b/litmus/sched_trace.c
@@ -26,7 +26,7 @@
 
 /* Max length for one write --- by TRACE() --- to the buffer. This is used to
  * allocate a per-cpu buffer for printf() formatting. */
-#define MSG_SIZE 255
+#define MSG_SIZE 10000
 
 
 static DEFINE_MUTEX(reader_mutex);
@@ -35,7 +35,7 @@ static DEFINE_KFIFO(debug_buffer, char, LITMUS_TRACE_BUF_SIZE);
 
 
 static DEFINE_RAW_SPINLOCK(log_buffer_lock);
-static DEFINE_PER_CPU(char[MSG_SIZE], fmt_buffer);
+static DEFINE_PER_CPU(char*, fmt_buffer);
 
 /*
  * sched_trace_log_message - Write to the trace buffer (log_buffer)
@@ -230,8 +230,17 @@ static struct sysrq_key_op sysrq_dump_trace_buffer_op = {
 
 static int __init init_sched_trace(void)
 {
+	int cpu;
+	char **buf;
+
+
 	printk("Initializing TRACE() device\n");
 
+	for_each_online_cpu(cpu) {
+		buf = &per_cpu(fmt_buffer, cpu);
+		*buf = kmalloc(MSG_SIZE * sizeof(char), GFP_ATOMIC);
+	}
+
 #ifdef CONFIG_MAGIC_SYSRQ
 	/* offer some debugging help */
 	if (!register_sysrq_key('y', &sysrq_dump_trace_buffer_op))
diff --git a/litmus/way_tracker.c b/litmus/way_tracker.c
index a9e8ef62f37e..ff392ab09c4d 100644
--- a/litmus/way_tracker.c
+++ b/litmus/way_tracker.c
@@ -6,6 +6,7 @@
 #include <linux/bitops.h>
 #include <linux/slab.h>
 #include <linux/list.h>
+#include <linux/kgdb.h>
 
 #include <litmus/litmus.h>
 #include <litmus/color.h>
@@ -38,7 +39,9 @@ static int take_next_way(unsigned int color)
 		clear_bit(idx, &ways[color]);
 		TRACE("Took, now %lu free of color %d\n", hweight_long(ways[color]), color);
 	} else {
+		printk(KERN_WARNING "Vury bad\n");
 		/* Seriously bad. */
+		kgdb_breakpoint();
 		BUG();
 	}
 	raw_spin_unlock(&lock);