diff options
Diffstat (limited to 'litmus/nvidia_info.c')
-rw-r--r-- | litmus/nvidia_info.c | 1137 |
1 file changed, 1137 insertions, 0 deletions
diff --git a/litmus/nvidia_info.c b/litmus/nvidia_info.c new file mode 100644 index 000000000000..5a63fb732e8b --- /dev/null +++ b/litmus/nvidia_info.c | |||
@@ -0,0 +1,1137 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/semaphore.h> | ||
3 | #include <linux/pci.h> | ||
4 | |||
5 | #include <litmus/sched_trace.h> | ||
6 | #include <litmus/nvidia_info.h> | ||
7 | #include <litmus/litmus.h> | ||
8 | |||
9 | #include <litmus/sched_plugin.h> | ||
10 | |||
11 | #include <litmus/binheap.h> | ||
12 | |||
13 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
14 | #include <litmus/litmus_softirq.h> | ||
15 | #endif | ||
16 | |||
/* NVIDIA driver-compatible scalar type aliases (mirror of the NV driver's
 * NvTypes). "NvV*" are "void"-ish register values (enumerated or multiple
 * fields); "NvU*"/"NvS*"/"NvF*" are plain unsigned/signed/float types of
 * the stated width. These must stay binary-compatible with the driver. */
typedef unsigned char      NvV8;  /* "void": enumerated or multiple fields */
typedef unsigned short     NvV16; /* "void": enumerated or multiple fields */
typedef unsigned char      NvU8;  /* 0 to 255 */
typedef unsigned short     NvU16; /* 0 to 65535 */
typedef signed char        NvS8;  /* -128 to 127 */
typedef signed short       NvS16; /* -32768 to 32767 */
typedef float              NvF32; /* IEEE Single Precision (S1E8M23) */
typedef double             NvF64; /* IEEE Double Precision (S1E11M52) */
typedef unsigned int       NvV32; /* "void": enumerated or multiple fields */
typedef unsigned int       NvU32; /* 0 to 4294967295 */
typedef unsigned long long NvU64; /* 0 to 18446744073709551615 */

/* Overlay allowing 8/16/32-bit access to one hardware register window.
 * volatile: accesses may target memory-mapped device registers. */
typedef union
{
	volatile NvV8 Reg008[1];
	volatile NvV16 Reg016[1];
	volatile NvV32 Reg032[1];
} litmus_nv_hwreg_t, * litmus_nv_phwreg_t;
34 | |||
/* Shadow of the NV driver's aperture descriptor (a mapped PCI BAR/region).
 * NOTE(review): field layout must match the loaded driver build exactly;
 * CONFIG_CUDA_5_0 selects the variant with the extra field. */
typedef struct
{
	NvU64 address;
#ifdef CONFIG_CUDA_5_0
	NvU64 strapped_size;	/* present only in the CUDA 5.0 driver layout */
#endif
	NvU64 size;
	NvU32 offset;
	NvU32 *map;
	litmus_nv_phwreg_t map_u;
} litmus_nv_aperture_t;
46 | |||
/* Shadow of the binary NVIDIA driver's internal nv_state_t.
 * NOTE(review): field order and sizes must match the layout of the loaded
 * driver version exactly (the CONFIG_CUDA_* options select variants) — a
 * mismatch silently corrupts every offset computed from this struct.
 * TODO: confirm against the target driver release when upgrading. */
typedef struct
{
	void *priv;			/* private data */
	void *os_state;			/* os-specific device state */

#ifndef CONFIG_CUDA_5_0
	int rmInitialized;		/* dropped from the CUDA 5.0 layout */
#endif
	int flags;

	/* PCI config info */
	NvU32 domain;
	NvU16 bus;
	NvU16 slot;
	NvU16 vendor_id;
	NvU16 device_id;
	NvU16 subsystem_id;
	NvU32 gpu_id;
	void *handle;

	NvU32 pci_cfg_space[16];

	/* physical characteristics */
	litmus_nv_aperture_t bars[3];
	litmus_nv_aperture_t *regs;
	litmus_nv_aperture_t *fb, ud;
	litmus_nv_aperture_t agp;

	NvU32 interrupt_line;

	NvU32 agp_config;
	NvU32 agp_status;

	NvU32 primary_vga;

	NvU32 sim_env;

	NvU32 rc_timer_enabled;

	/* list of events allocated for this device */
	void *event_list;

	void *kern_mappings;

} litmus_nv_state_t;
92 | |||
/* Alias for the work item type the NV driver embeds. */
typedef struct work_struct litmus_nv_task_t;

/* Shadow of the NV driver's work wrapper: the embedded work item plus a
 * back-pointer to driver data (read by get_work_nv_device_num()). */
typedef struct litmus_nv_work_s {
	litmus_nv_task_t task;
	void *data;
} litmus_nv_work_t;
99 | |||
/* Shadow of the NV driver's Linux-side per-device state (nv_linux_state_t).
 * The embedded nv_state comes first so container_of() can recover this
 * struct from a litmus_nv_state_t pointer. NOTE(review): layout must track
 * the loaded driver build (CONFIG_CUDA_4_0/5_0 select variants). */
typedef struct litmus_nv_linux_state_s {
	litmus_nv_state_t nv_state;
	atomic_t usage_count;

	struct pci_dev *dev;
	void *agp_bridge;
	void *alloc_queue;

	void *timer_sp;
	void *isr_sp;
	void *pci_cfgchk_sp;
	void *isr_bh_sp;

#if defined(CONFIG_CUDA_4_0) || defined(CONFIG_CUDA_5_0)
	char registry_keys[512];
#endif

	/* keep track of any pending bottom halves */
	struct tasklet_struct tasklet;
	litmus_nv_work_t work;

	/* get a timer callback every second */
	struct timer_list rc_timer;

	/* lock for linux-specific data, not used by core rm */
	struct semaphore ldata_lock;

	/* lock for linux-specific alloc queue */
	struct semaphore at_lock;

#if 0
#if defined(NV_USER_MAP)
	/* list of user mappings */
	struct nv_usermap_s *usermap_list;

	/* lock for VMware-specific mapping list */
	struct semaphore mt_lock;
#endif /* defined(NV_USER_MAP) */
#if defined(NV_PM_SUPPORT_OLD_STYLE_APM)
	void *apm_nv_dev;
#endif
#endif

	/* GPU index — the field LITMUS actually needs from this shadow */
	NvU32 device_num;
	struct litmus_nv_linux_state_s *next;
} litmus_nv_linux_state_t;
146 | |||
/* Debugging aid: dump everything we can recover about the NV driver state
 * that tasklet 't' points at, and cross-check the struct-shadow offsets
 * against the live data. Output goes to TRACE only; no state is modified. */
void dump_nvidia_info(const struct tasklet_struct *t)
{
	litmus_nv_state_t* nvstate = NULL;
	litmus_nv_linux_state_t* linuxstate = NULL;
	struct pci_dev* pci = NULL;

	/* the NV driver stashes its device state pointer in tasklet->data */
	nvstate = (litmus_nv_state_t*)(t->data);

	if(nvstate)
	{
		TRACE("NV State:\n"
			"\ttasklet ptr = %p\n"
			"\tstate ptr = %p\n"
			"\tprivate data ptr = %p\n"
			"\tos state ptr = %p\n"
			"\tdomain = %u\n"
			"\tbus = %u\n"
			"\tslot = %u\n"
			"\tvender_id = %u\n"
			"\tdevice_id = %u\n"
			"\tsubsystem_id = %u\n"
			"\tgpu_id = %u\n"
			"\tinterrupt_line = %u\n",
			t,
			nvstate,
			nvstate->priv,
			nvstate->os_state,
			nvstate->domain,
			nvstate->bus,
			nvstate->slot,
			nvstate->vendor_id,
			nvstate->device_id,
			nvstate->subsystem_id,
			nvstate->gpu_id,
			nvstate->interrupt_line);

		/* nv_state is the first member of the linux-state shadow */
		linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state);
	}
	else
	{
		TRACE("INVALID NVSTATE????\n");
	}

	if(linuxstate)
	{
		/* offsets of device_num relative to three base pointers; the
		 * latter two should agree if the struct shadow matches the
		 * loaded driver's layout */
		int ls_offset = (void*)(&(linuxstate->device_num)) - (void*)(linuxstate);
		int ns_offset_raw = (void*)(&(linuxstate->device_num)) - (void*)(&(linuxstate->nv_state));
		int ns_offset_desired = (void*)(&(linuxstate->device_num)) - (void*)(nvstate);


		TRACE("LINUX NV State:\n"
			"\tlinux nv state ptr: %p\n"
			"\taddress of tasklet: %p\n"
			"\taddress of work: %p\n"
			"\tusage_count: %d\n"
			"\tdevice_num: %u\n"
			"\ttasklet addr == this tasklet: %d\n"
			"\tpci: %p\n",
			linuxstate,
			&(linuxstate->tasklet),
			&(linuxstate->work),
			atomic_read(&(linuxstate->usage_count)),
			linuxstate->device_num,
			(t == &(linuxstate->tasklet)),
			linuxstate->dev);

		/* only consumed by the #if 0 dump below; kept for re-enabling */
		pci = linuxstate->dev;

		TRACE("Offsets:\n"
			"\tOffset from LinuxState: %d, %x\n"
			"\tOffset from NVState: %d, %x\n"
			"\tOffset from parameter: %d, %x\n"
			"\tdevice_num: %u\n",
			ls_offset, ls_offset,
			ns_offset_raw, ns_offset_raw,
			ns_offset_desired, ns_offset_desired,
			*((u32*)((void*)nvstate + ns_offset_desired)));
	}
	else
	{
		TRACE("INVALID LINUXNVSTATE?????\n");
	}

#if 0
	if(pci)
	{
		TRACE("PCI DEV Info:\n"
			"pci device ptr: %p\n"
			"\tdevfn = %d\n"
			"\tvendor = %d\n"
			"\tdevice = %d\n"
			"\tsubsystem_vendor = %d\n"
			"\tsubsystem_device = %d\n"
			"\tslot # = %d\n",
			pci,
			pci->devfn,
			pci->vendor,
			pci->device,
			pci->subsystem_vendor,
			pci->subsystem_device,
			pci->slot->number);
	}
	else
	{
		TRACE("INVALID PCIDEV PTR?????\n");
	}
#endif
}
255 | |||
256 | |||
257 | |||
/* Cached reference to the loaded "nvidia" module; set by init_nvidia_info()
 * and cleared by shutdown_nvidia_info(). NULL while the driver is absent. */
static struct module* nvidia_mod = NULL;
259 | |||
260 | |||
261 | |||
262 | |||
#if 0
/* Dead code: module-notifier based hook/unhook of the nvidia module,
 * superseded by the explicit init_nvidia_info()/shutdown_nvidia_info()
 * calls below; kept for reference.
 * NOTE(review): nvidia_ready_module_notify is missing a return statement
 * on its else path — fix before re-enabling. */
static int nvidia_ready_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	mutex_lock(&module_mutex);
	nvidia_mod = find_module("nvidia");
	mutex_unlock(&module_mutex);

	if(nvidia_mod != NULL)
	{
		TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__,
			  (void*)(nvidia_mod->module_core),
			  (void*)(nvidia_mod->module_core) + nvidia_mod->core_size);
		init_nv_device_reg();
		return(0);
	}
	else
	{
		TRACE("%s : Could not find NVIDIA module!  Loaded?\n", __FUNCTION__);
	}
}

static int nvidia_going_module_notify(struct notifier_block *self,
				unsigned long val, void *data)
{
	nvidia_mod = NULL;
	mb();

	return 0;
}

static struct notifier_block nvidia_ready = {
	.notifier_call = nvidia_ready_module_notify,
	.priority = 1,
};

static struct notifier_block nvidia_going = {
	.notifier_call = nvidia_going_module_notify,
	.priority = 1,
};
#endif
304 | |||
305 | |||
306 | |||
/* Device-registry lifecycle; definitions further below. */
static int init_nv_device_reg(void);
static int shutdown_nv_device_reg(void);
309 | |||
310 | |||
/* Locate the loaded "nvidia" module and initialize the per-GPU device
 * registry. Always returns 0: initialization proceeds even when the module
 * is not (yet) loaded so the rest of the system can come up — the -1 error
 * path was deliberately disabled (see commented-out return below). */
int init_nvidia_info(void)
{
	mutex_lock(&module_mutex);
	nvidia_mod = find_module("nvidia");
	mutex_unlock(&module_mutex);
	if(nvidia_mod != NULL)
	{
		TRACE("%s : Found NVIDIA module. Core Code: %p to %p\n", __FUNCTION__,
			  (void*)(nvidia_mod->module_core),
			  (void*)(nvidia_mod->module_core) + nvidia_mod->core_size);
		init_nv_device_reg();
		return(0);
	}
	else
	{
		TRACE("%s : Could not find NVIDIA module!  Loaded?\n", __FUNCTION__);

		/* proceed anyway; the registry is still set up */
		init_nv_device_reg();
		return(0);
//		return(-1);
	}
}
333 | |||
/* Drop the cached module reference and tear down the device registry.
 * The barrier publishes the NULL before registry teardown begins, so
 * is_nvidia_func() stops matching as early as possible. */
void shutdown_nvidia_info(void)
{
	nvidia_mod = NULL;
	mb();

	shutdown_nv_device_reg();
}
341 | |||
342 | /* works with pointers to static data inside the module too. */ | ||
343 | int is_nvidia_func(void* func_addr) | ||
344 | { | ||
345 | int ret = 0; | ||
346 | if(nvidia_mod) | ||
347 | { | ||
348 | ret = within_module_core((long unsigned int)func_addr, nvidia_mod); | ||
349 | /* | ||
350 | if(ret) | ||
351 | { | ||
352 | TRACE("%s : %p is in NVIDIA module: %d\n", | ||
353 | __FUNCTION__, func_addr, ret); | ||
354 | }*/ | ||
355 | } | ||
356 | |||
357 | return(ret); | ||
358 | } | ||
359 | |||
/* Map a tasklet raised by the NV driver back to its GPU number.
 * Relies on t->data pointing at the driver's nv_state_t, which is embedded
 * as the first member of litmus_nv_linux_state_t (see shadows above). */
u32 get_tasklet_nv_device_num(const struct tasklet_struct *t)
{
	// life is too short to use hard-coded offsets.  update this later.
	litmus_nv_state_t* nvstate = (litmus_nv_state_t*)(t->data);
	litmus_nv_linux_state_t* linuxstate = container_of(nvstate, litmus_nv_linux_state_t, nv_state);

	BUG_ON(linuxstate->device_num >= NV_DEVICE_NUM);

	return(linuxstate->device_num);
}
370 | |||
/* Map a work item queued by the NV driver back to its GPU number.
 * NOTE(review): assumes the driver places a pointer immediately after the
 * embedded work_struct (see litmus_nv_work_t) and that a u32 device number
 * is reachable through that pointer. Offset determined through observed
 * driver behavior only — fragile across driver versions; re-verify on
 * every driver upgrade. */
u32 get_work_nv_device_num(const struct work_struct *t)
{
	// offset determined though observed behavior of the NV driver.
	const int DEVICE_NUM_OFFSET = sizeof(struct work_struct);
	void* state = (void*)(t);
	void** device_num_ptr = state + DEVICE_NUM_OFFSET;
	return(*((u32*)(*device_num_ptr)));
}
379 | |||
380 | |||
381 | /////////////////////////////////////////////////////////////////////////////// | ||
382 | /////////////////////////////////////////////////////////////////////////////// | ||
383 | /////////////////////////////////////////////////////////////////////////////// | ||
384 | |||
385 | |||
386 | typedef struct { | ||
387 | raw_spinlock_t lock; /* not needed if GPU not shared between scheudling domains */ | ||
388 | struct binheap owners; | ||
389 | |||
390 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
391 | klmirqd_callback_t callback; | ||
392 | struct task_struct* thread; | ||
393 | int ready:1; /* todo: make threads check for the ready flag */ | ||
394 | #endif | ||
395 | |||
396 | #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG | ||
397 | struct tasklet_struct nv_klmirqd_dbg_tasklet; | ||
398 | #endif | ||
399 | }nv_device_registry_t; | ||
400 | |||
401 | |||
402 | static nv_device_registry_t NV_DEVICE_REG[NV_DEVICE_NUM]; | ||
403 | |||
404 | |||
405 | |||
#ifdef CONFIG_LITMUS_SOFTIRQD
/* klmirqd startup callback: runs in the context of the freshly launched
 * nvklmirqd thread. Records the thread and marks the registry slot ready
 * so interrupt bottom halves can be routed to it. Always returns 0. */
static int nvidia_klmirqd_cb(void *arg)
{
	unsigned long flags;
	/* the GPU id was smuggled through the void* arg; see
	 * init_nv_device_reg() */
	int reg_device_id = (int)(long long)(arg);
	nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id];

	TRACE("nv klmirqd callback for GPU %d\n", reg_device_id);

	raw_spin_lock_irqsave(&reg->lock, flags);
	reg->thread = current;
	reg->ready = 1;
	raw_spin_unlock_irqrestore(&reg->lock, flags);

	return 0;
}
#endif
423 | |||
424 | #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG | ||
425 | struct nv_klmirqd_dbg_timer_struct | ||
426 | { | ||
427 | struct hrtimer timer; | ||
428 | }; | ||
429 | |||
430 | static struct nv_klmirqd_dbg_timer_struct nv_klmirqd_dbg_timer; | ||
431 | |||
432 | static void nv_klmirqd_arm_dbg_timer(lt_t relative_time) | ||
433 | { | ||
434 | lt_t when_to_fire = litmus_clock() + relative_time; | ||
435 | |||
436 | TRACE("next nv tasklet in %d ns\n", relative_time); | ||
437 | |||
438 | __hrtimer_start_range_ns(&nv_klmirqd_dbg_timer.timer, | ||
439 | ns_to_ktime(when_to_fire), | ||
440 | 0, | ||
441 | HRTIMER_MODE_ABS_PINNED, | ||
442 | 0); | ||
443 | } | ||
444 | |||
/* Body of the debug tasklet: just log which GPU it ran for and schedule
 * the next injection at a pseudo-random point within the next 10ms. */
static void nv_klmirqd_dbg_tasklet_func(unsigned long arg)
{
	lt_t now = litmus_clock();
	nv_device_registry_t *reg = (nv_device_registry_t*)arg;
	/* recover the GPU index from the slot's position in the array */
	int gpunum = reg - &NV_DEVICE_REG[0];

	TRACE("nv klmirqd routine invoked for GPU %d!\n", gpunum);

	/* set up the next timer */
	nv_klmirqd_arm_dbg_timer(now % (NSEC_PER_MSEC * 10));  // within the next 10ms.
}
456 | |||
457 | |||
/* Debug timer expiry handler: pick a pseudo-random online GPU and, if its
 * klmirqd thread is up, feed it the debug tasklet; otherwise just re-arm.
 * The tasklet/arm path re-arms the timer, so NORESTART is returned here. */
static enum hrtimer_restart nvklmirqd_timer_func(struct hrtimer *timer)
{
	lt_t now = litmus_clock();
	int gpu = (int)(now % num_online_gpus());
	nv_device_registry_t *reg;

	TRACE("nvklmirqd_timer invoked!\n");

	reg = &NV_DEVICE_REG[gpu];

	if (reg->thread && reg->ready) {
		TRACE("Adding a tasklet for GPU %d\n", gpu);
		litmus_tasklet_schedule(&reg->nv_klmirqd_dbg_tasklet, reg->thread);
	}
	else {
		TRACE("nv klmirqd is not ready!\n");
		nv_klmirqd_arm_dbg_timer(now % (NSEC_PER_MSEC * 10));  // within the next 10ms.
	}

	return HRTIMER_NORESTART;
}
#endif
480 | |||
481 | |||
482 | static int gpu_owner_max_priority_order(struct binheap_node *a, | ||
483 | struct binheap_node *b) | ||
484 | { | ||
485 | struct task_struct *d_a = container_of(binheap_entry(a, struct rt_param, gpu_owner_node), | ||
486 | struct task_struct, rt_param); | ||
487 | struct task_struct *d_b = container_of(binheap_entry(b, struct rt_param, gpu_owner_node), | ||
488 | struct task_struct, rt_param); | ||
489 | |||
490 | BUG_ON(!d_a); | ||
491 | BUG_ON(!d_b); | ||
492 | |||
493 | return litmus->compare(d_a, d_b); | ||
494 | } | ||
495 | |||
496 | static int init_nv_device_reg(void) | ||
497 | { | ||
498 | int i; | ||
499 | char name[MAX_KLMIRQD_NAME_LEN+1]; | ||
500 | |||
501 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
502 | if (!klmirqd_is_ready()) { | ||
503 | TRACE("klmirqd is not ready!\n"); | ||
504 | return 0; | ||
505 | } | ||
506 | #endif | ||
507 | |||
508 | memset(NV_DEVICE_REG, 0, sizeof(NV_DEVICE_REG)); | ||
509 | mb(); | ||
510 | |||
511 | |||
512 | for(i = 0; i < num_online_gpus(); ++i) { | ||
513 | raw_spin_lock_init(&NV_DEVICE_REG[i].lock); | ||
514 | INIT_BINHEAP_HANDLE(&NV_DEVICE_REG[i].owners, gpu_owner_max_priority_order); | ||
515 | |||
516 | #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG | ||
517 | tasklet_init(&NV_DEVICE_REG[i].nv_klmirqd_dbg_tasklet, nv_klmirqd_dbg_tasklet_func, (unsigned long)&NV_DEVICE_REG[i]); | ||
518 | #endif | ||
519 | |||
520 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
521 | { | ||
522 | int default_cpu = litmus->map_gpu_to_cpu(i); | ||
523 | |||
524 | snprintf(name, MAX_KLMIRQD_NAME_LEN, "nvklmirqd%d", i); | ||
525 | |||
526 | NV_DEVICE_REG[i].callback.func = nvidia_klmirqd_cb; | ||
527 | NV_DEVICE_REG[i].callback.arg = (void*)(long long)(i); | ||
528 | mb(); | ||
529 | |||
530 | if(launch_klmirqd_thread(name, default_cpu, &NV_DEVICE_REG[i].callback) != 0) { | ||
531 | TRACE("Failed to create klmirqd thread for GPU %d\n", i); | ||
532 | } | ||
533 | } | ||
534 | #endif | ||
535 | } | ||
536 | |||
537 | #ifdef CONFIG_LITMUS_NV_KLMIRQD_DEBUG | ||
538 | hrtimer_init(&nv_klmirqd_dbg_timer.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); | ||
539 | nv_klmirqd_dbg_timer.timer.function = nvklmirqd_timer_func; | ||
540 | nv_klmirqd_arm_dbg_timer(NSEC_PER_MSEC * 1000); | ||
541 | #endif | ||
542 | |||
543 | return(1); | ||
544 | } | ||
545 | |||
546 | |||
/* The following code is full of nasty race conditions... */
/* spawning of klimirqd threads can race with init_nv_device_reg()!!!! */
/* Tear down the registry: kill each GPU's klmirqd thread (best effort)
 * and drain its owner heap. Always returns 1. */
static int shutdown_nv_device_reg(void)
{
	TRACE("Shutting down nv device registration.\n");

#ifdef CONFIG_LITMUS_SOFTIRQD
	{
		int i;
		nv_device_registry_t *reg;

		for (i = 0; i < num_online_gpus(); ++i) {

			TRACE("Shutting down GPU %d.\n", i);

			reg = &NV_DEVICE_REG[i];

			if (reg->thread && reg->ready) {
				kill_klmirqd_thread(reg->thread);

				/* assume that all goes according to plan... */
				reg->thread = NULL;
				reg->ready = 0;
			}

			/* drain any owners left registered at shutdown */
			while (!binheap_empty(&reg->owners)) {
				binheap_delete_root(&reg->owners, struct rt_param, gpu_owner_node);
			}
		}
	}
#endif

	return(1);
}
581 | |||
582 | |||
583 | /* use to get the owner of nv_device_id. */ | ||
584 | struct task_struct* get_nv_max_device_owner(u32 target_device_id) | ||
585 | { | ||
586 | struct task_struct *owner = NULL; | ||
587 | nv_device_registry_t *reg; | ||
588 | |||
589 | BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
590 | |||
591 | reg = &NV_DEVICE_REG[target_device_id]; | ||
592 | |||
593 | if (!binheap_empty(®->owners)) { | ||
594 | struct task_struct *hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
595 | struct task_struct, rt_param); | ||
596 | TRACE_CUR("hp: %s/%d\n", hp->comm, hp->pid); | ||
597 | } | ||
598 | |||
599 | return(owner); | ||
600 | } | ||
601 | |||
#ifdef CONFIG_LITMUS_SOFTIRQD
/* Return the klmirqd thread servicing the given GPU, or NULL if that
 * thread has not (yet) reported ready. */
struct task_struct* get_nv_klmirqd_thread(u32 target_device_id)
{
	struct task_struct *klmirqd = NULL;
	nv_device_registry_t *reg;

	BUG_ON(target_device_id >= NV_DEVICE_NUM);

	reg = &NV_DEVICE_REG[target_device_id];

	if(likely(reg->ready)) {
		klmirqd = reg->thread;
	}

	return klmirqd;
}
#endif
619 | |||
620 | |||
621 | |||
622 | |||
623 | |||
#ifdef CONFIG_LITMUS_SOFTIRQD
/* Raise the klmirqd thread's inherited priority to that of hp.
 * The klmirqd thread should never attempt to hold a litmus-level
 * real-time lock, so nested inheritance support is not required. */
static int gpu_klmirqd_increase_priority(struct task_struct *klmirqd, struct task_struct *hp)
{
	int retval = 0;

	TRACE_CUR("Increasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid);

	retval = litmus->__increase_prio(klmirqd, hp);

	return retval;
}

/* Lower the klmirqd thread's inherited priority to that of hp (which may
 * be NULL to drop inheritance entirely). Same no-nesting assumption as
 * above. */
static int gpu_klmirqd_decrease_priority(struct task_struct *klmirqd, struct task_struct *hp)
{
	int retval = 0;

	TRACE_CUR("Decreasing priority of nv klmirqd: %s/%d.\n", klmirqd->comm, klmirqd->pid);

	retval = litmus->__decrease_prio(klmirqd, hp);

	return retval;
}
#endif
651 | |||
652 | |||
653 | |||
654 | |||
655 | /* call when an gpu owner becomes real-time */ | ||
656 | long enable_gpu_owner(struct task_struct *t) | ||
657 | { | ||
658 | long retval = 0; | ||
659 | // unsigned long flags; | ||
660 | int gpu; | ||
661 | nv_device_registry_t *reg; | ||
662 | |||
663 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
664 | struct task_struct *hp; | ||
665 | #endif | ||
666 | |||
667 | if (!tsk_rt(t)->held_gpus) { | ||
668 | TRACE_CUR("task %s/%d does not hold any GPUs\n", t->comm, t->pid); | ||
669 | return -1; | ||
670 | } | ||
671 | |||
672 | BUG_ON(!is_realtime(t)); | ||
673 | |||
674 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
675 | |||
676 | if (binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
677 | TRACE_CUR("task %s/%d is already active on GPU %d\n", t->comm, t->pid, gpu); | ||
678 | goto out; | ||
679 | } | ||
680 | |||
681 | /* update the registration (and maybe klmirqd) */ | ||
682 | reg = &NV_DEVICE_REG[gpu]; | ||
683 | |||
684 | // raw_spin_lock_irqsave(®->lock, flags); | ||
685 | |||
686 | binheap_add(&tsk_rt(t)->gpu_owner_node, ®->owners, | ||
687 | struct rt_param, gpu_owner_node); | ||
688 | |||
689 | |||
690 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
691 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
692 | struct task_struct, rt_param); | ||
693 | |||
694 | if (hp == t) { | ||
695 | /* we're the new hp */ | ||
696 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
697 | |||
698 | retval = gpu_klmirqd_increase_priority(reg->thread, (tsk_rt(hp)->inh_task)? tsk_rt(hp)->inh_task : hp); | ||
699 | } | ||
700 | #endif | ||
701 | |||
702 | // raw_spin_unlock_irqsave(®->lock, flags); | ||
703 | |||
704 | out: | ||
705 | return retval; | ||
706 | } | ||
707 | |||
708 | /* call when an gpu owner exits real-time */ | ||
709 | long disable_gpu_owner(struct task_struct *t) | ||
710 | { | ||
711 | long retval = 0; | ||
712 | // unsigned long flags; | ||
713 | int gpu; | ||
714 | nv_device_registry_t *reg; | ||
715 | |||
716 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
717 | struct task_struct *hp; | ||
718 | struct task_struct *new_hp = NULL; | ||
719 | #endif | ||
720 | |||
721 | if (!tsk_rt(t)->held_gpus) { | ||
722 | TRACE_CUR("task %s/%d does not hold any GPUs\n", t->comm, t->pid); | ||
723 | return -1; | ||
724 | } | ||
725 | |||
726 | BUG_ON(!is_realtime(t)); | ||
727 | |||
728 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
729 | |||
730 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
731 | TRACE_CUR("task %s/%d is not active on GPU %d\n", t->comm, t->pid, gpu); | ||
732 | goto out; | ||
733 | } | ||
734 | |||
735 | TRACE_CUR("task %s/%d exiting from GPU %d.\n", t->comm, t->pid, gpu); | ||
736 | |||
737 | |||
738 | reg = &NV_DEVICE_REG[gpu]; | ||
739 | |||
740 | // raw_spin_lock_irqsave(®->lock, flags); | ||
741 | |||
742 | |||
743 | #ifdef CONFIG_LITMUS_SOFTIRQD | ||
744 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
745 | struct task_struct, rt_param); | ||
746 | |||
747 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
748 | |||
749 | |||
750 | if (!binheap_empty(®->owners)) { | ||
751 | new_hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
752 | struct task_struct, rt_param); | ||
753 | } | ||
754 | |||
755 | if (hp == t && new_hp != t) { | ||
756 | struct task_struct *to_inh = NULL; | ||
757 | |||
758 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
759 | |||
760 | if (new_hp) { | ||
761 | to_inh = (tsk_rt(new_hp)->inh_task) ? tsk_rt(new_hp)->inh_task : new_hp; | ||
762 | } | ||
763 | |||
764 | retval = gpu_klmirqd_decrease_priority(reg->thread, to_inh); | ||
765 | } | ||
766 | #else | ||
767 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
768 | #endif | ||
769 | |||
770 | // raw_spin_unlock_irqsave(®->lock, flags); | ||
771 | |||
772 | |||
773 | out: | ||
774 | return retval; | ||
775 | } | ||
776 | |||
777 | |||
778 | |||
779 | |||
780 | |||
781 | |||
782 | |||
783 | |||
784 | |||
785 | |||
786 | int gpu_owner_increase_priority(struct task_struct *t) | ||
787 | { | ||
788 | int retval = 0; | ||
789 | int gpu; | ||
790 | nv_device_registry_t *reg; | ||
791 | |||
792 | struct task_struct *hp = NULL; | ||
793 | struct task_struct *hp_eff = NULL; | ||
794 | |||
795 | BUG_ON(!is_realtime(t)); | ||
796 | BUG_ON(!tsk_rt(t)->held_gpus); | ||
797 | |||
798 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
799 | |||
800 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
801 | WARN_ON(!is_running(t)); | ||
802 | TRACE_CUR("gpu klmirqd may not inherit from %s/%d on GPU %d\n", | ||
803 | t->comm, t->pid, gpu); | ||
804 | goto out; | ||
805 | } | ||
806 | |||
807 | |||
808 | |||
809 | |||
810 | TRACE_CUR("task %s/%d on GPU %d increasing priority.\n", t->comm, t->pid, gpu); | ||
811 | reg = &NV_DEVICE_REG[gpu]; | ||
812 | |||
813 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
814 | struct task_struct, rt_param); | ||
815 | hp_eff = effective_priority(hp); | ||
816 | |||
817 | if (hp != t) { /* our position in the heap may have changed. hp is already at the root. */ | ||
818 | binheap_decrease(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
819 | } | ||
820 | |||
821 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
822 | struct task_struct, rt_param); | ||
823 | |||
824 | if (effective_priority(hp) != hp_eff) { /* the eff. prio. of hp has changed */ | ||
825 | hp_eff = effective_priority(hp); | ||
826 | TRACE_CUR("%s/%d is new hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
827 | |||
828 | retval = gpu_klmirqd_increase_priority(reg->thread, hp_eff); | ||
829 | } | ||
830 | |||
831 | out: | ||
832 | return retval; | ||
833 | } | ||
834 | |||
835 | |||
836 | int gpu_owner_decrease_priority(struct task_struct *t) | ||
837 | { | ||
838 | int retval = 0; | ||
839 | int gpu; | ||
840 | nv_device_registry_t *reg; | ||
841 | |||
842 | struct task_struct *hp = NULL; | ||
843 | struct task_struct *hp_eff = NULL; | ||
844 | |||
845 | BUG_ON(!is_realtime(t)); | ||
846 | BUG_ON(!tsk_rt(t)->held_gpus); | ||
847 | |||
848 | gpu = find_first_bit(&tsk_rt(t)->held_gpus, sizeof(tsk_rt(t)->held_gpus)); | ||
849 | |||
850 | if (!binheap_is_in_heap(&tsk_rt(t)->gpu_owner_node)) { | ||
851 | WARN_ON(!is_running(t)); | ||
852 | TRACE_CUR("nv klmirqd may not inherit from %s/%d on GPU %d\n", | ||
853 | t->comm, t->pid, gpu); | ||
854 | goto out; | ||
855 | } | ||
856 | |||
857 | TRACE_CUR("task %s/%d on GPU %d decresing priority.\n", t->comm, t->pid, gpu); | ||
858 | reg = &NV_DEVICE_REG[gpu]; | ||
859 | |||
860 | hp = container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
861 | struct task_struct, rt_param); | ||
862 | hp_eff = effective_priority(hp); | ||
863 | binheap_delete(&tsk_rt(t)->gpu_owner_node, ®->owners); | ||
864 | binheap_add(&tsk_rt(t)->gpu_owner_node, ®->owners, | ||
865 | struct rt_param, gpu_owner_node); | ||
866 | |||
867 | if (hp == t) { /* t was originally the hp */ | ||
868 | struct task_struct *new_hp = | ||
869 | container_of(binheap_top_entry(®->owners, struct rt_param, gpu_owner_node), | ||
870 | struct task_struct, rt_param); | ||
871 | if (effective_priority(new_hp) != hp_eff) { /* eff prio. of hp has changed */ | ||
872 | hp_eff = effective_priority(new_hp); | ||
873 | TRACE_CUR("%s/%d is no longer hp on GPU %d.\n", t->comm, t->pid, gpu); | ||
874 | retval = gpu_klmirqd_decrease_priority(reg->thread, hp_eff); | ||
875 | } | ||
876 | } | ||
877 | |||
878 | out: | ||
879 | return retval; | ||
880 | } | ||
881 | |||
882 | |||
883 | |||
884 | |||
885 | |||
886 | |||
887 | |||
888 | |||
889 | |||
/* Mark GPU reg_device_id as held by t (non-atomic bit set; caller provides
 * any needed exclusion). Always returns 0. */
static int __reg_nv_device(int reg_device_id, struct task_struct *t)
{
	__set_bit(reg_device_id, &tsk_rt(t)->held_gpus);

	return(0);
}

/* Clear t's hold on GPU de_reg_device_id (non-atomic). Always returns 0. */
static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t)
{
	__clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus);

	return(0);
}
903 | |||
904 | |||
905 | int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t) | ||
906 | { | ||
907 | int ret; | ||
908 | |||
909 | if((reg_device_id < num_online_gpus()) && (reg_device_id >= 0)) | ||
910 | { | ||
911 | if(reg_action) | ||
912 | ret = __reg_nv_device(reg_device_id, t); | ||
913 | else | ||
914 | ret = __clear_reg_nv_device(reg_device_id, t); | ||
915 | } | ||
916 | else | ||
917 | { | ||
918 | ret = -ENODEV; | ||
919 | } | ||
920 | |||
921 | return(ret); | ||
922 | } | ||
923 | |||
924 | |||
925 | |||
926 | |||
927 | |||
928 | |||
929 | |||
930 | |||
931 | |||
932 | |||
933 | |||
934 | |||
935 | |||
936 | |||
937 | |||
938 | |||
939 | |||
940 | |||
941 | |||
942 | |||
943 | |||
944 | |||
945 | |||
946 | #ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
947 | //void pai_check_priority_increase(struct task_struct *t, int reg_device_id) | ||
948 | //{ | ||
949 | // unsigned long flags; | ||
950 | // nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | ||
951 | // | ||
952 | // | ||
953 | // | ||
954 | // if(reg->max_prio_owner != t) { | ||
955 | // | ||
956 | // raw_spin_lock_irqsave(®->lock, flags); | ||
957 | // | ||
958 | // if(reg->max_prio_owner != t) { | ||
959 | // if(litmus->compare(t, reg->max_prio_owner)) { | ||
960 | // litmus->change_prio_pai_tasklet(reg->max_prio_owner, t); | ||
961 | // reg->max_prio_owner = t; | ||
962 | // } | ||
963 | // } | ||
964 | // | ||
965 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
966 | // } | ||
967 | //} | ||
968 | // | ||
969 | // | ||
970 | //void pai_check_priority_decrease(struct task_struct *t, int reg_device_id) | ||
971 | //{ | ||
972 | // unsigned long flags; | ||
973 | // nv_device_registry_t *reg = &NV_DEVICE_REG[reg_device_id]; | ||
974 | // | ||
975 | // if(reg->max_prio_owner == t) { | ||
976 | // | ||
977 | // raw_spin_lock_irqsave(®->lock, flags); | ||
978 | // | ||
979 | // if(reg->max_prio_owner == t) { | ||
980 | // reg->max_prio_owner = find_hp_owner(reg, NULL); | ||
981 | // if(reg->max_prio_owner != t) { | ||
982 | // litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | ||
983 | // } | ||
984 | // } | ||
985 | // | ||
986 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
987 | // } | ||
988 | //} | ||
989 | #endif | ||
990 | |||
991 | |||
992 | |||
993 | |||
994 | |||
995 | //static int __reg_nv_device(int reg_device_id, struct task_struct *t) | ||
996 | //{ | ||
997 | // int ret = 0; | ||
998 | // int i; | ||
999 | // struct task_struct *old_max = NULL; | ||
1000 | // | ||
1001 | // | ||
1002 | // raw_spin_lock_irqsave(®->lock, flags); | ||
1003 | // | ||
1004 | // if(reg->nr_owners < NV_MAX_SIMULT_USERS) { | ||
1005 | // TRACE_TASK(t, "registers GPU %d\n", reg_device_id); | ||
1006 | // for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
1007 | // if(reg->owners[i] == NULL) { | ||
1008 | // reg->owners[i] = t; | ||
1009 | // | ||
1010 | // //if(edf_higher_prio(t, reg->max_prio_owner)) { | ||
1011 | // if(litmus->compare(t, reg->max_prio_owner)) { | ||
1012 | // old_max = reg->max_prio_owner; | ||
1013 | // reg->max_prio_owner = t; | ||
1014 | // | ||
1015 | //#ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1016 | // litmus->change_prio_pai_tasklet(old_max, t); | ||
1017 | //#endif | ||
1018 | // } | ||
1019 | // | ||
1020 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
1021 | // down_and_set_stat(t, HELD, &tsk_rt(t)->klmirqd_sem); | ||
1022 | //#endif | ||
1023 | // ++(reg->nr_owners); | ||
1024 | // | ||
1025 | // break; | ||
1026 | // } | ||
1027 | // } | ||
1028 | // } | ||
1029 | // else | ||
1030 | // { | ||
1031 | // TRACE_CUR("%s: device %d is already in use!\n", __FUNCTION__, reg_device_id); | ||
1032 | // //ret = -EBUSY; | ||
1033 | // } | ||
1034 | // | ||
1035 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
1036 | // | ||
1037 | // __set_bit(reg_device_id, &tsk_rt(t)->held_gpus); | ||
1038 | // | ||
1039 | // return(ret); | ||
1040 | //} | ||
1041 | // | ||
1042 | //static int __clear_reg_nv_device(int de_reg_device_id, struct task_struct *t) | ||
1043 | //{ | ||
1044 | // int ret = 0; | ||
1045 | // int i; | ||
1046 | // unsigned long flags; | ||
1047 | // nv_device_registry_t *reg = &NV_DEVICE_REG[de_reg_device_id]; | ||
1048 | // | ||
1049 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
1050 | // struct task_struct* klmirqd_th = get_klmirqd(de_reg_device_id); | ||
1051 | //#endif | ||
1052 | // | ||
1053 | // if(!test_bit(de_reg_device_id, &tsk_rt(t)->held_gpus)) { | ||
1054 | // return ret; | ||
1055 | // } | ||
1056 | // | ||
1057 | // raw_spin_lock_irqsave(®->lock, flags); | ||
1058 | // | ||
1059 | // TRACE_TASK(t, "unregisters GPU %d\n", de_reg_device_id); | ||
1060 | // | ||
1061 | // for(i = 0; i < NV_MAX_SIMULT_USERS; ++i) { | ||
1062 | // if(reg->owners[i] == t) { | ||
1063 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
1064 | // flush_pending(klmirqd_th, t); | ||
1065 | //#endif | ||
1066 | // if(reg->max_prio_owner == t) { | ||
1067 | // reg->max_prio_owner = find_hp_owner(reg, t); | ||
1068 | //#ifdef CONFIG_LITMUS_PAI_SOFTIRQD | ||
1069 | // litmus->change_prio_pai_tasklet(t, reg->max_prio_owner); | ||
1070 | //#endif | ||
1071 | // } | ||
1072 | // | ||
1073 | //#ifdef CONFIG_LITMUS_SOFTIRQD | ||
1074 | // up_and_set_stat(t, NOT_HELD, &tsk_rt(t)->klmirqd_sem); | ||
1075 | //#endif | ||
1076 | // | ||
1077 | // reg->owners[i] = NULL; | ||
1078 | // --(reg->nr_owners); | ||
1079 | // | ||
1080 | // break; | ||
1081 | // } | ||
1082 | // } | ||
1083 | // | ||
1084 | // raw_spin_unlock_irqrestore(®->lock, flags); | ||
1085 | // | ||
1086 | // __clear_bit(de_reg_device_id, &tsk_rt(t)->held_gpus); | ||
1087 | // | ||
1088 | // return(ret); | ||
1089 | //} | ||
1090 | // | ||
1091 | // | ||
1092 | //int reg_nv_device(int reg_device_id, int reg_action, struct task_struct *t) | ||
1093 | //{ | ||
1094 | // int ret; | ||
1095 | // | ||
1096 | // if((reg_device_id < NV_DEVICE_NUM) && (reg_device_id >= 0)) | ||
1097 | // { | ||
1098 | // if(reg_action) | ||
1099 | // ret = __reg_nv_device(reg_device_id, t); | ||
1100 | // else | ||
1101 | // ret = __clear_reg_nv_device(reg_device_id, t); | ||
1102 | // } | ||
1103 | // else | ||
1104 | // { | ||
1105 | // ret = -ENODEV; | ||
1106 | // } | ||
1107 | // | ||
1108 | // return(ret); | ||
1109 | //} | ||
1110 | |||
1111 | |||
1112 | |||
/* NOTE(review): disabled legacy lock/unlock helpers for the per-device
 * registry spinlock (NV_DEVICE_REG[].lock), kept for reference only. */
//void lock_nv_registry(u32 target_device_id, unsigned long* flags)
1114 | //{ | ||
1115 | // BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
1116 | // | ||
1117 | // if(in_interrupt()) | ||
1118 | // TRACE("Locking registry for %d.\n", target_device_id); | ||
1119 | // else | ||
1120 | // TRACE_CUR("Locking registry for %d.\n", target_device_id); | ||
1121 | // | ||
1122 | // raw_spin_lock_irqsave(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
1123 | //} | ||
1124 | // | ||
1125 | //void unlock_nv_registry(u32 target_device_id, unsigned long* flags) | ||
1126 | //{ | ||
1127 | // BUG_ON(target_device_id >= NV_DEVICE_NUM); | ||
1128 | // | ||
1129 | // if(in_interrupt()) | ||
1130 | // TRACE("Unlocking registry for %d.\n", target_device_id); | ||
1131 | // else | ||
1132 | // TRACE_CUR("Unlocking registry for %d.\n", target_device_id); | ||
1133 | // | ||
1134 | // raw_spin_unlock_irqrestore(&NV_DEVICE_REG[target_device_id].lock, *flags); | ||
1135 | //} | ||
1136 | |||
1137 | |||