path: root/arch/sparc/kernel/unaligned_64.c
author		Sam Ravnborg <sam@ravnborg.org>	2008-12-03 06:11:52 -0500
committer	David S. Miller <davem@davemloft.net>	2008-12-04 12:17:21 -0500
commit		a88b5ba8bd8ac18aad65ee6c6a254e2e74876db3 (patch)
tree		eb3d0ffaf53c3f7ec6083752c2097cecd1cb892a /arch/sparc/kernel/unaligned_64.c
parent		d670bd4f803c8b646acd20f3ba21e65458293faf (diff)
sparc,sparc64: unify kernel/
o Move all files from sparc64/kernel/ to sparc/kernel - rename as appropriate
o Update sparc/Makefile to the changes
o Update sparc/kernel/Makefile to include the sparc64 files

NOTE: This commit changes link order on sparc64! Link order had to change
for either of sparc32 and sparc64, and assuming sparc64 sees more testing
than sparc32, the link order was changed on sparc64, where issues will be
caught faster.

Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/unaligned_64.c')
-rw-r--r--	arch/sparc/kernel/unaligned_64.c	690
1 file changed, 690 insertions, 0 deletions
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
new file mode 100644
index 000000000000..203ddfad9f27
--- /dev/null
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -0,0 +1,690 @@
/*
 * unaligned.c: Unaligned load/store trap handling with special
 * cases for the kernel to do them more quickly.
 *
 * Copyright (C) 1996,2008 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */


#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <asm/asi.h>
#include <asm/ptrace.h>
#include <asm/pstate.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/smp.h>
#include <linux/bitops.h>
#include <asm/fpumacro.h>

/* #define DEBUG_MNA */

enum direction {
	load,	/* ld, ldd, ldh, ldsh */
	store,	/* st, std, sth, stsh */
	both,	/* Swap, ldstub, cas, ... */
	fpld,
	fpst,
	invalid,
};

#ifdef DEBUG_MNA
static char *dirstrings[] = {
	"load", "store", "both", "fpload", "fpstore", "invalid"
};
#endif

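/* In the SPARC format-3 load/store encoding, op3 occupies instruction
 * bits 24:19.  Bit 21 (op3 bit 2) is clear for loads and set for
 * stores; of the store-side opcodes, an op3 low nibble of 0xf denotes
 * the atomic swap, which both reads and writes memory.
 */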
static inline enum direction decode_direction(unsigned int insn)
{
	unsigned long tmp = (insn >> 21) & 1;

	if (!tmp)
		return load;
	else {
		switch ((insn>>19)&0xf) {
		case 15: /* swap* */
			return both;
		default:
			return store;
		}
	}
}

/* 16 = double-word, 8 = extra-word, 4 = word, 2 = half-word */
static inline int decode_access_size(unsigned int insn)
{
	unsigned int tmp;

	tmp = ((insn >> 19) & 0xf);
	if (tmp == 11 || tmp == 14) /* ldx/stx */
		return 8;
	tmp &= 3;
	if (!tmp)
		return 4;
	else if (tmp == 3)
		return 16;	/* ldd/std - Although it is actually 8 */
	else if (tmp == 2)
		return 2;
	else {
		printk("Impossible unaligned trap. insn=%08x\n", insn);
		die_if_kernel("Byte sized unaligned access?!?!",
			      current_thread_info()->kregs);

		/* GCC should never warn that control reaches the end
		 * of this function without returning a value because
		 * die_if_kernel() is marked with attribute 'noreturn'.
		 * Alas, some versions do...
		 */

		return 0;
	}
}

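/* Bit 23 of the instruction is the alternate-space bit.  With it set,
 * an i-bit of 1 takes the ASI from the %asi register (held in bits
 * 31:24 of TSTATE), while an i-bit of 0 takes it from the immediate
 * asi field in instruction bits 12:5.  Ordinary accesses use the
 * default primary address space, ASI_P.
 */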
static inline int decode_asi(unsigned int insn, struct pt_regs *regs)
{
	if (insn & 0x800000) {
		if (insn & 0x2000)
			return (unsigned char)(regs->tstate >> 24);	/* %asi */
		else
			return (unsigned char)(insn >> 5);		/* imm_asi */
	} else
		return ASI_P;
}

/* 0x400000 = signed, 0 = unsigned */
static inline int decode_signedness(unsigned int insn)
{
	return (insn & 0x400000);
}

static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2,
				       unsigned int rd, int from_kernel)
{
	if (rs2 >= 16 || rs1 >= 16 || rd >= 16) {
		if (from_kernel != 0)
			__asm__ __volatile__("flushw");
		else
			flushw_user();
	}
}

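/* The simm13 field sits in instruction bits 12:0.  Shifting a 64-bit
 * long left by 51 and arithmetically back right by 51 propagates bit
 * 12 (the sign bit) through the upper bits, e.g. an imm13 of 0x1fff
 * yields -1.
 */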
static inline long sign_extend_imm13(long imm)
{
	return imm << 51 >> 51;
}

static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs)
{
	unsigned long value;

	if (reg < 16)
		return (!reg ? 0 : regs->u_regs[reg]);
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		value = win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 __user *win32;
		win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		get_user(value, &win32->locals[reg - 16]);
	} else {
		struct reg_window __user *win;
		win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		get_user(value, &win->locals[reg - 16]);
	}
	return value;
}

static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs)
{
	if (reg < 16)
		return &regs->u_regs[reg];
	if (regs->tstate & TSTATE_PRIV) {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	} else if (test_thread_flag(TIF_32BIT)) {
		struct reg_window32 *win32;
		win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
		return (unsigned long *)&win32->locals[reg - 16];
	} else {
		struct reg_window *win;
		win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS);
		return &win->locals[reg - 16];
	}
}

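/* The i bit (bit 13) selects the addressing mode: when set the
 * effective address is r[rs1] + sign_extend(simm13), as in
 * "ldx [%g1 + 8], %g2"; when clear it is r[rs1] + r[rs2], as in
 * "ldx [%g1 + %g2], %g3".
 */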
unsigned long compute_effective_address(struct pt_regs *regs,
					unsigned int insn, unsigned int rd)
{
	unsigned int rs1 = (insn >> 14) & 0x1f;
	unsigned int rs2 = insn & 0x1f;
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(rs1, 0, rd, from_kernel);
		return (fetch_reg(rs1, regs) + sign_extend_imm13(insn));
	} else {
		maybe_flush_windows(rs1, rs2, rd, from_kernel);
		return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs));
	}
}

/* This is just to make gcc think die_if_kernel does return... */
static void __used unaligned_panic(char *str, struct pt_regs *regs)
{
	die_if_kernel(str, regs);
}

extern int do_int_load(unsigned long *dest_reg, int size,
		       unsigned long *saddr, int is_signed, int asi);

extern int __do_int_store(unsigned long *dst_addr, int size,
			  unsigned long src_val, int asi);

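/* A size of 16 denotes std, which stores the even/odd 32-bit register
 * pair %rd/%rd+1 as a single 64-bit quantity; the two halves are
 * fused here and the store is issued as one 8-byte access.  If the
 * ASI was a little-endian variant (asi != orig_asi after
 * normalization), the value is byte-swapped before the big-endian
 * store.
 */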
static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr,
			       struct pt_regs *regs, int asi, int orig_asi)
{
	unsigned long zero = 0;
	unsigned long *src_val_p = &zero;
	unsigned long src_val;

	if (size == 16) {
		size = 8;
		zero = (((long)(reg_num ?
			(unsigned)fetch_reg(reg_num, regs) : 0)) << 32) |
			(unsigned)fetch_reg(reg_num + 1, regs);
	} else if (reg_num) {
		src_val_p = fetch_reg_addr(reg_num, regs);
	}
	src_val = *src_val_p;
	if (unlikely(asi != orig_asi)) {
		switch (size) {
		case 2:
			src_val = swab16(src_val);
			break;
		case 4:
			src_val = swab32(src_val);
			break;
		case 8:
			src_val = swab64(src_val);
			break;
		case 16:
		default:
			BUG();
			break;
		}
	}
	return __do_int_store(dst_addr, size, src_val, asi);
}

static inline void advance(struct pt_regs *regs)
{
	regs->tpc = regs->tnpc;
	regs->tnpc += 4;
	if (test_thread_flag(TIF_32BIT)) {
		regs->tpc &= 0xffffffff;
		regs->tnpc &= 0xffffffff;
	}
}

static inline int floating_point_load_or_store_p(unsigned int insn)
{
	return (insn >> 24) & 1;
}

static inline int ok_for_kernel(unsigned int insn)
{
	return !floating_point_load_or_store_p(insn);
}

static void kernel_mna_trap_fault(int fixup_tstate_asi)
{
	struct pt_regs *regs = current_thread_info()->kern_una_regs;
	unsigned int insn = current_thread_info()->kern_una_insn;
	const struct exception_table_entry *entry;

	entry = search_exception_tables(regs->tpc);
	if (!entry) {
		unsigned long address;

		address = compute_effective_address(regs, insn,
						    ((insn >> 25) & 0x1f));
		if (address < PAGE_SIZE) {
			printk(KERN_ALERT "Unable to handle kernel NULL "
			       "pointer dereference in mna handler");
		} else
			printk(KERN_ALERT "Unable to handle kernel paging "
			       "request in mna handler");
		printk(KERN_ALERT " at virtual address %016lx\n", address);
		printk(KERN_ALERT "current->{active_,}mm->context = %016lx\n",
		       (current->mm ? CTX_HWBITS(current->mm->context) :
			CTX_HWBITS(current->active_mm->context)));
		printk(KERN_ALERT "current->{active_,}mm->pgd = %016lx\n",
		       (current->mm ? (unsigned long) current->mm->pgd :
			(unsigned long) current->active_mm->pgd));
		die_if_kernel("Oops", regs);
		/* Not reached */
	}
	regs->tpc = entry->fixup;
	regs->tnpc = regs->tpc + 4;

	if (fixup_tstate_asi) {
		regs->tstate &= ~TSTATE_ASI;
		regs->tstate |= (ASI_AIUS << 24UL);
	}
}

static void log_unaligned(struct pt_regs *regs)
{
	static unsigned long count, last_time;

	if (time_after(jiffies, last_time + 5 * HZ))
		count = 0;
	if (count < 5) {
		last_time = jiffies;
		count++;
		printk("Kernel unaligned access at TPC[%lx] %pS\n",
		       regs->tpc, (void *) regs->tpc);
	}
}

asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
{
	enum direction dir = decode_direction(insn);
	int size = decode_access_size(insn);
	int orig_asi, asi;

	current_thread_info()->kern_una_regs = regs;
	current_thread_info()->kern_una_insn = insn;

	orig_asi = asi = decode_asi(insn, regs);

	/* If this is a {get,put}_user() on an unaligned userspace pointer,
	 * just signal a fault and do not log the event.
	 */
	if (asi == ASI_AIUS) {
		kernel_mna_trap_fault(0);
		return;
	}

	log_unaligned(regs);

	if (!ok_for_kernel(insn) || dir == both) {
		printk("Unsupported unaligned load/store trap for kernel "
		       "at <%016lx>.\n", regs->tpc);
		unaligned_panic("Kernel does fpu/atomic "
				"unaligned load/store.", regs);

		kernel_mna_trap_fault(0);
	} else {
		unsigned long addr, *reg_addr;
		int err;

		addr = compute_effective_address(regs, insn,
						 ((insn >> 25) & 0x1f));
#ifdef DEBUG_MNA
		printk("KMNA: pc=%016lx [dir=%s addr=%016lx size=%d] "
		       "retpc[%016lx]\n",
		       regs->tpc, dirstrings[dir], addr, size,
		       regs->u_regs[UREG_RETPC]);
#endif
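		/* Little-endian ASIs differ from their big-endian
		 * counterparts only in bit 3.  Clear it, perform the
		 * access big-endian, and byte-swap the value afterwards
		 * (the asi != orig_asi checks below and in
		 * do_int_store() handle the swap).
		 */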
		switch (asi) {
		case ASI_NL:
		case ASI_AIUPL:
		case ASI_AIUSL:
		case ASI_PL:
		case ASI_SL:
		case ASI_PNFL:
		case ASI_SNFL:
			asi &= ~0x08;
			break;
		}
		switch (dir) {
		case load:
			reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs);
			err = do_int_load(reg_addr, size,
					  (unsigned long *) addr,
					  decode_signedness(insn), asi);
			if (likely(!err) && unlikely(asi != orig_asi)) {
				unsigned long val_in = *reg_addr;
				switch (size) {
				case 2:
					val_in = swab16(val_in);
					break;
				case 4:
					val_in = swab32(val_in);
					break;
				case 8:
					val_in = swab64(val_in);
					break;
				case 16:
				default:
					BUG();
					break;
				}
				*reg_addr = val_in;
			}
			break;

		case store:
			err = do_int_store(((insn>>25)&0x1f), size,
					   (unsigned long *) addr, regs,
					   asi, orig_asi);
			break;

		default:
			panic("Impossible kernel unaligned trap.");
			/* Not reached... */
		}
		if (unlikely(err))
			kernel_mna_trap_fault(1);
		else
			advance(regs);
	}
}

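/* Population count of each 4-bit nibble value 0x0-0xf.  handle_popc()
 * emulates the V9 popc instruction on CPUs that do not implement it
 * in hardware by summing these counts over the sixteen nibbles of a
 * 64-bit value.
 */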
static char popc_helper[] = {
	0, 1, 1, 2, 1, 2, 2, 3,
	1, 2, 2, 3, 2, 3, 3, 4,
};

int handle_popc(u32 insn, struct pt_regs *regs)
{
	u64 value;
	int ret, i, rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;

	if (insn & 0x2000) {
		maybe_flush_windows(0, 0, rd, from_kernel);
		value = sign_extend_imm13(insn);
	} else {
		maybe_flush_windows(0, insn & 0x1f, rd, from_kernel);
		value = fetch_reg(insn & 0x1f, regs);
	}
	for (ret = 0, i = 0; i < 16; i++) {
		ret += popc_helper[value & 0xf];
		value >>= 4;
	}
	if (rd < 16) {
		if (rd)
			regs->u_regs[rd] = ret;
	} else {
		if (test_thread_flag(TIF_32BIT)) {
			struct reg_window32 __user *win32;
			win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
			put_user(ret, &win32->locals[rd - 16]);
		} else {
			struct reg_window __user *win;
			win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
			put_user(ret, &win->locals[rd - 16]);
		}
	}
	advance(regs);
	return 1;
}

extern void do_fpother(struct pt_regs *regs);
extern void do_privact(struct pt_regs *regs);
extern void spitfire_data_access_exception(struct pt_regs *regs,
					   unsigned long sfsr,
					   unsigned long sfar);
extern void sun4v_data_access_exception(struct pt_regs *regs,
					unsigned long addr,
					unsigned long type_ctx);

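/* Emulate FP loads and stores that trapped: STQ stores a 16-byte quad
 * as four 32-bit user writes, while LDF/LDDF/LDQF loads 1, 2 or 4
 * words as selected by instruction bits 20:19 (the 0x180000 mask
 * below) and reassembles them into the task's saved FPU register
 * state.
 */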
int handle_ldf_stq(u32 insn, struct pt_regs *regs)
{
	unsigned long addr = compute_effective_address(regs, insn, 0);
	int freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
	struct fpustate *f = FPUSTATE;
	int asi = decode_asi(insn, regs);
	int flag = (freg < 32) ? FPRS_DL : FPRS_DU;

	save_and_clear_fpu();
	current_thread_info()->xfsr[0] &= ~0x1c000;
	if (freg & 3) {
		current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
		do_fpother(regs);
		return 0;
	}
	if (insn & 0x200000) {
		/* STQ */
		u64 first = 0, second = 0;

		if (current_thread_info()->fpsaved[0] & flag) {
			first = *(u64 *)&f->regs[freg];
			second = *(u64 *)&f->regs[freg+2];
		}
		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		}
		switch (asi) {
		case ASI_P:
		case ASI_S:
			break;
		case ASI_PL:
		case ASI_SL:
		{
			/* Need to convert endians */
			u64 tmp = __swab64p(&first);

			first = __swab64p(&second);
			second = tmp;
			break;
		}
		default:
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (put_user (first >> 32, (u32 __user *)addr) ||
		    __put_user ((u32)first, (u32 __user *)(addr + 4)) ||
		    __put_user (second >> 32, (u32 __user *)(addr + 8)) ||
		    __put_user ((u32)second, (u32 __user *)(addr + 12))) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
	} else {
		/* LDF, LDDF, LDQF */
		u32 data[4] __attribute__ ((aligned(8)));
		int size, i;
		int err;

		if (asi < 0x80) {
			do_privact(regs);
			return 1;
		} else if (asi > ASI_SNFL) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		switch (insn & 0x180000) {
		case 0x000000: size = 1; break;
		case 0x100000: size = 4; break;
		default: size = 2; break;
		}
		for (i = 0; i < size; i++)
			data[i] = 0;

		err = get_user (data[0], (u32 __user *) addr);
		if (!err) {
			for (i = 1; i < size; i++)
				err |= __get_user (data[i], (u32 __user *)(addr + 4*i));
		}
		if (err && !(asi & 0x2 /* NF */)) {
			if (tlb_type == hypervisor)
				sun4v_data_access_exception(regs, addr, 0);
			else
				spitfire_data_access_exception(regs, 0, addr);
			return 1;
		}
		if (asi & 0x8) /* Little */ {
			u64 tmp;

			switch (size) {
			case 1:
				data[0] = le32_to_cpup(data + 0);
				break;
			case 4:
				tmp = le64_to_cpup((u64 *)(data + 0));
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 2));
				*(u64 *)(data + 2) = tmp;
				break;
			default:
				*(u64 *)(data + 0) = le64_to_cpup((u64 *)(data + 0));
				break;
			}
		}
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		memcpy(f->regs + freg, data, size * 4);
		current_thread_info()->fpsaved[0] |= flag;
	}
	advance(regs);
	return 1;
}

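/* A non-faulting load (ASI_*NF) that nonetheless faulted:
 * architecturally it completes by writing zero to the destination
 * register (and its pair for the ldd forms, matched by
 * (insn & 0x780000) == 0x180000), and execution continues.
 */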
void handle_ld_nf(u32 insn, struct pt_regs *regs)
{
	int rd = ((insn >> 25) & 0x1f);
	int from_kernel = (regs->tstate & TSTATE_PRIV) != 0;
	unsigned long *reg;

	maybe_flush_windows(0, 0, rd, from_kernel);
	reg = fetch_reg_addr(rd, regs);
	if (from_kernel || rd < 16) {
		reg[0] = 0;
		if ((insn & 0x780000) == 0x180000)
			reg[1] = 0;
	} else if (test_thread_flag(TIF_32BIT)) {
		put_user(0, (int __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, ((int __user *) reg) + 1);
	} else {
		put_user(0, (unsigned long __user *) reg);
		if ((insn & 0x780000) == 0x180000)
			put_user(0, (unsigned long __user *) reg + 1);
	}
	advance(regs);
}

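/* LDDF/STDF memory-address-not-aligned traps: taken when the address
 * is word- but not doubleword-aligned.  The doubleword access is
 * emulated as two 32-bit user accesses and the halves recombined;
 * handle_stdfmna() below is the store-side counterpart.
 */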
void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u32 first, second;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("lddfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		if (get_user(first, (u32 __user *)sfar) ||
		    get_user(second, (u32 __user *)(sfar + 4))) {
			if (asi & 0x2) /* NF */ {
				first = 0; second = 0;
			} else
				goto daex;
		}
		save_and_clear_fpu();
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = (((u64)first) << 32) | second;
		if (asi & 0x8) /* Little */
			value = __swab64p(&value);
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
			current_thread_info()->fpsaved[0] = FPRS_FEF;
			current_thread_info()->gsr[0] = 0;
		}
		if (!(current_thread_info()->fpsaved[0] & flag)) {
			if (freg < 32)
				memset(f->regs, 0, 32*sizeof(u32));
			else
				memset(f->regs+32, 0, 32*sizeof(u32));
		}
		*(u64 *)(f->regs + freg) = value;
		current_thread_info()->fpsaved[0] |= flag;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}

void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn;
	u64 value;
	u8 freg;
	int flag;
	struct fpustate *f = FPUSTATE;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("stdfmna from kernel", regs);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		int asi = decode_asi(insn, regs);
		freg = ((insn >> 25) & 0x1e) | ((insn >> 20) & 0x20);
		value = 0;
		flag = (freg < 32) ? FPRS_DL : FPRS_DU;
		if ((asi > ASI_SNFL) ||
		    (asi < ASI_P))
			goto daex;
		save_and_clear_fpu();
		if (current_thread_info()->fpsaved[0] & flag)
			value = *(u64 *)&f->regs[freg];
		switch (asi) {
		case ASI_P:
		case ASI_S:
			break;
		case ASI_PL:
		case ASI_SL:
			value = __swab64p(&value);
			break;
		default:
			goto daex;
		}
		if (put_user (value >> 32, (u32 __user *) sfar) ||
		    __put_user ((u32)value, (u32 __user *)(sfar + 4)))
			goto daex;
	} else {
daex:
		if (tlb_type == hypervisor)
			sun4v_data_access_exception(regs, sfar, sfsr);
		else
			spitfire_data_access_exception(regs, sfsr, sfar);
		return;
	}
	advance(regs);
	return;
}