Diffstat (limited to 'arch/i386/kernel/entry.S')
-rw-r--r--  arch/i386/kernel/entry.S  950
1 file changed, 950 insertions, 0 deletions
diff --git a/arch/i386/kernel/entry.S b/arch/i386/kernel/entry.S
new file mode 100644
index 000000000000..1e45ff292bc9
--- /dev/null
+++ b/arch/i386/kernel/entry.S
@@ -0,0 +1,950 @@
/*
 * linux/arch/i386/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
 */

/*
 * entry.S contains the system-call and fault low-level handling routines.
 * This also contains the timer-interrupt handler, as well as all interrupts
 * and faults that can result in a task-switch.
 *
 * NOTE: This code handles signal-recognition, which happens every time
 * after a timer-interrupt and after each system call.
 *
 * I changed all the .align's to 4 (16 byte alignment), as that's faster
 * on a 486.
 *
 * Stack layout in 'ret_from_system_call':
 *	ptrace needs to have all regs on the stack.
 *	if the order here is changed, it needs to be
 *	updated in fork.c:copy_process, signal.c:do_signal,
 *	ptrace.c and ptrace.h
 *
 *	 0(%esp) - %ebx
 *	 4(%esp) - %ecx
 *	 8(%esp) - %edx
 *	 C(%esp) - %esi
 *	10(%esp) - %edi
 *	14(%esp) - %ebp
 *	18(%esp) - %eax
 *	1C(%esp) - %ds
 *	20(%esp) - %es
 *	24(%esp) - orig_eax
 *	28(%esp) - %eip
 *	2C(%esp) - %cs
 *	30(%esp) - %eflags
 *	34(%esp) - %oldesp
 *	38(%esp) - %oldss
 *
 * "current" is in register %ebx during any slow entries.
 */

#include <linux/config.h>
#include <linux/linkage.h>
#include <asm/thread_info.h>
#include <asm/errno.h>
#include <asm/segment.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/desc.h>
#include "irq_vectors.h"

#define nr_syscalls ((syscall_table_size)/4)
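/*
 * Every sys_call_table entry below is a 4-byte pointer (.long), so dividing
 * the table size in bytes by 4 yields the number of system calls; with the
 * 289 entries in this file (0 through 288, ending at sys_keyctl) that is 289.
 */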

EBX = 0x00
ECX = 0x04
EDX = 0x08
ESI = 0x0C
EDI = 0x10
EBP = 0x14
EAX = 0x18
DS = 0x1C
ES = 0x20
ORIG_EAX = 0x24
EIP = 0x28
CS = 0x2C
EFLAGS = 0x30
OLDESP = 0x34
OLDSS = 0x38
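
/*
 * These offsets describe the pt_regs frame that SAVE_ALL (below) completes
 * on the kernel stack: the pushed general and segment registers, orig_eax,
 * and the iret frame (eip, cs, eflags and, when entering from user mode,
 * the old esp/ss) pushed by the CPU.  They must match struct pt_regs on the
 * C side, as the stack-layout comment at the top of the file notes.
 */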

CF_MASK = 0x00000001
TF_MASK = 0x00000100
IF_MASK = 0x00000200
DF_MASK = 0x00000400
NT_MASK = 0x00004000
VM_MASK = 0x00020000

#ifdef CONFIG_PREEMPT
#define preempt_stop		cli
#else
#define preempt_stop
#define resume_kernel		restore_nocheck
#endif
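
/*
 * preempt_stop disables interrupts on the exception-return path so that the
 * need_resched/preempt_count checks cannot race with an interrupt.  Without
 * CONFIG_PREEMPT there is nothing to check when returning to kernel mode,
 * so resume_kernel is simply an alias for restore_nocheck.
 */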

#define SAVE_ALL \
	cld; \
	pushl %es; \
	pushl %ds; \
	pushl %eax; \
	pushl %ebp; \
	pushl %edi; \
	pushl %esi; \
	pushl %edx; \
	pushl %ecx; \
	pushl %ebx; \
	movl $(__USER_DS), %edx; \
	movl %edx, %ds; \
	movl %edx, %es;
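
/*
 * SAVE_ALL pushes the segment and general registers in the reverse of the
 * offset order above, completing the pt_regs frame, and then loads %ds and
 * %es with __USER_DS (a flat data segment) so that C code can be called.
 */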

#define RESTORE_INT_REGS \
	popl %ebx; \
	popl %ecx; \
	popl %edx; \
	popl %esi; \
	popl %edi; \
	popl %ebp; \
	popl %eax

#define RESTORE_REGS \
	RESTORE_INT_REGS; \
1:	popl %ds; \
2:	popl %es; \
.section .fixup,"ax"; \
3:	movl $0,(%esp); \
	jmp 1b; \
4:	movl $0,(%esp); \
	jmp 2b; \
.previous; \
.section __ex_table,"a";\
	.align 4; \
	.long 1b,3b; \
	.long 2b,4b; \
.previous
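
/*
 * Popping %ds or %es can fault if the saved selector is bogus (for example
 * one set via ptrace or left over from vm86), so labels 1 and 2 above have
 * __ex_table entries pointing at fixups 3 and 4, which replace the bad
 * selector on the stack with 0 (the null selector) and retry the pop.
 */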


ENTRY(ret_from_fork)
	pushl %eax
	call schedule_tail
	GET_THREAD_INFO(%ebp)
	popl %eax
	jmp syscall_exit
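
/*
 * ret_from_fork is where a new task starts executing: %eax, handed over by
 * the context-switch code, is pushed as the argument to schedule_tail(),
 * and the child then leaves through the normal syscall exit path with its
 * return value already stored in the pt_regs frame set up at fork time.
 */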

/*
 * Return to user mode is not as complex as all this looks,
 * but we want the default path for a system call return to
 * go as quickly as possible which is why some of this is
 * less clear than it otherwise should be.
 */

	# userspace resumption stub bypassing syscall exit tracing
	ALIGN
ret_from_exception:
	preempt_stop
ret_from_intr:
	GET_THREAD_INFO(%ebp)
	movl EFLAGS(%esp), %eax		# mix EFLAGS and CS
	movb CS(%esp), %al
	testl $(VM_MASK | 3), %eax
	jz resume_kernel
ENTRY(resume_userspace)
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done on
					# int/exception return?
	jne work_pending
	jmp restore_all

#ifdef CONFIG_PREEMPT
ENTRY(resume_kernel)
	cli
	cmpl $0,TI_preempt_count(%ebp)	# non-zero preempt_count ?
	jnz restore_nocheck
need_resched:
	movl TI_flags(%ebp), %ecx	# need_resched set ?
	testb $_TIF_NEED_RESCHED, %cl
	jz restore_all
	testl $IF_MASK,EFLAGS(%esp)	# interrupts off (exception path) ?
	jz restore_all
	call preempt_schedule_irq
	jmp need_resched
#endif

/* SYSENTER_RETURN points to after the "sysenter" instruction in
   the vsyscall page.  See vsyscall-sysenter.S, which defines the symbol. */

	# sysenter call handler stub
ENTRY(sysenter_entry)
	movl TSS_sysenter_esp0(%esp),%esp
sysenter_past_esp:
	sti
	pushl $(__USER_DS)
	pushl %ebp
	pushfl
	pushl $(__USER_CS)
	pushl $SYSENTER_RETURN

/*
 * Load the potential sixth argument from user stack.
 * Careful about security.
 */
	cmpl $__PAGE_OFFSET-3,%ebp
	jae syscall_fault
1:	movl (%ebp),%ebp
.section __ex_table,"a"
	.align 4
	.long 1b,syscall_fault
.previous
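
/*
 * On entry %ebp holds the user stack pointer saved by the vsyscall stub, so
 * (%ebp) is where the sixth system-call argument lives.  The compare against
 * __PAGE_OFFSET-3 above guarantees the 4-byte load cannot reach into kernel
 * space, and the __ex_table entry diverts an unresolvable fault on that load
 * to syscall_fault instead of oopsing.
 */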

	pushl %eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)

	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)
	cli
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx
	jne syscall_exit_work
/* if something modifies registers it must also disable sysexit */
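/* sysexit resumes user mode with %edx as the new %eip and %ecx as the new
   %esp, which is why EIP and OLDESP are loaded into exactly those registers
   below; the sti immediately before sysexit re-enables interrupts with its
   usual one-instruction delay, so no interrupt can be taken on the kernel
   stack in between. */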
	movl EIP(%esp), %edx
	movl OLDESP(%esp), %ecx
	xorl %ebp,%ebp
	sti
	sysexit


	# system call handler stub
ENTRY(system_call)
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
					# system call tracing in operation
	/* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
	testw $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),TI_flags(%ebp)
	jnz syscall_trace_entry
	cmpl $(nr_syscalls), %eax
	jae syscall_badsys
syscall_call:
	call *sys_call_table(,%eax,4)
	movl %eax,EAX(%esp)		# store the return value
syscall_exit:
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	testw $_TIF_ALLWORK_MASK, %cx	# current->work
	jne syscall_exit_work

restore_all:
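	# The three moves below pack the low byte of CS into %al and the low
	# byte of OLDSS into %ah while keeping the high EFLAGS bits, so one
	# mask-and-compare can test for "CPL 3, not vm86, SS in the LDT"
	# (the selector TI bit, hence the 4 << 8 term), which is the only
	# case that may need the 16-bit stack fixup at ldt_ss.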
	movl EFLAGS(%esp), %eax		# mix EFLAGS, SS and CS
	movb OLDSS(%esp), %ah
	movb CS(%esp), %al
	andl $(VM_MASK | (4 << 8) | 3), %eax
	cmpl $((4 << 8) | 3), %eax
	je ldt_ss			# returning to user-space with LDT SS
restore_nocheck:
	RESTORE_REGS
	addl $4, %esp
1:	iret
.section .fixup,"ax"
iret_exc:
	sti
	movl $__USER_DS, %edx
	movl %edx, %ds
	movl %edx, %es
	movl $11,%eax
	call do_exit
.previous
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

ldt_ss:
	larl OLDSS(%esp), %eax
	jnz restore_nocheck
	testl $0x00400000, %eax		# returning to 32bit stack?
	jnz restore_nocheck		# allright, normal return
	/* If returning to userspace with 16bit stack,
	 * try to fix the higher word of ESP, as the CPU
	 * won't restore it.
	 * This is an "official" bug of all the x86-compatible
	 * CPUs, which we can try to work around to make
	 * dosemu and wine happy. */
	subl $8, %esp			# reserve space for switch16 pointer
	cli
	movl %esp, %eax
	/* Set up the 16bit stack frame with switch32 pointer on top,
	 * and a switch16 pointer on top of the current frame. */
	call setup_x86_bogus_stack
	RESTORE_REGS
	lss 20+4(%esp), %esp		# switch to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

	# perform work that needs to be done immediately before resumption
	ALIGN
work_pending:
	testb $_TIF_NEED_RESCHED, %cl
	jz work_notifysig
work_resched:
	call schedule
	cli				# make sure we don't miss an interrupt
					# setting need_resched or sigpending
					# between sampling and the iret
	movl TI_flags(%ebp), %ecx
	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
					# than syscall tracing?
	jz restore_all
	testb $_TIF_NEED_RESCHED, %cl
	jnz work_resched

work_notifysig:				# deal with pending signals and
					# notify-resume requests
	testl $VM_MASK, EFLAGS(%esp)
	movl %esp, %eax
	jne work_notifysig_v86		# returning to kernel-space or
					# vm86-space
	xorl %edx, %edx
	call do_notify_resume
	jmp restore_all

	ALIGN
work_notifysig_v86:
	pushl %ecx			# save ti_flags for do_notify_resume
	call save_v86_state		# %eax contains pt_regs pointer
	popl %ecx
	movl %eax, %esp
	xorl %edx, %edx
	call do_notify_resume
	jmp restore_all

	# perform syscall exit tracing
	ALIGN
syscall_trace_entry:
	movl $-ENOSYS,EAX(%esp)
	movl %esp, %eax
	xorl %edx,%edx
	call do_syscall_trace
	movl ORIG_EAX(%esp), %eax
	cmpl $(nr_syscalls), %eax
	jnae syscall_call
	jmp syscall_exit

	# perform syscall exit tracing
	ALIGN
syscall_exit_work:
	testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
	jz work_pending
	sti				# could let do_syscall_trace() call
					# schedule() instead
	movl %esp, %eax
	movl $1, %edx
	call do_syscall_trace
	jmp resume_userspace

	ALIGN
syscall_fault:
	pushl %eax			# save orig_eax
	SAVE_ALL
	GET_THREAD_INFO(%ebp)
	movl $-EFAULT,EAX(%esp)
	jmp resume_userspace

	ALIGN
syscall_badsys:
	movl $-ENOSYS,EAX(%esp)
	jmp resume_userspace

#define FIXUP_ESPFIX_STACK \
	movl %esp, %eax; \
	/* switch to 32bit stack using the pointer on top of 16bit stack */ \
	lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
	/* copy data from 16bit stack to 32bit stack */ \
	call fixup_x86_bogus_stack; \
	/* put ESP to the proper location */ \
	movl %eax, %esp;
#define UNWIND_ESPFIX_STACK \
	pushl %eax; \
	movl %ss, %eax; \
	/* see if on 16bit stack */ \
	cmpw $__ESPFIX_SS, %ax; \
	jne 28f; \
	movl $__KERNEL_DS, %edx; \
	movl %edx, %ds; \
	movl %edx, %es; \
	/* switch to 32bit stack */ \
	FIXUP_ESPFIX_STACK \
28:	popl %eax;
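
/*
 * These helpers handle traps that arrive while the task is still on the
 * small per-CPU 16-bit (espfix) stack used by the LDT/16-bit-SS return path
 * above: UNWIND_ESPFIX_STACK compares %ss with __ESPFIX_SS and, if it
 * matches, FIXUP_ESPFIX_STACK switches back to the real 32-bit stack using
 * the pointer stored near the top of the 16-bit stack and lets
 * fixup_x86_bogus_stack copy the frame across before the handler runs.
 */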

/*
 * Build the entry stubs and pointer table with
 * some assembler magic.
 */
.data
ENTRY(interrupt)
.text

vector=0
ENTRY(irq_entries_start)
.rept NR_IRQS
	ALIGN
1:	pushl $vector-256
	jmp common_interrupt
.data
	.long 1b
.text
vector=vector+1
.endr
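
/*
 * The .rept block above expands into NR_IRQS small stubs; stub N pushes
 * N-256 (IRQ numbers are stored negated in orig_eax so they cannot be
 * mistaken for syscall numbers) and jumps to common_interrupt, while the
 * interleaved .data words collect the stub addresses into the interrupt[]
 * array that the IDT setup code uses.
 */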

	ALIGN
common_interrupt:
	SAVE_ALL
	movl %esp,%eax
	call do_IRQ
	jmp ret_from_intr

#define BUILD_INTERRUPT(name, nr)	\
ENTRY(name)				\
	pushl $nr-256;			\
	SAVE_ALL			\
	movl %esp,%eax;			\
	call smp_/**/name;		\
	jmp ret_from_intr;

/* The include is where all of the SMP etc. interrupts come from */
#include "entry_arch.h"

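/*
 * The fault stubs that branch to error_code first push an error code (or 0
 * when the CPU supplies none) and the address of their C handler; error_code
 * then finishes building the pt_regs frame, moves the saved %es and a -1
 * orig_eax into the two slots those extra words occupied, and calls the
 * handler through %edi with the pt_regs pointer in %eax (as the comment in
 * error_code notes) and the error code in %edx.
 */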
ENTRY(divide_error)
	pushl $0			# no error code
	pushl $do_divide_error
	ALIGN
error_code:
	pushl %ds
	pushl %eax
	xorl %eax, %eax
	pushl %ebp
	pushl %edi
	pushl %esi
	pushl %edx
	decl %eax			# eax = -1
	pushl %ecx
	pushl %ebx
	cld
	pushl %es
	UNWIND_ESPFIX_STACK
	popl %ecx
	movl ES(%esp), %edi		# get the function address
	movl ORIG_EAX(%esp), %edx	# get the error code
	movl %eax, ORIG_EAX(%esp)
	movl %ecx, ES(%esp)
	movl $(__USER_DS), %ecx
	movl %ecx, %ds
	movl %ecx, %es
	movl %esp,%eax			# pt_regs pointer
	call *%edi
	jmp ret_from_exception

ENTRY(coprocessor_error)
	pushl $0
	pushl $do_coprocessor_error
	jmp error_code

ENTRY(simd_coprocessor_error)
	pushl $0
	pushl $do_simd_coprocessor_error
	jmp error_code

ENTRY(device_not_available)
	pushl $-1			# mark this as an int
	SAVE_ALL
	movl %cr0, %eax
	testl $0x4, %eax		# EM (math emulation bit)
	jne device_not_available_emulate
	preempt_stop
	call math_state_restore
	jmp ret_from_exception
device_not_available_emulate:
	pushl $0			# temporary storage for ORIG_EIP
	call math_emulate
	addl $4, %esp
	jmp ret_from_exception

/*
 * Debug traps and NMI can happen at the one SYSENTER instruction
 * that sets up the real kernel stack. Check here, since we can't
 * allow the wrong stack to be used.
 *
 * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
 * already pushed 3 words if it hits on the sysenter instruction:
 * eflags, cs and eip.
 *
 * We just load the right stack, and push the three (known) values
 * by hand onto the new stack - while updating the return eip past
 * the instruction that would have done it for sysenter.
 */
#define FIX_STACK(offset, ok, label)		\
	cmpw $__KERNEL_CS,4(%esp);		\
	jne ok;					\
label:						\
	movl TSS_sysenter_esp0+offset(%esp),%esp;	\
	pushfl;					\
	pushl $__KERNEL_CS;			\
	pushl $sysenter_past_esp

ENTRY(debug)
	cmpl $sysenter_entry,(%esp)
	jne debug_stack_correct
	FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
debug_stack_correct:
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx			# error code 0
	movl %esp,%eax			# pt_regs pointer
	call do_debug
	testl %eax,%eax
	jnz restore_all
	jmp ret_from_exception

/*
 * NMI is doubly nasty. It can happen _while_ we're handling
 * a debug fault, and the debug fault hasn't yet been able to
 * clear up the stack. So we first check whether we got an
 * NMI on the sysenter entry path, but after that we need to
 * check whether we got an NMI on the debug path where the debug
 * fault happened on the sysenter path.
 */
ENTRY(nmi)
	pushl %eax
	movl %ss, %eax
	cmpw $__ESPFIX_SS, %ax
	popl %eax
	je nmi_16bit_stack
	cmpl $sysenter_entry,(%esp)
	je nmi_stack_fixup
	pushl %eax
	movl %esp,%eax
	/* Do not access memory above the end of our stack page,
	 * it might not exist.
	 */
	andl $(THREAD_SIZE-1),%eax
	cmpl $(THREAD_SIZE-20),%eax
	popl %eax
	jae nmi_stack_correct
	cmpl $sysenter_entry,12(%esp)
	je nmi_debug_stack_check
nmi_stack_correct:
	pushl %eax
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_nmi
	jmp restore_all

nmi_stack_fixup:
	FIX_STACK(12,nmi_stack_correct, 1)
	jmp nmi_stack_correct
nmi_debug_stack_check:
	cmpw $__KERNEL_CS,16(%esp)
	jne nmi_stack_correct
	cmpl $debug - 1,(%esp)
	jle nmi_stack_correct
	cmpl $debug_esp_fix_insn,(%esp)
	jle nmi_debug_stack_fixup
nmi_debug_stack_fixup:
	FIX_STACK(24,nmi_stack_correct, 1)
	jmp nmi_stack_correct

nmi_16bit_stack:
	/* create the pointer to lss back */
	pushl %ss
	pushl %esp
	movzwl %sp, %esp
	addw $4, (%esp)
	/* copy the iret frame of 12 bytes */
	.rept 3
	pushl 16(%esp)
	.endr
	pushl %eax
	SAVE_ALL
	FIXUP_ESPFIX_STACK		# %eax == %esp
	xorl %edx,%edx			# zero error code
	call do_nmi
	RESTORE_REGS
	lss 12+4(%esp), %esp		# back to 16bit stack
1:	iret
.section __ex_table,"a"
	.align 4
	.long 1b,iret_exc
.previous

ENTRY(int3)
	pushl $-1			# mark this as an int
	SAVE_ALL
	xorl %edx,%edx			# zero error code
	movl %esp,%eax			# pt_regs pointer
	call do_int3
	testl %eax,%eax
	jnz restore_all
	jmp ret_from_exception

ENTRY(overflow)
	pushl $0
	pushl $do_overflow
	jmp error_code

ENTRY(bounds)
	pushl $0
	pushl $do_bounds
	jmp error_code

ENTRY(invalid_op)
	pushl $0
	pushl $do_invalid_op
	jmp error_code

ENTRY(coprocessor_segment_overrun)
	pushl $0
	pushl $do_coprocessor_segment_overrun
	jmp error_code

ENTRY(invalid_TSS)
	pushl $do_invalid_TSS
	jmp error_code

ENTRY(segment_not_present)
	pushl $do_segment_not_present
	jmp error_code

ENTRY(stack_segment)
	pushl $do_stack_segment
	jmp error_code

ENTRY(general_protection)
	pushl $do_general_protection
	jmp error_code

ENTRY(alignment_check)
	pushl $do_alignment_check
	jmp error_code

ENTRY(page_fault)
	pushl $do_page_fault
	jmp error_code

#ifdef CONFIG_X86_MCE
ENTRY(machine_check)
	pushl $0
	pushl machine_check_vector
	jmp error_code
#endif

ENTRY(spurious_interrupt_bug)
	pushl $0
	pushl $do_spurious_interrupt_bug
	jmp error_code

.data
ENTRY(sys_call_table)
	.long sys_restart_syscall	/* 0 - old "setup()" system call, used for restarting */
	.long sys_exit
	.long sys_fork
	.long sys_read
	.long sys_write
	.long sys_open			/* 5 */
	.long sys_close
	.long sys_waitpid
	.long sys_creat
	.long sys_link
	.long sys_unlink		/* 10 */
	.long sys_execve
	.long sys_chdir
	.long sys_time
	.long sys_mknod
	.long sys_chmod			/* 15 */
	.long sys_lchown16
	.long sys_ni_syscall		/* old break syscall holder */
	.long sys_stat
	.long sys_lseek
	.long sys_getpid		/* 20 */
	.long sys_mount
	.long sys_oldumount
	.long sys_setuid16
	.long sys_getuid16
	.long sys_stime			/* 25 */
	.long sys_ptrace
	.long sys_alarm
	.long sys_fstat
	.long sys_pause
	.long sys_utime			/* 30 */
	.long sys_ni_syscall		/* old stty syscall holder */
	.long sys_ni_syscall		/* old gtty syscall holder */
	.long sys_access
	.long sys_nice
	.long sys_ni_syscall		/* 35 - old ftime syscall holder */
	.long sys_sync
	.long sys_kill
	.long sys_rename
	.long sys_mkdir
	.long sys_rmdir			/* 40 */
	.long sys_dup
	.long sys_pipe
	.long sys_times
	.long sys_ni_syscall		/* old prof syscall holder */
	.long sys_brk			/* 45 */
	.long sys_setgid16
	.long sys_getgid16
	.long sys_signal
	.long sys_geteuid16
	.long sys_getegid16		/* 50 */
	.long sys_acct
	.long sys_umount		/* recycled never used phys() */
	.long sys_ni_syscall		/* old lock syscall holder */
	.long sys_ioctl
	.long sys_fcntl			/* 55 */
	.long sys_ni_syscall		/* old mpx syscall holder */
	.long sys_setpgid
	.long sys_ni_syscall		/* old ulimit syscall holder */
	.long sys_olduname
	.long sys_umask			/* 60 */
	.long sys_chroot
	.long sys_ustat
	.long sys_dup2
	.long sys_getppid
	.long sys_getpgrp		/* 65 */
	.long sys_setsid
	.long sys_sigaction
	.long sys_sgetmask
	.long sys_ssetmask
	.long sys_setreuid16		/* 70 */
	.long sys_setregid16
	.long sys_sigsuspend
	.long sys_sigpending
	.long sys_sethostname
	.long sys_setrlimit		/* 75 */
	.long sys_old_getrlimit
	.long sys_getrusage
	.long sys_gettimeofday
	.long sys_settimeofday
	.long sys_getgroups16		/* 80 */
	.long sys_setgroups16
	.long old_select
	.long sys_symlink
	.long sys_lstat
	.long sys_readlink		/* 85 */
	.long sys_uselib
	.long sys_swapon
	.long sys_reboot
	.long old_readdir
	.long old_mmap			/* 90 */
	.long sys_munmap
	.long sys_truncate
	.long sys_ftruncate
	.long sys_fchmod
	.long sys_fchown16		/* 95 */
	.long sys_getpriority
	.long sys_setpriority
	.long sys_ni_syscall		/* old profil syscall holder */
	.long sys_statfs
	.long sys_fstatfs		/* 100 */
	.long sys_ioperm
	.long sys_socketcall
	.long sys_syslog
	.long sys_setitimer
	.long sys_getitimer		/* 105 */
	.long sys_newstat
	.long sys_newlstat
	.long sys_newfstat
	.long sys_uname
	.long sys_iopl			/* 110 */
	.long sys_vhangup
	.long sys_ni_syscall		/* old "idle" system call */
	.long sys_vm86old
	.long sys_wait4
	.long sys_swapoff		/* 115 */
	.long sys_sysinfo
	.long sys_ipc
	.long sys_fsync
	.long sys_sigreturn
	.long sys_clone			/* 120 */
	.long sys_setdomainname
	.long sys_newuname
	.long sys_modify_ldt
	.long sys_adjtimex
	.long sys_mprotect		/* 125 */
	.long sys_sigprocmask
	.long sys_ni_syscall		/* old "create_module" */
	.long sys_init_module
	.long sys_delete_module
	.long sys_ni_syscall		/* 130: old "get_kernel_syms" */
	.long sys_quotactl
	.long sys_getpgid
	.long sys_fchdir
	.long sys_bdflush
	.long sys_sysfs			/* 135 */
	.long sys_personality
	.long sys_ni_syscall		/* reserved for afs_syscall */
	.long sys_setfsuid16
	.long sys_setfsgid16
	.long sys_llseek		/* 140 */
	.long sys_getdents
	.long sys_select
	.long sys_flock
	.long sys_msync
	.long sys_readv			/* 145 */
	.long sys_writev
	.long sys_getsid
	.long sys_fdatasync
	.long sys_sysctl
	.long sys_mlock			/* 150 */
	.long sys_munlock
	.long sys_mlockall
	.long sys_munlockall
	.long sys_sched_setparam
	.long sys_sched_getparam	/* 155 */
	.long sys_sched_setscheduler
	.long sys_sched_getscheduler
	.long sys_sched_yield
	.long sys_sched_get_priority_max
	.long sys_sched_get_priority_min	/* 160 */
	.long sys_sched_rr_get_interval
	.long sys_nanosleep
	.long sys_mremap
	.long sys_setresuid16
	.long sys_getresuid16		/* 165 */
	.long sys_vm86
	.long sys_ni_syscall		/* Old sys_query_module */
	.long sys_poll
	.long sys_nfsservctl
	.long sys_setresgid16		/* 170 */
	.long sys_getresgid16
	.long sys_prctl
	.long sys_rt_sigreturn
	.long sys_rt_sigaction
	.long sys_rt_sigprocmask	/* 175 */
	.long sys_rt_sigpending
	.long sys_rt_sigtimedwait
	.long sys_rt_sigqueueinfo
	.long sys_rt_sigsuspend
	.long sys_pread64		/* 180 */
	.long sys_pwrite64
	.long sys_chown16
	.long sys_getcwd
	.long sys_capget
	.long sys_capset		/* 185 */
	.long sys_sigaltstack
	.long sys_sendfile
	.long sys_ni_syscall		/* reserved for streams1 */
	.long sys_ni_syscall		/* reserved for streams2 */
	.long sys_vfork			/* 190 */
	.long sys_getrlimit
	.long sys_mmap2
	.long sys_truncate64
	.long sys_ftruncate64
	.long sys_stat64		/* 195 */
	.long sys_lstat64
	.long sys_fstat64
	.long sys_lchown
	.long sys_getuid
	.long sys_getgid		/* 200 */
	.long sys_geteuid
	.long sys_getegid
	.long sys_setreuid
	.long sys_setregid
	.long sys_getgroups		/* 205 */
	.long sys_setgroups
	.long sys_fchown
	.long sys_setresuid
	.long sys_getresuid
	.long sys_setresgid		/* 210 */
	.long sys_getresgid
	.long sys_chown
	.long sys_setuid
	.long sys_setgid
	.long sys_setfsuid		/* 215 */
	.long sys_setfsgid
	.long sys_pivot_root
	.long sys_mincore
	.long sys_madvise
	.long sys_getdents64		/* 220 */
	.long sys_fcntl64
	.long sys_ni_syscall		/* reserved for TUX */
	.long sys_ni_syscall
	.long sys_gettid
	.long sys_readahead		/* 225 */
	.long sys_setxattr
	.long sys_lsetxattr
	.long sys_fsetxattr
	.long sys_getxattr
	.long sys_lgetxattr		/* 230 */
	.long sys_fgetxattr
	.long sys_listxattr
	.long sys_llistxattr
	.long sys_flistxattr
	.long sys_removexattr		/* 235 */
	.long sys_lremovexattr
	.long sys_fremovexattr
	.long sys_tkill
	.long sys_sendfile64
	.long sys_futex			/* 240 */
	.long sys_sched_setaffinity
	.long sys_sched_getaffinity
	.long sys_set_thread_area
	.long sys_get_thread_area
	.long sys_io_setup		/* 245 */
	.long sys_io_destroy
	.long sys_io_getevents
	.long sys_io_submit
	.long sys_io_cancel
	.long sys_fadvise64		/* 250 */
	.long sys_ni_syscall
	.long sys_exit_group
	.long sys_lookup_dcookie
	.long sys_epoll_create
	.long sys_epoll_ctl		/* 255 */
	.long sys_epoll_wait
	.long sys_remap_file_pages
	.long sys_set_tid_address
	.long sys_timer_create
	.long sys_timer_settime		/* 260 */
	.long sys_timer_gettime
	.long sys_timer_getoverrun
	.long sys_timer_delete
	.long sys_clock_settime
	.long sys_clock_gettime		/* 265 */
	.long sys_clock_getres
	.long sys_clock_nanosleep
	.long sys_statfs64
	.long sys_fstatfs64
	.long sys_tgkill		/* 270 */
	.long sys_utimes
	.long sys_fadvise64_64
	.long sys_ni_syscall		/* sys_vserver */
	.long sys_mbind
	.long sys_get_mempolicy
	.long sys_set_mempolicy
	.long sys_mq_open
	.long sys_mq_unlink
	.long sys_mq_timedsend
	.long sys_mq_timedreceive	/* 280 */
	.long sys_mq_notify
	.long sys_mq_getsetattr
	.long sys_ni_syscall		/* reserved for kexec */
	.long sys_waitid
	.long sys_ni_syscall		/* 285 */ /* available */
	.long sys_add_key
	.long sys_request_key
	.long sys_keyctl

syscall_table_size=(.-sys_call_table)
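
/*
 * syscall_table_size is the distance from sys_call_table to the current
 * location counter ('.'), i.e. the size of the table in bytes; nr_syscalls
 * at the top of the file divides this by 4 to get the number of entries.
 */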