Diffstat (limited to 'include')
 include/asm-x86/asm.h        |   9
 include/asm-x86/delay.h      |   4
 include/asm-x86/fixmap_64.h  |   5
 include/asm-x86/gart.h       |  34
 include/asm-x86/genapic_32.h |   4
 include/asm-x86/genapic_64.h |   4
 include/asm-x86/iommu.h      |  31
 include/asm-x86/uaccess.h    | 448
 include/asm-x86/uaccess_32.h | 422
 include/asm-x86/uaccess_64.h | 263
 10 files changed, 494 insertions, 730 deletions
diff --git a/include/asm-x86/asm.h b/include/asm-x86/asm.h
index 70939820c55f..97220321f39d 100644
--- a/include/asm-x86/asm.h
+++ b/include/asm-x86/asm.h
@@ -3,8 +3,10 @@
 
 #ifdef __ASSEMBLY__
 # define __ASM_FORM(x)	x
+# define __ASM_EX_SEC	.section __ex_table
 #else
 # define __ASM_FORM(x)	" " #x " "
+# define __ASM_EX_SEC	" .section __ex_table,\"a\"\n"
 #endif
 
 #ifdef CONFIG_X86_32
@@ -14,6 +16,7 @@
 #endif
 
 #define __ASM_SIZE(inst)	__ASM_SEL(inst##l, inst##q)
+#define __ASM_REG(reg)		__ASM_SEL(e##reg, r##reg)
 
 #define _ASM_PTR	__ASM_SEL(.long, .quad)
 #define _ASM_ALIGN	__ASM_SEL(.balign 4, .balign 8)
@@ -24,10 +27,14 @@
 #define _ASM_ADD	__ASM_SIZE(add)
 #define _ASM_SUB	__ASM_SIZE(sub)
 #define _ASM_XADD	__ASM_SIZE(xadd)
+#define _ASM_AX		__ASM_REG(ax)
+#define _ASM_BX		__ASM_REG(bx)
+#define _ASM_CX		__ASM_REG(cx)
+#define _ASM_DX		__ASM_REG(dx)
 
 /* Exception table entry */
 # define _ASM_EXTABLE(from,to) \
-	" .section __ex_table,\"a\"\n" \
+	__ASM_EX_SEC \
 	_ASM_ALIGN "\n" \
 	_ASM_PTR #from "," #to "\n" \
 	" .previous\n"
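The reworked _ASM_EXTABLE is the building block the uaccess code below leans on: it emits an __ex_table entry pairing a faulting instruction with its fixup address, in either assembly or C-string form via __ASM_EX_SEC. A minimal sketch of how inline asm consumes it (illustrative only, not part of this commit; read_user_byte is a made-up helper mirroring the __get_user_asm pattern):

/*
 * Hedged sketch: one user-space byte load wired up with _ASM_EXTABLE.
 * If the mov at label 1: faults, the page-fault handler finds 1b in
 * __ex_table and resumes at 3:, which loads -EFAULT into err instead
 * of oopsing.
 */
static inline int read_user_byte(const unsigned char __user *addr,
				 unsigned char *val)
{
	int err = 0;

	asm volatile("1:	movb %2,%b1\n"
		     "2:\n"
		     ".section .fixup,\"ax\"\n"
		     "3:	mov %3,%0\n"
		     "	jmp 2b\n"
		     ".previous\n"
		     _ASM_EXTABLE(1b, 3b)
		     : "=r" (err), "=q" (*val)
		     : "m" (*addr), "i" (-EFAULT), "0" (err));
	return err;
}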
diff --git a/include/asm-x86/delay.h b/include/asm-x86/delay.h
index bb80880c834b..409a649204aa 100644
--- a/include/asm-x86/delay.h
+++ b/include/asm-x86/delay.h
@@ -26,10 +26,6 @@ extern void __delay(unsigned long loops);
 	((n) > 20000 ? __bad_ndelay() : __const_udelay((n) * 5ul)) : \
 	__ndelay(n))
 
-#ifdef CONFIG_X86_32
 void use_tsc_delay(void);
-#else
-#define use_tsc_delay() {}
-#endif
 
 #endif /* _ASM_X86_DELAY_H */
diff --git a/include/asm-x86/fixmap_64.h b/include/asm-x86/fixmap_64.h
index 1a0b61eb02ff..6a4789d57e6c 100644
--- a/include/asm-x86/fixmap_64.h
+++ b/include/asm-x86/fixmap_64.h
@@ -12,6 +12,7 @@
 #define _ASM_FIXMAP_64_H
 
 #include <linux/kernel.h>
+#include <asm/acpi.h>
 #include <asm/apicdef.h>
 #include <asm/page.h>
 #include <asm/vsyscall.h>
@@ -49,6 +50,10 @@ enum fixed_addresses {
 #ifdef CONFIG_PARAVIRT
 	FIX_PARAVIRT_BOOTMAP,
 #endif
+#ifdef CONFIG_ACPI
+	FIX_ACPI_BEGIN,
+	FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
+#endif
 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
 	FIX_OHCI1394_BASE,
 #endif
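FIX_ACPI_BEGIN..FIX_ACPI_END reserves FIX_ACPI_PAGES consecutive fixmap slots on 64-bit. A rough sketch of how such a slot could be consumed (illustrative only; acpi_map_one_page is a hypothetical helper, while set_fixmap() and fix_to_virt() are the standard fixmap accessors):

/* Hypothetical helper: map one physical page at ACPI fixmap slot i. */
static void __iomem *acpi_map_one_page(unsigned long phys, int i)
{
	enum fixed_addresses idx = FIX_ACPI_BEGIN + i;

	BUG_ON(idx > FIX_ACPI_END);		/* stay inside the reserved range */
	set_fixmap(idx, phys & PAGE_MASK);	/* install the PTE for this slot */
	return (void __iomem *)(fix_to_virt(idx) + (phys & ~PAGE_MASK));
}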
diff --git a/include/asm-x86/gart.h b/include/asm-x86/gart.h
index 417f76ea677b..33b9aeeb35a2 100644
--- a/include/asm-x86/gart.h
+++ b/include/asm-x86/gart.h
@@ -1,40 +1,14 @@
-#ifndef _ASM_X8664_IOMMU_H
-#define _ASM_X8664_IOMMU_H 1
+#ifndef _ASM_X8664_GART_H
+#define _ASM_X8664_GART_H 1
 
 #include <asm/e820.h>
+#include <asm/iommu.h>
 
-extern void pci_iommu_shutdown(void);
-extern void no_iommu_init(void);
-extern int force_iommu, no_iommu;
-extern int iommu_detected;
-extern int agp_amd64_init(void);
-#ifdef CONFIG_GART_IOMMU
-extern void gart_iommu_init(void);
-extern void gart_iommu_shutdown(void);
-extern void __init gart_parse_options(char *);
-extern void early_gart_iommu_check(void);
-extern void gart_iommu_hole_init(void);
 extern void set_up_gart_resume(u32, u32);
+
 extern int fallback_aper_order;
 extern int fallback_aper_force;
-extern int gart_iommu_aperture;
-extern int gart_iommu_aperture_allowed;
-extern int gart_iommu_aperture_disabled;
 extern int fix_aperture;
-#else
-#define gart_iommu_aperture 0
-#define gart_iommu_aperture_allowed 0
-#define gart_iommu_aperture_disabled 1
-
-static inline void early_gart_iommu_check(void)
-{
-}
-
-static inline void gart_iommu_shutdown(void)
-{
-}
-
-#endif
 
 /* PTE bits. */
 #define GPTE_VALID	1
diff --git a/include/asm-x86/genapic_32.h b/include/asm-x86/genapic_32.h
index 8d4c8bdb9065..33a73f5ed222 100644
--- a/include/asm-x86/genapic_32.h
+++ b/include/asm-x86/genapic_32.h
@@ -119,6 +119,10 @@ enum uv_system_type {UV_NONE, UV_LEGACY_APIC, UV_X2APIC, UV_NON_UNIQUE_APIC};
 #define is_uv_system()			0
 #define uv_wakeup_secondary(a, b)	1
 
+#ifdef CONFIG_X86_IO_APIC
 extern void force_mask_ioapic_irq_2(void);
+#else
+static inline void force_mask_ioapic_irq_2(void) { }
+#endif
 
 #endif
diff --git a/include/asm-x86/genapic_64.h b/include/asm-x86/genapic_64.h
index 082ad020e412..647e4e5c2580 100644
--- a/include/asm-x86/genapic_64.h
+++ b/include/asm-x86/genapic_64.h
@@ -46,6 +46,10 @@ extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
 
 extern void setup_apic_routing(void);
 
+#ifdef CONFIG_X86_IO_APIC
 extern void force_mask_ioapic_irq_2(void);
+#else
+static inline void force_mask_ioapic_irq_2(void) { }
+#endif
 
 #endif
diff --git a/include/asm-x86/iommu.h b/include/asm-x86/iommu.h
index 07862fdd23c0..068c9a40aa5b 100644
--- a/include/asm-x86/iommu.h
+++ b/include/asm-x86/iommu.h
@@ -1,29 +1,34 @@
-#ifndef _ASM_X8664_GART_H
-#define _ASM_X8664_GART_H 1
+#ifndef _ASM_X8664_IOMMU_H
+#define _ASM_X8664_IOMMU_H 1
 
 extern void pci_iommu_shutdown(void);
 extern void no_iommu_init(void);
 extern int force_iommu, no_iommu;
 extern int iommu_detected;
-#ifdef CONFIG_IOMMU
+
+#ifdef CONFIG_GART_IOMMU
+extern int gart_iommu_aperture;
+extern int gart_iommu_aperture_allowed;
+extern int gart_iommu_aperture_disabled;
+
+extern void early_gart_iommu_check(void);
 extern void gart_iommu_init(void);
 extern void gart_iommu_shutdown(void);
 extern void __init gart_parse_options(char *);
-extern void iommu_hole_init(void);
-extern int fallback_aper_order;
-extern int fallback_aper_force;
-extern int iommu_aperture;
-extern int iommu_aperture_allowed;
-extern int iommu_aperture_disabled;
-extern int fix_aperture;
+extern void gart_iommu_hole_init(void);
+
 #else
-#define iommu_aperture 0
-#define iommu_aperture_allowed 0
+#define gart_iommu_aperture 0
+#define gart_iommu_aperture_allowed 0
+#define gart_iommu_aperture_disabled 1
 
-static inline void gart_iommu_shutdown(void)
+static inline void early_gart_iommu_check(void)
+{
+}
+
+static inline void gart_iommu_shutdown(void)
 {
 }
-
 #endif
 
 #endif
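The #else branch keeps callers free of ifdefs: with CONFIG_GART_IOMMU=n the aperture flags become compile-time constants and the check/shutdown hooks become empty inline stubs (the genapic headers above apply the same pattern to force_mask_ioapic_irq_2()). A sketch of the calling side, assuming a hypothetical example_shutdown() caller:

/* Compiles identically with GART on or off; no #ifdef at the call site. */
static void example_shutdown(void)
{
	early_gart_iommu_check();		/* no-op stub when GART is off */

	if (gart_iommu_aperture_disabled)	/* constant 1 when GART is off */
		return;
	gart_iommu_shutdown();			/* also stubbed when GART is off */
}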
diff --git a/include/asm-x86/uaccess.h b/include/asm-x86/uaccess.h
index 9fefd2947e78..f6fa4d841bbc 100644
--- a/include/asm-x86/uaccess.h
+++ b/include/asm-x86/uaccess.h
@@ -1,5 +1,453 @@
+#ifndef _ASM_UACCES_H_
+#define _ASM_UACCES_H_
+/*
+ * User space memory access functions
+ */
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <linux/prefetch.h>
+#include <linux/string.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+/*
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ */
+
+#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
+
+#define KERNEL_DS	MAKE_MM_SEG(-1UL)
+#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
+
+#define get_ds()	(KERNEL_DS)
+#define get_fs()	(current_thread_info()->addr_limit)
+#define set_fs(x)	(current_thread_info()->addr_limit = (x))
+
+#define segment_eq(a, b)	((a).seg == (b).seg)
+
+#define __addr_ok(addr)					\
+	((unsigned long __force)(addr) <		\
+	 (current_thread_info()->addr_limit.seg))
+
+/*
+ * Test whether a block of memory is a valid user space address.
+ * Returns 0 if the range is valid, nonzero otherwise.
+ *
+ * This is equivalent to the following test:
+ * (u33)addr + (u33)size >= (u33)current->addr_limit.seg (u65 for x86_64)
+ *
+ * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
+ */
+
+#define __range_not_ok(addr, size)					\
+({									\
+	unsigned long flag, roksum;					\
+	__chk_user_ptr(addr);						\
+	asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0"		\
+	    : "=&r" (flag), "=r" (roksum)				\
+	    : "1" (addr), "g" ((long)(size)),				\
+	      "rm" (current_thread_info()->addr_limit.seg));		\
+	flag;								\
+})
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
+ *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
+ *        to write to a block, it is always safe to read from it.
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry {
+	unsigned long insn, fixup;
+};
+
+extern int fixup_exception(struct pt_regs *regs);
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ *
+ * This gets kind of ugly. We want to return _two_ values in "get_user()"
+ * and yet we don't want to do any pointers, because that is too much
+ * of a performance impact. Thus we have a few rather ugly macros here,
+ * and hide all the ugliness from the user.
+ *
+ * The "__xxx" versions of the user access functions are versions that
+ * do not verify the address space, that must have been done previously
+ * with a separate "access_ok()" call (this is used when we do multiple
+ * accesses to the same area of user memory).
+ */
+
+extern int __get_user_1(void);
+extern int __get_user_2(void);
+extern int __get_user_4(void);
+extern int __get_user_8(void);
+extern int __get_user_bad(void);
+
+#define __get_user_x(size, ret, x, ptr)		\
+	asm volatile("call __get_user_" #size	\
+		     : "=a" (ret),"=d" (x)	\
+		     : "0" (ptr))		\
+
+/* Careful: we have to cast the result to the type of the pointer
+ * for sign reasons */
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#ifdef CONFIG_X86_32
+#define __get_user_8(__ret_gu, __val_gu, ptr)				\
+		__get_user_x(X, __ret_gu, __val_gu, ptr)
+#else
+#define __get_user_8(__ret_gu, __val_gu, ptr)				\
+		__get_user_x(8, __ret_gu, __val_gu, ptr)
+#endif
+
+#define get_user(x, ptr)						\
+({									\
+	int __ret_gu;							\
+	unsigned long __val_gu;						\
+	__chk_user_ptr(ptr);						\
+	switch (sizeof(*(ptr))) {					\
+	case 1:								\
+		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 2:								\
+		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 4:								\
+		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	case 8:								\
+		__get_user_8(__ret_gu, __val_gu, ptr);			\
+		break;							\
+	default:							\
+		__get_user_x(X, __ret_gu, __val_gu, ptr);		\
+		break;							\
+	}								\
+	(x) = (__typeof__(*(ptr)))__val_gu;				\
+	__ret_gu;							\
+})
+
+#define __put_user_x(size, x, ptr, __ret_pu)			\
+	asm volatile("call __put_user_" #size : "=a" (__ret_pu)	\
+		     :"0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+
+
+
+#ifdef CONFIG_X86_32
+#define __put_user_u64(x, addr, err)				\
+	asm volatile("1:	movl %%eax,0(%2)\n"		\
+		     "2:	movl %%edx,4(%2)\n"		\
+		     "3:\n"					\
+		     ".section .fixup,\"ax\"\n"			\
+		     "4:	movl %3,%0\n"			\
+		     "	jmp 3b\n"				\
+		     ".previous\n"				\
+		     _ASM_EXTABLE(1b, 4b)			\
+		     _ASM_EXTABLE(2b, 4b)			\
+		     : "=r" (err)				\
+		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
+
+#define __put_user_x8(x, ptr, __ret_pu)				\
+	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
+		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
+#else
+#define __put_user_u64(x, ptr, retval) \
+	__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT)
+#define __put_user_x8(x, ptr, __ret_pu) __put_user_x(8, x, ptr, __ret_pu)
+#endif
+
+extern void __put_user_bad(void);
+
+/*
+ * Strange magic calling convention: pointer in %ecx,
+ * value in %eax(:%edx), return value in %eax. clobbers %rbx
+ */
+extern void __put_user_1(void);
+extern void __put_user_2(void);
+extern void __put_user_4(void);
+extern void __put_user_8(void);
+
+#ifdef CONFIG_X86_WP_WORKS_OK
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr)					\
+({								\
+	int __ret_pu;						\
+	__typeof__(*(ptr)) __pu_val;				\
+	__chk_user_ptr(ptr);					\
+	__pu_val = x;						\
+	switch (sizeof(*(ptr))) {				\
+	case 1:							\
+		__put_user_x(1, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 2:							\
+		__put_user_x(2, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 4:							\
+		__put_user_x(4, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	case 8:							\
+		__put_user_x8(__pu_val, ptr, __ret_pu);		\
+		break;						\
+	default:						\
+		__put_user_x(X, __pu_val, ptr, __ret_pu);	\
+		break;						\
+	}							\
+	__ret_pu;						\
+})
+
+#define __put_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
+		break;							\
+	case 2:								\
+		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
+		break;							\
+	case 4:								\
+		__put_user_asm(x, ptr, retval, "l", "k", "ir", errret);	\
+		break;							\
+	case 8:								\
+		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
+		break;							\
+	default:							\
+		__put_user_bad();					\
+	}								\
+} while (0)
+
+#else
+
+#define __put_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	__typeof__(*(ptr))__pus_tmp = x;				\
+	retval = 0;							\
+									\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
+		retval = errret;					\
+} while (0)
+
+#define put_user(x, ptr)					\
+({								\
+	int __ret_pu;						\
+	__typeof__(*(ptr))__pus_tmp = x;			\
+	__ret_pu = 0;						\
+	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
+				       sizeof(*(ptr))) != 0))	\
+		__ret_pu = -EFAULT;				\
+	__ret_pu;						\
+})
+#endif
+
+#ifdef CONFIG_X86_32
+#define __get_user_asm_u64(x, ptr, retval, errret)	(x) = __get_user_bad()
+#else
+#define __get_user_asm_u64(x, ptr, retval, errret) \
+	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#endif
+
+#define __get_user_size(x, ptr, size, retval, errret)			\
+do {									\
+	retval = 0;							\
+	__chk_user_ptr(ptr);						\
+	switch (size) {							\
+	case 1:								\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
+		break;							\
+	case 2:								\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
+		break;							\
+	case 4:								\
+		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
+		break;							\
+	case 8:								\
+		__get_user_asm_u64(x, ptr, retval, errret);		\
+		break;							\
+	default:							\
+		(x) = __get_user_bad();					\
+	}								\
+} while (0)
+
+#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	mov %3,%0\n"				\
+		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r" (err), ltype(x)				\
+		     : "m" (__m(addr)), "i" (errret), "0" (err))
+
+#define __put_user_nocheck(x, ptr, size)			\
+({								\
+	long __pu_err;						\
+	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
+	__pu_err;						\
+})
+
+#define __get_user_nocheck(x, ptr, size)				\
+({									\
+	long __gu_err;							\
+	unsigned long __gu_val;						\
+	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
+	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
+	__gu_err;							\
+})
+
+/* FIXME: this hack is definitely wrong -AK */
+struct __large_struct { unsigned long buf[100]; };
+#define __m(x) (*(struct __large_struct __user *)(x))
+
+/*
+ * Tell gcc we read from memory instead of writing: this is because
+ * we do not write to any memory gcc knows about, so there are no
+ * aliasing issues.
+ */
+#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
+		     "2:\n"						\
+		     ".section .fixup,\"ax\"\n"				\
+		     "3:	mov %3,%0\n"				\
+		     "	jmp 2b\n"					\
+		     ".previous\n"					\
+		     _ASM_EXTABLE(1b, 3b)				\
+		     : "=r"(err)					\
+		     : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x:   Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
+#define __get_user(x, ptr)						\
+	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x:   Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
+#define __put_user(x, ptr)						\
+	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
+
+#define __get_user_unaligned __get_user
+#define __put_user_unaligned __put_user
+
+/*
+ * movsl can be slow when source and dest are not both 8-byte aligned
+ */
+#ifdef CONFIG_X86_INTEL_USERCOPY
+extern struct movsl_mask {
+	int mask;
+} ____cacheline_aligned_in_smp movsl_mask;
+#endif
+
+#define ARCH_HAS_NOCACHE_UACCESS 1
+
 #ifdef CONFIG_X86_32
 # include "uaccess_32.h"
 #else
+# define ARCH_HAS_SEARCH_EXTABLE
 # include "uaccess_64.h"
 #endif
+
+#endif
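The unified header keeps both usage patterns intact. A short sketch of each, with hypothetical helpers not found in this commit: the checking get_user()/put_user() pair for one-off accesses, and a single access_ok() followed by the unchecked __get_user() for repeated accesses to the same range.

/* One-off access: get_user()/put_user() do their own range check. */
static long double_in_place(int __user *uptr)
{
	int val;

	if (get_user(val, uptr))	/* returns -EFAULT on a bad pointer */
		return -EFAULT;
	return put_user(val * 2, uptr);
}

/* Repeated access: check the whole range once, then use __get_user(). */
static long sum_pair(const int __user *a)
{
	int x, y;

	if (!access_ok(VERIFY_READ, a, 2 * sizeof(int)))
		return -EFAULT;
	if (__get_user(x, a) || __get_user(y, a + 1))
		return -EFAULT;
	return x + y;
}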
diff --git a/include/asm-x86/uaccess_32.h b/include/asm-x86/uaccess_32.h
index 8e7595c1f34e..6fdef39a0bcb 100644
--- a/include/asm-x86/uaccess_32.h
+++ b/include/asm-x86/uaccess_32.h
@@ -11,426 +11,6 @@
 #include <asm/asm.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-
-#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFUL)
-#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
-/*
- * movsl can be slow when source and dest are not both 8-byte aligned
- */
-#ifdef CONFIG_X86_INTEL_USERCOPY
-extern struct movsl_mask {
-	int mask;
-} ____cacheline_aligned_in_smp movsl_mask;
-#endif
-
-#define __addr_ok(addr)					\
-	((unsigned long __force)(addr) <		\
-	 (current_thread_info()->addr_limit.seg))
-
-/*
- * Test whether a block of memory is a valid user space address.
- * Returns 0 if the range is valid, nonzero otherwise.
- *
- * This is equivalent to the following test:
- * (u33)addr + (u33)size >= (u33)current->addr_limit.seg
- *
- * This needs 33-bit arithmetic. We have a carry...
- */
-#define __range_ok(addr, size)						\
-({									\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("addl %3,%1 ; sbbl %0,%0; cmpl %1,%4; sbbl $0,%0"		\
-	    :"=&r" (flag), "=r" (roksum)				\
-	    :"1" (addr), "g" ((int)(size)),				\
-	    "rm" (current_thread_info()->addr_limit.seg));		\
-	flag;								\
-})
-
-/**
- * access_ok: - Checks if a user space pointer is valid
- * @type: Type of access: %VERIFY_READ or %VERIFY_WRITE. Note that
- *        %VERIFY_WRITE is a superset of %VERIFY_READ - if it is safe
- *        to write to a block, it is always safe to read from it.
- * @addr: User space pointer to start of block to check
- * @size: Size of block to check
- *
- * Context: User context only. This function may sleep.
- *
- * Checks if a pointer to a block of memory in user space is valid.
- *
- * Returns true (nonzero) if the memory block may be valid, false (zero)
- * if it is definitely invalid.
- *
- * Note that, depending on architecture, this function probably just
- * checks that the pointer is in the user space range - after calling
- * this function, memory access functions may still return -EFAULT.
- */
-#define access_ok(type, addr, size) (likely(__range_ok(addr, size) == 0))
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-extern void __get_user_1(void);
-extern void __get_user_2(void);
-extern void __get_user_4(void);
-
-#define __get_user_x(size, ret, x, ptr)		\
-	asm volatile("call __get_user_" #size	\
-		     :"=a" (ret),"=d" (x)	\
-		     :"0" (ptr))
-
-
-/* Careful: we have to cast the result to the type of the pointer
- * for sign reasons */
-
-/**
- * get_user: - Get a simple variable from user space.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define get_user(x, ptr)					\
-({								\
-	int __ret_gu;						\
-	unsigned long __val_gu;					\
-	__chk_user_ptr(ptr);					\
-	switch (sizeof(*(ptr))) {				\
-	case 1:							\
-		__get_user_x(1, __ret_gu, __val_gu, ptr);	\
-		break;						\
-	case 2:							\
-		__get_user_x(2, __ret_gu, __val_gu, ptr);	\
-		break;						\
-	case 4:							\
-		__get_user_x(4, __ret_gu, __val_gu, ptr);	\
-		break;						\
-	default:						\
-		__get_user_x(X, __ret_gu, __val_gu, ptr);	\
-		break;						\
-	}							\
-	(x) = (__typeof__(*(ptr)))__val_gu;			\
-	__ret_gu;						\
-})
-
-extern void __put_user_bad(void);
-
-/*
- * Strange magic calling convention: pointer in %ecx,
- * value in %eax(:%edx), return value in %eax, no clobbers.
- */
-extern void __put_user_1(void);
-extern void __put_user_2(void);
-extern void __put_user_4(void);
-extern void __put_user_8(void);
-
-#define __put_user_1(x, ptr)					\
-	asm volatile("call __put_user_1" : "=a" (__ret_pu)	\
-		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
-
-#define __put_user_2(x, ptr)					\
-	asm volatile("call __put_user_2" : "=a" (__ret_pu)	\
-		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
-
-#define __put_user_4(x, ptr)					\
-	asm volatile("call __put_user_4" : "=a" (__ret_pu)	\
-		     : "0" ((typeof(*(ptr)))(x)), "c" (ptr))
-
-#define __put_user_8(x, ptr)					\
-	asm volatile("call __put_user_8" : "=a" (__ret_pu)	\
-		     : "A" ((typeof(*(ptr)))(x)), "c" (ptr))
-
-#define __put_user_X(x, ptr)					\
-	asm volatile("call __put_user_X" : "=a" (__ret_pu)	\
-		     : "c" (ptr))
-
-/**
- * put_user: - Write a simple value into user space.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#ifdef CONFIG_X86_WP_WORKS_OK
-
-#define put_user(x, ptr)				\
-({							\
-	int __ret_pu;					\
-	__typeof__(*(ptr)) __pu_val;			\
-	__chk_user_ptr(ptr);				\
-	__pu_val = x;					\
-	switch (sizeof(*(ptr))) {			\
-	case 1:						\
-		__put_user_1(__pu_val, ptr);		\
-		break;					\
-	case 2:						\
-		__put_user_2(__pu_val, ptr);		\
-		break;					\
-	case 4:						\
-		__put_user_4(__pu_val, ptr);		\
-		break;					\
-	case 8:						\
-		__put_user_8(__pu_val, ptr);		\
-		break;					\
-	default:					\
-		__put_user_X(__pu_val, ptr);		\
-		break;					\
-	}						\
-	__ret_pu;					\
-})
-
-#else
-#define put_user(x, ptr)					\
-({								\
-	int __ret_pu;						\
-	__typeof__(*(ptr))__pus_tmp = x;			\
-	__ret_pu = 0;						\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp,		\
-				       sizeof(*(ptr))) != 0))	\
-		__ret_pu = -EFAULT;				\
-	__ret_pu;						\
-})
-
-
-#endif
-
-/**
- * __get_user: - Get a simple variable from user space, with less checking.
- * @x:   Variable to store result.
- * @ptr: Source address, in user space.
- *
- * Context: User context only. This function may sleep.
- *
- * This macro copies a single simple variable from user space to kernel
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and the result of
- * dereferencing @ptr must be assignable to @x without a cast.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- * On error, the variable @x is set to zero.
- */
-#define __get_user(x, ptr)				\
-	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-
-
-/**
- * __put_user: - Write a simple value into user space, with less checking.
- * @x:   Value to copy to user space.
- * @ptr: Destination address, in user space.
- *
- * Context: User context only. This function may sleep.
- *
- * This macro copies a single simple value from kernel space to user
- * space. It supports simple types like char and int, but not larger
- * data types like structures or arrays.
- *
- * @ptr must have pointer-to-simple-variable type, and @x must be assignable
- * to the result of dereferencing @ptr.
- *
- * Caller must check the pointer with access_ok() before calling this
- * function.
- *
- * Returns zero on success, or -EFAULT on error.
- */
-#define __put_user(x, ptr)						\
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __put_user_nocheck(x, ptr, size)			\
-({								\
-	long __pu_err;						\
-	__put_user_size((x), (ptr), (size), __pu_err, -EFAULT);	\
-	__pu_err;						\
-})
-
-
-#define __put_user_u64(x, addr, err)				\
-	asm volatile("1:	movl %%eax,0(%2)\n"		\
-		     "2:	movl %%edx,4(%2)\n"		\
-		     "3:\n"					\
-		     ".section .fixup,\"ax\"\n"			\
-		     "4:	movl %3,%0\n"			\
-		     "	jmp 3b\n"				\
-		     ".previous\n"				\
-		     _ASM_EXTABLE(1b, 4b)			\
-		     _ASM_EXTABLE(2b, 4b)			\
-		     : "=r" (err)				\
-		     : "A" (x), "r" (addr), "i" (-EFAULT), "0" (err))
-
-#ifdef CONFIG_X86_WP_WORKS_OK
-
-#define __put_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	retval = 0;							\
-	__chk_user_ptr(ptr);						\
-	switch (size) {							\
-	case 1:								\
-		__put_user_asm(x, ptr, retval, "b", "b", "iq", errret);	\
-		break;							\
-	case 2:								\
-		__put_user_asm(x, ptr, retval, "w", "w", "ir", errret);	\
-		break;							\
-	case 4:								\
-		__put_user_asm(x, ptr, retval, "l", "", "ir", errret);	\
-		break;							\
-	case 8:								\
-		__put_user_u64((__typeof__(*ptr))(x), ptr, retval);	\
-		break;							\
-	default:							\
-		__put_user_bad();					\
-	}								\
-} while (0)
-
-#else
-
-#define __put_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	__typeof__(*(ptr))__pus_tmp = x;				\
-	retval = 0;							\
-									\
-	if (unlikely(__copy_to_user_ll(ptr, &__pus_tmp, size) != 0))	\
-		retval = errret;					\
-} while (0)
-
-#endif
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
-		     "2:\n"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "3:	movl %3,%0\n"				\
-		     "	jmp 2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : "=r"(err)					\
-		     : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
-
-
-#define __get_user_nocheck(x, ptr, size)				\
-({									\
-	long __gu_err;							\
-	unsigned long __gu_val;						\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT);	\
-	(x) = (__typeof__(*(ptr)))__gu_val;				\
-	__gu_err;							\
-})
-
-extern long __get_user_bad(void);
-
-#define __get_user_size(x, ptr, size, retval, errret)			\
-do {									\
-	retval = 0;							\
-	__chk_user_ptr(ptr);						\
-	switch (size) {							\
-	case 1:								\
-		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
-		break;							\
-	case 2:								\
-		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
-		break;							\
-	case 4:								\
-		__get_user_asm(x, ptr, retval, "l", "", "=r", errret);	\
-		break;							\
-	default:							\
-		(x) = __get_user_bad();					\
-	}								\
-} while (0)
-
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
-	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
-		     "2:\n"						\
-		     ".section .fixup,\"ax\"\n"				\
-		     "3:	movl %3,%0\n"				\
-		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
-		     "	jmp 2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : "=r" (err), ltype (x)				\
-		     : "m" (__m(addr)), "i" (errret), "0" (err))
-
-
 unsigned long __must_check __copy_to_user_ll
 		(void __user *to, const void *from, unsigned long n);
 unsigned long __must_check __copy_from_user_ll
@@ -576,8 +156,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
 	return __copy_from_user_ll(to, from, n);
 }
 
-#define ARCH_HAS_NOCACHE_UACCESS
-
 static __always_inline unsigned long __copy_from_user_nocache(void *to,
 				const void __user *from, unsigned long n)
 {
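The copy routines that remain here follow the same contract as the single-value macros. A usage sketch (illustrative; slurp_user_buffer is a made-up name): the nocache variant suits large one-shot copies since it avoids displacing useful cache lines, and like the other __-prefixed calls it expects the caller to have range-checked first.

static long slurp_user_buffer(void *kbuf, const void __user *ubuf,
			      unsigned long n)
{
	if (!access_ok(VERIFY_READ, ubuf, n))
		return -EFAULT;
	/* both __copy variants return the number of uncopied bytes */
	if (__copy_from_user_nocache(kbuf, ubuf, n))
		return -EFAULT;
	return 0;
}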
diff --git a/include/asm-x86/uaccess_64.h b/include/asm-x86/uaccess_64.h
index b8a2f4339903..515d4dce96b5 100644
--- a/include/asm-x86/uaccess_64.h
+++ b/include/asm-x86/uaccess_64.h
@@ -9,265 +9,6 @@
 #include <linux/prefetch.h>
 #include <asm/page.h>
 
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
-/*
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- */
-
-#define MAKE_MM_SEG(s)	((mm_segment_t) { (s) })
-
-#define KERNEL_DS	MAKE_MM_SEG(0xFFFFFFFFFFFFFFFFUL)
-#define USER_DS		MAKE_MM_SEG(PAGE_OFFSET)
-
-#define get_ds()	(KERNEL_DS)
-#define get_fs()	(current_thread_info()->addr_limit)
-#define set_fs(x)	(current_thread_info()->addr_limit = (x))
-
-#define segment_eq(a, b)	((a).seg == (b).seg)
-
-#define __addr_ok(addr) (!((unsigned long)(addr) &		\
-			   (current_thread_info()->addr_limit.seg)))
-
-/*
- * Uhhuh, this needs 65-bit arithmetic. We have a carry..
- */
-#define __range_not_ok(addr, size)					\
-({									\
-	unsigned long flag, roksum;					\
-	__chk_user_ptr(addr);						\
-	asm("# range_ok\n\r"						\
-	    "addq %3,%1 ; sbbq %0,%0 ; cmpq %1,%4 ; sbbq $0,%0"		\
-	    : "=&r" (flag), "=r" (roksum)				\
-	    : "1" (addr), "g" ((long)(size)),				\
-	      "g" (current_thread_info()->addr_limit.seg));		\
-	flag;								\
-})
-
-#define access_ok(type, addr, size) (__range_not_ok(addr, size) == 0)
-
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry {
-	unsigned long insn, fixup;
-};
-
-extern int fixup_exception(struct pt_regs *regs);
-
-#define ARCH_HAS_SEARCH_EXTABLE
-
-/*
- * These are the main single-value transfer routines. They automatically
- * use the right size if we just have the right pointer type.
- *
- * This gets kind of ugly. We want to return _two_ values in "get_user()"
- * and yet we don't want to do any pointers, because that is too much
- * of a performance impact. Thus we have a few rather ugly macros here,
- * and hide all the ugliness from the user.
- *
- * The "__xxx" versions of the user access functions are versions that
- * do not verify the address space, that must have been done previously
- * with a separate "access_ok()" call (this is used when we do multiple
- * accesses to the same area of user memory).
- */
-
-#define __get_user_x(size, ret, x, ptr)		\
-	asm volatile("call __get_user_" #size	\
-		     : "=a" (ret),"=d" (x)	\
-		     : "c" (ptr)		\
-		     : "r8")
-
-/* Careful: we have to cast the result to the type of the pointer
- * for sign reasons */
-
-#define get_user(x, ptr)						\
-({									\
-	unsigned long __val_gu;						\
-	int __ret_gu;							\
-	__chk_user_ptr(ptr);						\
-	switch (sizeof(*(ptr))) {					\
-	case 1:								\
-		__get_user_x(1, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 2:								\
-		__get_user_x(2, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 4:								\
-		__get_user_x(4, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	case 8:								\
-		__get_user_x(8, __ret_gu, __val_gu, ptr);		\
-		break;							\
-	default:							\
-		__get_user_bad();					\
-		break;							\
-	}								\
-	(x) = (__force typeof(*(ptr)))__val_gu;				\
-	__ret_gu;							\
-})
-
-extern void __put_user_1(void);
-extern void __put_user_2(void);
-extern void __put_user_4(void);
-extern void __put_user_8(void);
-extern void __put_user_bad(void);
-
-#define __put_user_x(size, ret, x, ptr)		\
-	asm volatile("call __put_user_" #size	\
-		     :"=a" (ret)		\
-		     :"c" (ptr),"d" (x)		\
-		     :"r8")
-
-#define put_user(x, ptr)						\
-	__put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user(x, ptr)						\
-	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
-#define __put_user(x, ptr)						\
-	__put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
-
-#define __get_user_unaligned __get_user
-#define __put_user_unaligned __put_user
-
-#define __put_user_nocheck(x, ptr, size)		\
-({							\
-	int __pu_err;					\
-	__put_user_size((x), (ptr), (size), __pu_err);	\
-	__pu_err;					\
-})
-
-
-#define __put_user_check(x, ptr, size)				\
-({								\
-	int __pu_err;						\
-	typeof(*(ptr)) __user *__pu_addr = (ptr);		\
-	switch (size) {						\
-	case 1:							\
-		__put_user_x(1, __pu_err, x, __pu_addr);	\
-		break;						\
-	case 2:							\
-		__put_user_x(2, __pu_err, x, __pu_addr);	\
-		break;						\
-	case 4:							\
-		__put_user_x(4, __pu_err, x, __pu_addr);	\
-		break;						\
-	case 8:							\
-		__put_user_x(8, __pu_err, x, __pu_addr);	\
-		break;						\
-	default:						\
-		__put_user_bad();				\
-	}							\
-	__pu_err;						\
-})
-
-#define __put_user_size(x, ptr, size, retval)				\
-do {									\
-	retval = 0;							\
-	__chk_user_ptr(ptr);						\
-	switch (size) {							\
-	case 1:								\
-		__put_user_asm(x, ptr, retval, "b", "b", "iq", -EFAULT);\
-		break;							\
-	case 2:								\
-		__put_user_asm(x, ptr, retval, "w", "w", "ir", -EFAULT);\
-		break;							\
-	case 4:								\
-		__put_user_asm(x, ptr, retval, "l", "k", "ir", -EFAULT);\
-		break;							\
-	case 8:								\
-		__put_user_asm(x, ptr, retval, "q", "", "Zr", -EFAULT);	\
-		break;							\
-	default:							\
-		__put_user_bad();					\
-	}								\
-} while (0)
-
-/* FIXME: this hack is definitely wrong -AK */
-struct __large_struct { unsigned long buf[100]; };
-#define __m(x) (*(struct __large_struct __user *)(x))
-
-/*
- * Tell gcc we read from memory instead of writing: this is because
- * we do not write to any memory gcc knows about, so there are no
- * aliasing issues.
- */
-#define __put_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile("1:	mov"itype" %"rtype"1,%2\n"		\
-		     "2:\n"						\
-		     ".section .fixup, \"ax\"\n"			\
-		     "3:	mov %3,%0\n"				\
-		     "	jmp 2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : "=r"(err)					\
-		     : ltype (x), "m" (__m(addr)), "i" (errno), "0" (err))
-
-
-#define __get_user_nocheck(x, ptr, size)			\
-({								\
-	int __gu_err;						\
-	unsigned long __gu_val;					\
-	__get_user_size(__gu_val, (ptr), (size), __gu_err);	\
-	(x) = (__force typeof(*(ptr)))__gu_val;			\
-	__gu_err;						\
-})
-
-extern int __get_user_1(void);
-extern int __get_user_2(void);
-extern int __get_user_4(void);
-extern int __get_user_8(void);
-extern int __get_user_bad(void);
-
-#define __get_user_size(x, ptr, size, retval)				\
-do {									\
-	retval = 0;							\
-	__chk_user_ptr(ptr);						\
-	switch (size) {							\
-	case 1:								\
-		__get_user_asm(x, ptr, retval, "b", "b", "=q", -EFAULT);\
-		break;							\
-	case 2:								\
-		__get_user_asm(x, ptr, retval, "w", "w", "=r", -EFAULT);\
-		break;							\
-	case 4:								\
-		__get_user_asm(x, ptr, retval, "l", "k", "=r", -EFAULT);\
-		break;							\
-	case 8:								\
-		__get_user_asm(x, ptr, retval, "q", "", "=r", -EFAULT);	\
-		break;							\
-	default:							\
-		(x) = __get_user_bad();					\
-	}								\
-} while (0)
-
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errno)	\
-	asm volatile("1:	mov"itype" %2,%"rtype"1\n"		\
-		     "2:\n"						\
-		     ".section .fixup, \"ax\"\n"			\
-		     "3:	mov %3,%0\n"				\
-		     "	xor"itype" %"rtype"1,%"rtype"1\n"		\
-		     "	jmp 2b\n"					\
-		     ".previous\n"					\
-		     _ASM_EXTABLE(1b, 3b)				\
-		     : "=r" (err), ltype (x)				\
-		     : "m" (__m(addr)), "i"(errno), "0"(err))
-
 /*
  * Copy To/From Userspace
  */
@@ -437,7 +178,6 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
 	return copy_user_generic((__force void *)dst, src, size);
 }
 
-#define ARCH_HAS_NOCACHE_UACCESS 1
 extern long __copy_user_nocache(void *dst, const void __user *src,
 				unsigned size, int zerorest);
 
@@ -455,4 +195,7 @@ static inline int __copy_from_user_inatomic_nocache(void *dst,
 	return __copy_user_nocache(dst, src, size, 0);
 }
 
+unsigned long
+copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
+
 #endif /* __X86_64_UACCESS_H */
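The "65-bit arithmetic" comment in the deleted __range_not_ok() refers to the carry trick the asm keeps using in the unified header: add/sbb/cmp/sbb folds the carry of addr + size into the result branchlessly. Written out in plain C as a sketch of the logic only (the real check stays in asm; here the carry is recovered with an explicit compare):

static int range_not_ok_c(unsigned long addr, unsigned long size,
			  unsigned long limit)
{
	unsigned long sum = addr + size;

	if (sum < addr)		/* carry set: addr + size wrapped around */
		return 1;
	return sum > limit;	/* block ends past addr_limit.seg */
}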
