Diffstat (limited to 'include/linux')
44 files changed, 1176 insertions, 351 deletions
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 27b9350052b4..85b2482cc736 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -100,7 +100,6 @@ header-y += iso_fs.h | |||
100 | header-y += ixjuser.h | 100 | header-y += ixjuser.h |
101 | header-y += jffs2.h | 101 | header-y += jffs2.h |
102 | header-y += keyctl.h | 102 | header-y += keyctl.h |
103 | header-y += kvm.h | ||
104 | header-y += limits.h | 103 | header-y += limits.h |
105 | header-y += lock_dlm_plock.h | 104 | header-y += lock_dlm_plock.h |
106 | header-y += magic.h | 105 | header-y += magic.h |
@@ -256,6 +255,7 @@ unifdef-y += kd.h | |||
256 | unifdef-y += kernelcapi.h | 255 | unifdef-y += kernelcapi.h |
257 | unifdef-y += kernel.h | 256 | unifdef-y += kernel.h |
258 | unifdef-y += keyboard.h | 257 | unifdef-y += keyboard.h |
258 | unifdef-$(CONFIG_HAVE_KVM) += kvm.h | ||
259 | unifdef-y += llc.h | 259 | unifdef-y += llc.h |
260 | unifdef-y += loop.h | 260 | unifdef-y += loop.h |
261 | unifdef-y += lp.h | 261 | unifdef-y += lp.h |
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h
index 1d0ef1ae8036..7e3d2859be50 100644
--- a/include/linux/acpi_pmtmr.h
+++ b/include/linux/acpi_pmtmr.h
@@ -25,6 +25,8 @@ static inline u32 acpi_pm_read_early(void) | |||
25 | return acpi_pm_read_verified() & ACPI_PM_MASK; | 25 | return acpi_pm_read_verified() & ACPI_PM_MASK; |
26 | } | 26 | } |
27 | 27 | ||
28 | extern void pmtimer_wait(unsigned); | ||
29 | |||
28 | #else | 30 | #else |
29 | 31 | ||
30 | static inline u32 acpi_pm_read_early(void) | 32 | static inline u32 acpi_pm_read_early(void) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 71e7a847dffc..e18d4192f6e8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -825,6 +825,7 @@ static inline void exit_io_context(void) | |||
825 | { | 825 | { |
826 | } | 826 | } |
827 | 827 | ||
828 | struct io_context; | ||
828 | static inline int put_io_context(struct io_context *ioc) | 829 | static inline int put_io_context(struct io_context *ioc) |
829 | { | 830 | { |
830 | return 1; | 831 | return 1; |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 06dadba349ac..cfc3147e5cf9 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -282,10 +282,10 @@ static inline void blk_add_trace_remap(struct request_queue *q, struct bio *bio, | |||
282 | __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); | 282 | __blk_add_trace(bt, from, bio->bi_size, bio->bi_rw, BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r); |
283 | } | 283 | } |
284 | 284 | ||
285 | extern int blk_trace_setup(request_queue_t *q, char *name, dev_t dev, | 285 | extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, |
286 | char __user *arg); | 286 | char __user *arg); |
287 | extern int blk_trace_startstop(request_queue_t *q, int start); | 287 | extern int blk_trace_startstop(struct request_queue *q, int start); |
288 | extern int blk_trace_remove(request_queue_t *q); | 288 | extern int blk_trace_remove(struct request_queue *q); |
289 | 289 | ||
290 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ | 290 | #else /* !CONFIG_BLK_DEV_IO_TRACE */ |
291 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) | 291 | #define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY) |
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 107787aacb64..85778a4b1209 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -103,7 +103,7 @@ struct clocksource { | |||
103 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 | 103 | #define CLOCK_SOURCE_VALID_FOR_HRES 0x20 |
104 | 104 | ||
105 | /* simplify initialization of mask field */ | 105 | /* simplify initialization of mask field */ |
106 | #define CLOCKSOURCE_MASK(bits) (cycle_t)(bits<64 ? ((1ULL<<bits)-1) : -1) | 106 | #define CLOCKSOURCE_MASK(bits) (cycle_t)((bits) < 64 ? ((1ULL<<(bits))-1) : -1) |
107 | 107 | ||
108 | /** | 108 | /** |
109 | * clocksource_khz2mult - calculates mult from khz and shift | 109 | * clocksource_khz2mult - calculates mult from khz and shift |
@@ -215,6 +215,7 @@ static inline void clocksource_calculate_interval(struct clocksource *c, | |||
215 | 215 | ||
216 | /* used to install a new clocksource */ | 216 | /* used to install a new clocksource */ |
217 | extern int clocksource_register(struct clocksource*); | 217 | extern int clocksource_register(struct clocksource*); |
218 | extern void clocksource_unregister(struct clocksource*); | ||
218 | extern struct clocksource* clocksource_get_next(void); | 219 | extern struct clocksource* clocksource_get_next(void); |
219 | extern void clocksource_change_rating(struct clocksource *cs, int rating); | 220 | extern void clocksource_change_rating(struct clocksource *cs, int rating); |
220 | extern void clocksource_resume(void); | 221 | extern void clocksource_resume(void); |
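[Editor's note, not part of the commit: the extra parentheses added to CLOCKSOURCE_MASK() above matter once the argument is an expression rather than a plain constant. A simplified stand-alone C sketch of the difference, using invented DEMO_* names:]

/* Simplified stand-ins for the old and new macro forms. */
#define DEMO_MASK_OLD(bits) (bits < 64 ? ((1ULL << bits) - 1) : -1)
#define DEMO_MASK_NEW(bits) ((bits) < 64 ? ((1ULL << (bits)) - 1) : -1)

/* With an expression argument such as 'width & 0x3f':
 *   DEMO_MASK_OLD(width & 0x3f) parses the test as (width & (0x3f < 64)) and the
 *   shift as ((1ULL << width) & 0x3f), because '<' and '<<' bind tighter than '&'.
 *   DEMO_MASK_NEW(width & 0x3f) evaluates the whole argument first, as intended. */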
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 0e69d2cf14aa..d38655f2be70 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -191,6 +191,10 @@ asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, | |||
191 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, | 191 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, |
192 | struct compat_timeval __user *tvp); | 192 | struct compat_timeval __user *tvp); |
193 | 193 | ||
194 | asmlinkage long compat_sys_wait4(compat_pid_t pid, | ||
195 | compat_uint_t *stat_addr, int options, | ||
196 | struct compat_rusage *ru); | ||
197 | |||
194 | #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) | 198 | #define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t)) |
195 | 199 | ||
196 | #define BITS_TO_COMPAT_LONGS(bits) \ | 200 | #define BITS_TO_COMPAT_LONGS(bits) \ |
@@ -239,6 +243,17 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid, | |||
239 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, | 243 | compat_ulong_t maxnode, const compat_ulong_t __user *old_nodes, |
240 | const compat_ulong_t __user *new_nodes); | 244 | const compat_ulong_t __user *new_nodes); |
241 | 245 | ||
246 | extern int compat_ptrace_request(struct task_struct *child, | ||
247 | compat_long_t request, | ||
248 | compat_ulong_t addr, compat_ulong_t data); | ||
249 | |||
250 | #ifdef __ARCH_WANT_COMPAT_SYS_PTRACE | ||
251 | extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | ||
252 | compat_ulong_t addr, compat_ulong_t data); | ||
253 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | ||
254 | compat_long_t addr, compat_long_t data); | ||
255 | #endif /* __ARCH_WANT_COMPAT_SYS_PTRACE */ | ||
256 | |||
242 | /* | 257 | /* |
243 | * epoll (fs/eventpoll.c) compat bits follow ... | 258 | * epoll (fs/eventpoll.c) compat bits follow ... |
244 | */ | 259 | */ |
diff --git a/include/linux/const.h b/include/linux/const.h
index 07b300bfe34b..c22c707c455d 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -7,13 +7,18 @@ | |||
7 | * C code. Therefore we cannot annotate them always with | 7 | * C code. Therefore we cannot annotate them always with |
8 | * 'UL' and other type specifiers unilaterally. We | 8 | * 'UL' and other type specifiers unilaterally. We |
9 | * use the following macros to deal with this. | 9 | * use the following macros to deal with this. |
10 | * | ||
11 | * Similarly, _AT() will cast an expression with a type in C, but | ||
12 | * leave it unchanged in asm. | ||
10 | */ | 13 | */ |
11 | 14 | ||
12 | #ifdef __ASSEMBLY__ | 15 | #ifdef __ASSEMBLY__ |
13 | #define _AC(X,Y) X | 16 | #define _AC(X,Y) X |
17 | #define _AT(T,X) X | ||
14 | #else | 18 | #else |
15 | #define __AC(X,Y) (X##Y) | 19 | #define __AC(X,Y) (X##Y) |
16 | #define _AC(X,Y) __AC(X,Y) | 20 | #define _AC(X,Y) __AC(X,Y) |
21 | #define _AT(T,X) ((T)(X)) | ||
17 | #endif | 22 | #endif |
18 | 23 | ||
19 | #endif /* !(_LINUX_CONST_H) */ | 24 | #endif /* !(_LINUX_CONST_H) */ |
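[Editor's note, not part of the commit: with the new _AT() macro, a header shared between C and assembly can carry a typed constant without breaking the asm side. The DEMO_* names below are invented for illustration.]

#include <linux/const.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_PAGE_SIZE  (_AC(1, UL) << DEMO_PAGE_SHIFT)           /* 1UL in C, bare 1 in asm */
#define DEMO_PAGE_MASK  _AT(unsigned long, ~(DEMO_PAGE_SIZE - 1)) /* cast applied only in C */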
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 85bd790c201e..7047f58306a7 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -218,8 +218,8 @@ int __first_cpu(const cpumask_t *srcp); | |||
218 | int __next_cpu(int n, const cpumask_t *srcp); | 218 | int __next_cpu(int n, const cpumask_t *srcp); |
219 | #define next_cpu(n, src) __next_cpu((n), &(src)) | 219 | #define next_cpu(n, src) __next_cpu((n), &(src)) |
220 | #else | 220 | #else |
221 | #define first_cpu(src) 0 | 221 | #define first_cpu(src) ({ (void)(src); 0; }) |
222 | #define next_cpu(n, src) 1 | 222 | #define next_cpu(n, src) ({ (void)(src); 1; }) |
223 | #endif | 223 | #endif |
224 | 224 | ||
225 | #define cpumask_of_cpu(cpu) \ | 225 | #define cpumask_of_cpu(cpu) \ |
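[Editor's note, not part of the commit: on uniprocessor builds first_cpu()/next_cpu() previously expanded to bare constants that never mentioned their argument; the statement-expression form with (void)(src) keeps the mask "used", which avoids unused-variable style warnings in callers. A hypothetical caller:]

#include <linux/cpumask.h>

static int demo_pick_first_cpu(void)
{
	cpumask_t mask = CPU_MASK_ALL;

	/* on UP this now expands to ({ (void)(mask); 0; }) instead of a bare 0,
	 * so the compiler still sees 'mask' being referenced */
	return first_cpu(mask);
}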
diff --git a/include/linux/device.h b/include/linux/device.h
index 1880208964d6..db375be333c7 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -84,6 +84,9 @@ int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data, | |||
84 | struct device *bus_find_device(struct bus_type *bus, struct device *start, | 84 | struct device *bus_find_device(struct bus_type *bus, struct device *start, |
85 | void *data, | 85 | void *data, |
86 | int (*match)(struct device *dev, void *data)); | 86 | int (*match)(struct device *dev, void *data)); |
87 | struct device *bus_find_device_by_name(struct bus_type *bus, | ||
88 | struct device *start, | ||
89 | const char *name); | ||
87 | 90 | ||
88 | int __must_check bus_for_each_drv(struct bus_type *bus, | 91 | int __must_check bus_for_each_drv(struct bus_type *bus, |
89 | struct device_driver *start, void *data, | 92 | struct device_driver *start, void *data, |
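[Editor's note, not part of the commit: the new bus_find_device_by_name() declaration above suggests a by-name lookup on a bus; the bus, device name, and reference handling in this sketch are assumptions, not taken from the patch.]

#include <linux/device.h>
#include <linux/platform_device.h>

static void demo_find_by_name(void)
{
	/* "demo-soc" and the platform bus are purely illustrative */
	struct device *dev = bus_find_device_by_name(&platform_bus_type, NULL, "demo-soc");

	if (dev) {
		/* ... use the device ... */
		put_device(dev);	/* drop the reference the lookup presumably took */
	}
}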
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 576e83bd6d88..7ceb24d87c1a 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -355,6 +355,7 @@ typedef struct elf64_shdr { | |||
355 | #define NT_AUXV 6 | 355 | #define NT_AUXV 6 |
356 | #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ | 356 | #define NT_PRXFPREG 0x46e62b7f /* copied from gdb5.1/include/elf/common.h */ |
357 | #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ | 357 | #define NT_PPC_VMX 0x100 /* PowerPC Altivec/VMX registers */ |
358 | #define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */ | ||
358 | 359 | ||
359 | 360 | ||
360 | /* Note header in a PT_NOTE section */ | 361 | /* Note header in a PT_NOTE section */ |
diff --git a/include/linux/hpet.h b/include/linux/hpet.h
index 707f7cb9e795..9cd94bfd07e5 100644
--- a/include/linux/hpet.h
+++ b/include/linux/hpet.h
@@ -64,7 +64,7 @@ struct hpet { | |||
64 | */ | 64 | */ |
65 | 65 | ||
66 | #define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) | 66 | #define Tn_INT_ROUTE_CAP_MASK (0xffffffff00000000ULL) |
67 | #define Tn_INI_ROUTE_CAP_SHIFT (32UL) | 67 | #define Tn_INT_ROUTE_CAP_SHIFT (32UL) |
68 | #define Tn_FSB_INT_DELCAP_MASK (0x8000UL) | 68 | #define Tn_FSB_INT_DELCAP_MASK (0x8000UL) |
69 | #define Tn_FSB_INT_DELCAP_SHIFT (15) | 69 | #define Tn_FSB_INT_DELCAP_SHIFT (15) |
70 | #define Tn_FSB_EN_CNF_MASK (0x4000UL) | 70 | #define Tn_FSB_EN_CNF_MASK (0x4000UL) |
@@ -115,9 +115,6 @@ static inline void hpet_reserve_timer(struct hpet_data *hd, int timer) | |||
115 | } | 115 | } |
116 | 116 | ||
117 | int hpet_alloc(struct hpet_data *); | 117 | int hpet_alloc(struct hpet_data *); |
118 | int hpet_register(struct hpet_task *, int); | ||
119 | int hpet_unregister(struct hpet_task *); | ||
120 | int hpet_control(struct hpet_task *, unsigned int, unsigned long); | ||
121 | 118 | ||
122 | #endif /* __KERNEL__ */ | 119 | #endif /* __KERNEL__ */ |
123 | 120 | ||
diff --git a/include/linux/init_ohci1394_dma.h b/include/linux/init_ohci1394_dma.h
new file mode 100644
index 000000000000..3c03a4bba5e4
--- /dev/null
+++ b/include/linux/init_ohci1394_dma.h
@@ -0,0 +1,4 @@ | |||
1 | #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT | ||
2 | extern int __initdata init_ohci1394_dma_early; | ||
3 | extern void __init init_ohci1394_dma_on_all_controllers(void); | ||
4 | #endif | ||
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 6187a8567bc7..605d237364d2 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -8,6 +8,7 @@ | |||
8 | #ifndef _LINUX_IOPORT_H | 8 | #ifndef _LINUX_IOPORT_H |
9 | #define _LINUX_IOPORT_H | 9 | #define _LINUX_IOPORT_H |
10 | 10 | ||
11 | #ifndef __ASSEMBLY__ | ||
11 | #include <linux/compiler.h> | 12 | #include <linux/compiler.h> |
12 | #include <linux/types.h> | 13 | #include <linux/types.h> |
13 | /* | 14 | /* |
@@ -153,4 +154,5 @@ extern struct resource * __devm_request_region(struct device *dev, | |||
153 | extern void __devm_release_region(struct device *dev, struct resource *parent, | 154 | extern void __devm_release_region(struct device *dev, struct resource *parent, |
154 | resource_size_t start, resource_size_t n); | 155 | resource_size_t start, resource_size_t n); |
155 | 156 | ||
157 | #endif /* __ASSEMBLY__ */ | ||
156 | #endif /* _LINUX_IOPORT_H */ | 158 | #endif /* _LINUX_IOPORT_H */ |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index a7283c9beadf..ff356b2ee478 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -194,6 +194,9 @@ static inline int log_buf_read(int idx) { return 0; } | |||
194 | static inline int log_buf_copy(char *dest, int idx, int len) { return 0; } | 194 | static inline int log_buf_copy(char *dest, int idx, int len) { return 0; } |
195 | #endif | 195 | #endif |
196 | 196 | ||
197 | extern void __attribute__((format(printf, 1, 2))) | ||
198 | early_printk(const char *fmt, ...); | ||
199 | |||
197 | unsigned long int_sqrt(unsigned long); | 200 | unsigned long int_sqrt(unsigned long); |
198 | 201 | ||
199 | extern int printk_ratelimit(void); | 202 | extern int printk_ratelimit(void); |
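[Editor's note, not part of the commit: declaring early_printk() with the printf format attribute lets gcc type-check its arguments the same way it checks printk(). Hypothetical call sites:]

static void demo_early_report(int node, void *p)
{
	early_printk("booting node %d\n", node);   /* fine */
	early_printk("booting node %d\n", p);      /* gcc can now warn: %d expects int */
}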
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 81891581e89b..6168c0a44172 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -182,6 +182,15 @@ static inline void kretprobe_assert(struct kretprobe_instance *ri, | |||
182 | } | 182 | } |
183 | } | 183 | } |
184 | 184 | ||
185 | #ifdef CONFIG_KPROBES_SANITY_TEST | ||
186 | extern int init_test_probes(void); | ||
187 | #else | ||
188 | static inline int init_test_probes(void) | ||
189 | { | ||
190 | return 0; | ||
191 | } | ||
192 | #endif /* CONFIG_KPROBES_SANITY_TEST */ | ||
193 | |||
185 | extern spinlock_t kretprobe_lock; | 194 | extern spinlock_t kretprobe_lock; |
186 | extern struct mutex kprobe_mutex; | 195 | extern struct mutex kprobe_mutex; |
187 | extern int arch_prepare_kprobe(struct kprobe *p); | 196 | extern int arch_prepare_kprobe(struct kprobe *p); |
@@ -227,6 +236,7 @@ void unregister_kretprobe(struct kretprobe *rp); | |||
227 | 236 | ||
228 | void kprobe_flush_task(struct task_struct *tk); | 237 | void kprobe_flush_task(struct task_struct *tk); |
229 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); | 238 | void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); |
239 | |||
230 | #else /* CONFIG_KPROBES */ | 240 | #else /* CONFIG_KPROBES */ |
231 | 241 | ||
232 | #define __kprobes /**/ | 242 | #define __kprobes /**/ |
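[Editor's note, not part of the commit: the no-op stub added above keeps callers free of #ifdef CONFIG_KPROBES_SANITY_TEST. A hypothetical init-time caller:]

#include <linux/kprobes.h>

static int __init demo_kprobe_selftest(void)
{
	int ret = init_test_probes();	/* returns 0 and compiles away when the test is off */

	if (ret)
		printk(KERN_ERR "kprobe smoke test failed: %d\n", ret);
	return 0;
}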
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 057a7f34ee36..4de4fd2d8607 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -9,12 +9,10 @@ | |||
9 | 9 | ||
10 | #include <asm/types.h> | 10 | #include <asm/types.h> |
11 | #include <linux/ioctl.h> | 11 | #include <linux/ioctl.h> |
12 | #include <asm/kvm.h> | ||
12 | 13 | ||
13 | #define KVM_API_VERSION 12 | 14 | #define KVM_API_VERSION 12 |
14 | 15 | ||
15 | /* Architectural interrupt line count. */ | ||
16 | #define KVM_NR_INTERRUPTS 256 | ||
17 | |||
18 | /* for KVM_CREATE_MEMORY_REGION */ | 16 | /* for KVM_CREATE_MEMORY_REGION */ |
19 | struct kvm_memory_region { | 17 | struct kvm_memory_region { |
20 | __u32 slot; | 18 | __u32 slot; |
@@ -23,17 +21,19 @@ struct kvm_memory_region { | |||
23 | __u64 memory_size; /* bytes */ | 21 | __u64 memory_size; /* bytes */ |
24 | }; | 22 | }; |
25 | 23 | ||
26 | /* for kvm_memory_region::flags */ | 24 | /* for KVM_SET_USER_MEMORY_REGION */ |
27 | #define KVM_MEM_LOG_DIRTY_PAGES 1UL | 25 | struct kvm_userspace_memory_region { |
28 | 26 | __u32 slot; | |
29 | struct kvm_memory_alias { | ||
30 | __u32 slot; /* this has a different namespace than memory slots */ | ||
31 | __u32 flags; | 27 | __u32 flags; |
32 | __u64 guest_phys_addr; | 28 | __u64 guest_phys_addr; |
33 | __u64 memory_size; | 29 | __u64 memory_size; /* bytes */ |
34 | __u64 target_phys_addr; | 30 | __u64 userspace_addr; /* start of the userspace allocated memory */ |
35 | }; | 31 | }; |
36 | 32 | ||
33 | /* for kvm_memory_region::flags */ | ||
34 | #define KVM_MEM_LOG_DIRTY_PAGES 1UL | ||
35 | |||
36 | |||
37 | /* for KVM_IRQ_LINE */ | 37 | /* for KVM_IRQ_LINE */ |
38 | struct kvm_irq_level { | 38 | struct kvm_irq_level { |
39 | /* | 39 | /* |
@@ -45,62 +45,18 @@ struct kvm_irq_level { | |||
45 | __u32 level; | 45 | __u32 level; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | /* for KVM_GET_IRQCHIP and KVM_SET_IRQCHIP */ | ||
49 | struct kvm_pic_state { | ||
50 | __u8 last_irr; /* edge detection */ | ||
51 | __u8 irr; /* interrupt request register */ | ||
52 | __u8 imr; /* interrupt mask register */ | ||
53 | __u8 isr; /* interrupt service register */ | ||
54 | __u8 priority_add; /* highest irq priority */ | ||
55 | __u8 irq_base; | ||
56 | __u8 read_reg_select; | ||
57 | __u8 poll; | ||
58 | __u8 special_mask; | ||
59 | __u8 init_state; | ||
60 | __u8 auto_eoi; | ||
61 | __u8 rotate_on_auto_eoi; | ||
62 | __u8 special_fully_nested_mode; | ||
63 | __u8 init4; /* true if 4 byte init */ | ||
64 | __u8 elcr; /* PIIX edge/trigger selection */ | ||
65 | __u8 elcr_mask; | ||
66 | }; | ||
67 | |||
68 | #define KVM_IOAPIC_NUM_PINS 24 | ||
69 | struct kvm_ioapic_state { | ||
70 | __u64 base_address; | ||
71 | __u32 ioregsel; | ||
72 | __u32 id; | ||
73 | __u32 irr; | ||
74 | __u32 pad; | ||
75 | union { | ||
76 | __u64 bits; | ||
77 | struct { | ||
78 | __u8 vector; | ||
79 | __u8 delivery_mode:3; | ||
80 | __u8 dest_mode:1; | ||
81 | __u8 delivery_status:1; | ||
82 | __u8 polarity:1; | ||
83 | __u8 remote_irr:1; | ||
84 | __u8 trig_mode:1; | ||
85 | __u8 mask:1; | ||
86 | __u8 reserve:7; | ||
87 | __u8 reserved[4]; | ||
88 | __u8 dest_id; | ||
89 | } fields; | ||
90 | } redirtbl[KVM_IOAPIC_NUM_PINS]; | ||
91 | }; | ||
92 | |||
93 | #define KVM_IRQCHIP_PIC_MASTER 0 | ||
94 | #define KVM_IRQCHIP_PIC_SLAVE 1 | ||
95 | #define KVM_IRQCHIP_IOAPIC 2 | ||
96 | 48 | ||
97 | struct kvm_irqchip { | 49 | struct kvm_irqchip { |
98 | __u32 chip_id; | 50 | __u32 chip_id; |
99 | __u32 pad; | 51 | __u32 pad; |
100 | union { | 52 | union { |
101 | char dummy[512]; /* reserving space */ | 53 | char dummy[512]; /* reserving space */ |
54 | #ifdef CONFIG_X86 | ||
102 | struct kvm_pic_state pic; | 55 | struct kvm_pic_state pic; |
56 | #endif | ||
57 | #if defined(CONFIG_X86) || defined(CONFIG_IA64) | ||
103 | struct kvm_ioapic_state ioapic; | 58 | struct kvm_ioapic_state ioapic; |
59 | #endif | ||
104 | } chip; | 60 | } chip; |
105 | }; | 61 | }; |
106 | 62 | ||
@@ -116,6 +72,7 @@ struct kvm_irqchip { | |||
116 | #define KVM_EXIT_FAIL_ENTRY 9 | 72 | #define KVM_EXIT_FAIL_ENTRY 9 |
117 | #define KVM_EXIT_INTR 10 | 73 | #define KVM_EXIT_INTR 10 |
118 | #define KVM_EXIT_SET_TPR 11 | 74 | #define KVM_EXIT_SET_TPR 11 |
75 | #define KVM_EXIT_TPR_ACCESS 12 | ||
119 | 76 | ||
120 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ | 77 | /* for KVM_RUN, returned by mmap(vcpu_fd, offset=0) */ |
121 | struct kvm_run { | 78 | struct kvm_run { |
@@ -174,90 +131,17 @@ struct kvm_run { | |||
174 | __u32 longmode; | 131 | __u32 longmode; |
175 | __u32 pad; | 132 | __u32 pad; |
176 | } hypercall; | 133 | } hypercall; |
134 | /* KVM_EXIT_TPR_ACCESS */ | ||
135 | struct { | ||
136 | __u64 rip; | ||
137 | __u32 is_write; | ||
138 | __u32 pad; | ||
139 | } tpr_access; | ||
177 | /* Fix the size of the union. */ | 140 | /* Fix the size of the union. */ |
178 | char padding[256]; | 141 | char padding[256]; |
179 | }; | 142 | }; |
180 | }; | 143 | }; |
181 | 144 | ||
182 | /* for KVM_GET_REGS and KVM_SET_REGS */ | ||
183 | struct kvm_regs { | ||
184 | /* out (KVM_GET_REGS) / in (KVM_SET_REGS) */ | ||
185 | __u64 rax, rbx, rcx, rdx; | ||
186 | __u64 rsi, rdi, rsp, rbp; | ||
187 | __u64 r8, r9, r10, r11; | ||
188 | __u64 r12, r13, r14, r15; | ||
189 | __u64 rip, rflags; | ||
190 | }; | ||
191 | |||
192 | /* for KVM_GET_FPU and KVM_SET_FPU */ | ||
193 | struct kvm_fpu { | ||
194 | __u8 fpr[8][16]; | ||
195 | __u16 fcw; | ||
196 | __u16 fsw; | ||
197 | __u8 ftwx; /* in fxsave format */ | ||
198 | __u8 pad1; | ||
199 | __u16 last_opcode; | ||
200 | __u64 last_ip; | ||
201 | __u64 last_dp; | ||
202 | __u8 xmm[16][16]; | ||
203 | __u32 mxcsr; | ||
204 | __u32 pad2; | ||
205 | }; | ||
206 | |||
207 | /* for KVM_GET_LAPIC and KVM_SET_LAPIC */ | ||
208 | #define KVM_APIC_REG_SIZE 0x400 | ||
209 | struct kvm_lapic_state { | ||
210 | char regs[KVM_APIC_REG_SIZE]; | ||
211 | }; | ||
212 | |||
213 | struct kvm_segment { | ||
214 | __u64 base; | ||
215 | __u32 limit; | ||
216 | __u16 selector; | ||
217 | __u8 type; | ||
218 | __u8 present, dpl, db, s, l, g, avl; | ||
219 | __u8 unusable; | ||
220 | __u8 padding; | ||
221 | }; | ||
222 | |||
223 | struct kvm_dtable { | ||
224 | __u64 base; | ||
225 | __u16 limit; | ||
226 | __u16 padding[3]; | ||
227 | }; | ||
228 | |||
229 | /* for KVM_GET_SREGS and KVM_SET_SREGS */ | ||
230 | struct kvm_sregs { | ||
231 | /* out (KVM_GET_SREGS) / in (KVM_SET_SREGS) */ | ||
232 | struct kvm_segment cs, ds, es, fs, gs, ss; | ||
233 | struct kvm_segment tr, ldt; | ||
234 | struct kvm_dtable gdt, idt; | ||
235 | __u64 cr0, cr2, cr3, cr4, cr8; | ||
236 | __u64 efer; | ||
237 | __u64 apic_base; | ||
238 | __u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64]; | ||
239 | }; | ||
240 | |||
241 | struct kvm_msr_entry { | ||
242 | __u32 index; | ||
243 | __u32 reserved; | ||
244 | __u64 data; | ||
245 | }; | ||
246 | |||
247 | /* for KVM_GET_MSRS and KVM_SET_MSRS */ | ||
248 | struct kvm_msrs { | ||
249 | __u32 nmsrs; /* number of msrs in entries */ | ||
250 | __u32 pad; | ||
251 | |||
252 | struct kvm_msr_entry entries[0]; | ||
253 | }; | ||
254 | |||
255 | /* for KVM_GET_MSR_INDEX_LIST */ | ||
256 | struct kvm_msr_list { | ||
257 | __u32 nmsrs; /* number of msrs in entries */ | ||
258 | __u32 indices[0]; | ||
259 | }; | ||
260 | |||
261 | /* for KVM_TRANSLATE */ | 145 | /* for KVM_TRANSLATE */ |
262 | struct kvm_translation { | 146 | struct kvm_translation { |
263 | /* in */ | 147 | /* in */ |
@@ -302,28 +186,24 @@ struct kvm_dirty_log { | |||
302 | }; | 186 | }; |
303 | }; | 187 | }; |
304 | 188 | ||
305 | struct kvm_cpuid_entry { | ||
306 | __u32 function; | ||
307 | __u32 eax; | ||
308 | __u32 ebx; | ||
309 | __u32 ecx; | ||
310 | __u32 edx; | ||
311 | __u32 padding; | ||
312 | }; | ||
313 | |||
314 | /* for KVM_SET_CPUID */ | ||
315 | struct kvm_cpuid { | ||
316 | __u32 nent; | ||
317 | __u32 padding; | ||
318 | struct kvm_cpuid_entry entries[0]; | ||
319 | }; | ||
320 | |||
321 | /* for KVM_SET_SIGNAL_MASK */ | 189 | /* for KVM_SET_SIGNAL_MASK */ |
322 | struct kvm_signal_mask { | 190 | struct kvm_signal_mask { |
323 | __u32 len; | 191 | __u32 len; |
324 | __u8 sigset[0]; | 192 | __u8 sigset[0]; |
325 | }; | 193 | }; |
326 | 194 | ||
195 | /* for KVM_TPR_ACCESS_REPORTING */ | ||
196 | struct kvm_tpr_access_ctl { | ||
197 | __u32 enabled; | ||
198 | __u32 flags; | ||
199 | __u32 reserved[8]; | ||
200 | }; | ||
201 | |||
202 | /* for KVM_SET_VAPIC_ADDR */ | ||
203 | struct kvm_vapic_addr { | ||
204 | __u64 vapic_addr; | ||
205 | }; | ||
206 | |||
327 | #define KVMIO 0xAE | 207 | #define KVMIO 0xAE |
328 | 208 | ||
329 | /* | 209 | /* |
@@ -347,11 +227,21 @@ struct kvm_signal_mask { | |||
347 | */ | 227 | */ |
348 | #define KVM_CAP_IRQCHIP 0 | 228 | #define KVM_CAP_IRQCHIP 0 |
349 | #define KVM_CAP_HLT 1 | 229 | #define KVM_CAP_HLT 1 |
230 | #define KVM_CAP_MMU_SHADOW_CACHE_CONTROL 2 | ||
231 | #define KVM_CAP_USER_MEMORY 3 | ||
232 | #define KVM_CAP_SET_TSS_ADDR 4 | ||
233 | #define KVM_CAP_EXT_CPUID 5 | ||
234 | #define KVM_CAP_VAPIC 6 | ||
350 | 235 | ||
351 | /* | 236 | /* |
352 | * ioctls for VM fds | 237 | * ioctls for VM fds |
353 | */ | 238 | */ |
354 | #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) | 239 | #define KVM_SET_MEMORY_REGION _IOW(KVMIO, 0x40, struct kvm_memory_region) |
240 | #define KVM_SET_NR_MMU_PAGES _IO(KVMIO, 0x44) | ||
241 | #define KVM_GET_NR_MMU_PAGES _IO(KVMIO, 0x45) | ||
242 | #define KVM_SET_USER_MEMORY_REGION _IOW(KVMIO, 0x46,\ | ||
243 | struct kvm_userspace_memory_region) | ||
244 | #define KVM_SET_TSS_ADDR _IO(KVMIO, 0x47) | ||
355 | /* | 245 | /* |
356 | * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns | 246 | * KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns |
357 | * a vcpu fd. | 247 | * a vcpu fd. |
@@ -359,6 +249,7 @@ struct kvm_signal_mask { | |||
359 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) | 249 | #define KVM_CREATE_VCPU _IO(KVMIO, 0x41) |
360 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) | 250 | #define KVM_GET_DIRTY_LOG _IOW(KVMIO, 0x42, struct kvm_dirty_log) |
361 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) | 251 | #define KVM_SET_MEMORY_ALIAS _IOW(KVMIO, 0x43, struct kvm_memory_alias) |
252 | #define KVM_GET_SUPPORTED_CPUID _IOWR(KVMIO, 0x48, struct kvm_cpuid2) | ||
362 | /* Device model IOC */ | 253 | /* Device model IOC */ |
363 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) | 254 | #define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60) |
364 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) | 255 | #define KVM_IRQ_LINE _IOW(KVMIO, 0x61, struct kvm_irq_level) |
@@ -384,5 +275,11 @@ struct kvm_signal_mask { | |||
384 | #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) | 275 | #define KVM_SET_FPU _IOW(KVMIO, 0x8d, struct kvm_fpu) |
385 | #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) | 276 | #define KVM_GET_LAPIC _IOR(KVMIO, 0x8e, struct kvm_lapic_state) |
386 | #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) | 277 | #define KVM_SET_LAPIC _IOW(KVMIO, 0x8f, struct kvm_lapic_state) |
278 | #define KVM_SET_CPUID2 _IOW(KVMIO, 0x90, struct kvm_cpuid2) | ||
279 | #define KVM_GET_CPUID2 _IOWR(KVMIO, 0x91, struct kvm_cpuid2) | ||
280 | /* Available with KVM_CAP_VAPIC */ | ||
281 | #define KVM_TPR_ACCESS_REPORTING _IOWR(KVMIO, 0x92, struct kvm_tpr_access_ctl) | ||
282 | /* Available with KVM_CAP_VAPIC */ | ||
283 | #define KVM_SET_VAPIC_ADDR _IOW(KVMIO, 0x93, struct kvm_vapic_addr) | ||
387 | 284 | ||
388 | #endif | 285 | #endif |
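[Editor's note, not part of the commit: a userspace sketch of the new KVM_SET_USER_MEMORY_REGION ioctl and kvm_userspace_memory_region layout added above. Error handling is omitted, slot 0 is arbitrary, and vm_fd is assumed to come from KVM_CREATE_VM.]

#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int demo_add_guest_ram(int vm_fd, __u64 guest_phys_addr, size_t size)
{
	void *host_mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	struct kvm_userspace_memory_region region = {
		.slot            = 0,
		.flags           = 0,			/* or KVM_MEM_LOG_DIRTY_PAGES */
		.guest_phys_addr = guest_phys_addr,
		.memory_size     = size,
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}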
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
new file mode 100644
index 000000000000..ea4764b0a2f4
--- /dev/null
+++ b/include/linux/kvm_host.h
@@ -0,0 +1,299 @@ | |||
1 | #ifndef __KVM_HOST_H | ||
2 | #define __KVM_HOST_H | ||
3 | |||
4 | /* | ||
5 | * This work is licensed under the terms of the GNU GPL, version 2. See | ||
6 | * the COPYING file in the top-level directory. | ||
7 | */ | ||
8 | |||
9 | #include <linux/types.h> | ||
10 | #include <linux/hardirq.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/mutex.h> | ||
13 | #include <linux/spinlock.h> | ||
14 | #include <linux/signal.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/mm.h> | ||
17 | #include <linux/preempt.h> | ||
18 | #include <asm/signal.h> | ||
19 | |||
20 | #include <linux/kvm.h> | ||
21 | #include <linux/kvm_para.h> | ||
22 | |||
23 | #include <linux/kvm_types.h> | ||
24 | |||
25 | #include <asm/kvm_host.h> | ||
26 | |||
27 | #define KVM_MAX_VCPUS 4 | ||
28 | #define KVM_MEMORY_SLOTS 8 | ||
29 | /* memory slots that does not exposed to userspace */ | ||
30 | #define KVM_PRIVATE_MEM_SLOTS 4 | ||
31 | |||
32 | #define KVM_PIO_PAGE_OFFSET 1 | ||
33 | |||
34 | /* | ||
35 | * vcpu->requests bit members | ||
36 | */ | ||
37 | #define KVM_REQ_TLB_FLUSH 0 | ||
38 | #define KVM_REQ_MIGRATE_TIMER 1 | ||
39 | #define KVM_REQ_REPORT_TPR_ACCESS 2 | ||
40 | |||
41 | struct kvm_vcpu; | ||
42 | extern struct kmem_cache *kvm_vcpu_cache; | ||
43 | |||
44 | struct kvm_guest_debug { | ||
45 | int enabled; | ||
46 | unsigned long bp[4]; | ||
47 | int singlestep; | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * It would be nice to use something smarter than a linear search, TBD... | ||
52 | * Thankfully we dont expect many devices to register (famous last words :), | ||
53 | * so until then it will suffice. At least its abstracted so we can change | ||
54 | * in one place. | ||
55 | */ | ||
56 | struct kvm_io_bus { | ||
57 | int dev_count; | ||
58 | #define NR_IOBUS_DEVS 6 | ||
59 | struct kvm_io_device *devs[NR_IOBUS_DEVS]; | ||
60 | }; | ||
61 | |||
62 | void kvm_io_bus_init(struct kvm_io_bus *bus); | ||
63 | void kvm_io_bus_destroy(struct kvm_io_bus *bus); | ||
64 | struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus, gpa_t addr); | ||
65 | void kvm_io_bus_register_dev(struct kvm_io_bus *bus, | ||
66 | struct kvm_io_device *dev); | ||
67 | |||
68 | struct kvm_vcpu { | ||
69 | struct kvm *kvm; | ||
70 | struct preempt_notifier preempt_notifier; | ||
71 | int vcpu_id; | ||
72 | struct mutex mutex; | ||
73 | int cpu; | ||
74 | struct kvm_run *run; | ||
75 | int guest_mode; | ||
76 | unsigned long requests; | ||
77 | struct kvm_guest_debug guest_debug; | ||
78 | int fpu_active; | ||
79 | int guest_fpu_loaded; | ||
80 | wait_queue_head_t wq; | ||
81 | int sigset_active; | ||
82 | sigset_t sigset; | ||
83 | struct kvm_vcpu_stat stat; | ||
84 | |||
85 | #ifdef CONFIG_HAS_IOMEM | ||
86 | int mmio_needed; | ||
87 | int mmio_read_completed; | ||
88 | int mmio_is_write; | ||
89 | int mmio_size; | ||
90 | unsigned char mmio_data[8]; | ||
91 | gpa_t mmio_phys_addr; | ||
92 | #endif | ||
93 | |||
94 | struct kvm_vcpu_arch arch; | ||
95 | }; | ||
96 | |||
97 | struct kvm_memory_slot { | ||
98 | gfn_t base_gfn; | ||
99 | unsigned long npages; | ||
100 | unsigned long flags; | ||
101 | unsigned long *rmap; | ||
102 | unsigned long *dirty_bitmap; | ||
103 | unsigned long userspace_addr; | ||
104 | int user_alloc; | ||
105 | }; | ||
106 | |||
107 | struct kvm { | ||
108 | struct mutex lock; /* protects the vcpus array and APIC accesses */ | ||
109 | spinlock_t mmu_lock; | ||
110 | struct mm_struct *mm; /* userspace tied to this vm */ | ||
111 | int nmemslots; | ||
112 | struct kvm_memory_slot memslots[KVM_MEMORY_SLOTS + | ||
113 | KVM_PRIVATE_MEM_SLOTS]; | ||
114 | struct kvm_vcpu *vcpus[KVM_MAX_VCPUS]; | ||
115 | struct list_head vm_list; | ||
116 | struct file *filp; | ||
117 | struct kvm_io_bus mmio_bus; | ||
118 | struct kvm_io_bus pio_bus; | ||
119 | struct kvm_vm_stat stat; | ||
120 | struct kvm_arch arch; | ||
121 | }; | ||
122 | |||
123 | /* The guest did something we don't support. */ | ||
124 | #define pr_unimpl(vcpu, fmt, ...) \ | ||
125 | do { \ | ||
126 | if (printk_ratelimit()) \ | ||
127 | printk(KERN_ERR "kvm: %i: cpu%i " fmt, \ | ||
128 | current->tgid, (vcpu)->vcpu_id , ## __VA_ARGS__); \ | ||
129 | } while (0) | ||
130 | |||
131 | #define kvm_printf(kvm, fmt ...) printk(KERN_DEBUG fmt) | ||
132 | #define vcpu_printf(vcpu, fmt...) kvm_printf(vcpu->kvm, fmt) | ||
133 | |||
134 | int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id); | ||
135 | void kvm_vcpu_uninit(struct kvm_vcpu *vcpu); | ||
136 | |||
137 | void vcpu_load(struct kvm_vcpu *vcpu); | ||
138 | void vcpu_put(struct kvm_vcpu *vcpu); | ||
139 | |||
140 | void decache_vcpus_on_cpu(int cpu); | ||
141 | |||
142 | |||
143 | int kvm_init(void *opaque, unsigned int vcpu_size, | ||
144 | struct module *module); | ||
145 | void kvm_exit(void); | ||
146 | |||
147 | #define HPA_MSB ((sizeof(hpa_t) * 8) - 1) | ||
148 | #define HPA_ERR_MASK ((hpa_t)1 << HPA_MSB) | ||
149 | static inline int is_error_hpa(hpa_t hpa) { return hpa >> HPA_MSB; } | ||
150 | struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva); | ||
151 | |||
152 | extern struct page *bad_page; | ||
153 | |||
154 | int is_error_page(struct page *page); | ||
155 | int kvm_is_error_hva(unsigned long addr); | ||
156 | int kvm_set_memory_region(struct kvm *kvm, | ||
157 | struct kvm_userspace_memory_region *mem, | ||
158 | int user_alloc); | ||
159 | int __kvm_set_memory_region(struct kvm *kvm, | ||
160 | struct kvm_userspace_memory_region *mem, | ||
161 | int user_alloc); | ||
162 | int kvm_arch_set_memory_region(struct kvm *kvm, | ||
163 | struct kvm_userspace_memory_region *mem, | ||
164 | struct kvm_memory_slot old, | ||
165 | int user_alloc); | ||
166 | gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn); | ||
167 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | ||
168 | void kvm_release_page_clean(struct page *page); | ||
169 | void kvm_release_page_dirty(struct page *page); | ||
170 | int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset, | ||
171 | int len); | ||
172 | int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data, | ||
173 | unsigned long len); | ||
174 | int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len); | ||
175 | int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data, | ||
176 | int offset, int len); | ||
177 | int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, | ||
178 | unsigned long len); | ||
179 | int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); | ||
180 | int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); | ||
181 | struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); | ||
182 | int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn); | ||
183 | void mark_page_dirty(struct kvm *kvm, gfn_t gfn); | ||
184 | |||
185 | void kvm_vcpu_block(struct kvm_vcpu *vcpu); | ||
186 | void kvm_resched(struct kvm_vcpu *vcpu); | ||
187 | void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); | ||
188 | void kvm_put_guest_fpu(struct kvm_vcpu *vcpu); | ||
189 | void kvm_flush_remote_tlbs(struct kvm *kvm); | ||
190 | |||
191 | long kvm_arch_dev_ioctl(struct file *filp, | ||
192 | unsigned int ioctl, unsigned long arg); | ||
193 | long kvm_arch_vcpu_ioctl(struct file *filp, | ||
194 | unsigned int ioctl, unsigned long arg); | ||
195 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | ||
196 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | ||
197 | |||
198 | int kvm_dev_ioctl_check_extension(long ext); | ||
199 | |||
200 | int kvm_get_dirty_log(struct kvm *kvm, | ||
201 | struct kvm_dirty_log *log, int *is_dirty); | ||
202 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, | ||
203 | struct kvm_dirty_log *log); | ||
204 | |||
205 | int kvm_vm_ioctl_set_memory_region(struct kvm *kvm, | ||
206 | struct | ||
207 | kvm_userspace_memory_region *mem, | ||
208 | int user_alloc); | ||
209 | long kvm_arch_vm_ioctl(struct file *filp, | ||
210 | unsigned int ioctl, unsigned long arg); | ||
211 | void kvm_arch_destroy_vm(struct kvm *kvm); | ||
212 | |||
213 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | ||
214 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu); | ||
215 | |||
216 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
217 | struct kvm_translation *tr); | ||
218 | |||
219 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | ||
220 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs); | ||
221 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
222 | struct kvm_sregs *sregs); | ||
223 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
224 | struct kvm_sregs *sregs); | ||
225 | int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu, | ||
226 | struct kvm_debug_guest *dbg); | ||
227 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); | ||
228 | |||
229 | int kvm_arch_init(void *opaque); | ||
230 | void kvm_arch_exit(void); | ||
231 | |||
232 | int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); | ||
233 | void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu); | ||
234 | |||
235 | void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu); | ||
236 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu); | ||
237 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu); | ||
238 | struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id); | ||
239 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu); | ||
240 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu); | ||
241 | |||
242 | int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu); | ||
243 | void kvm_arch_hardware_enable(void *garbage); | ||
244 | void kvm_arch_hardware_disable(void *garbage); | ||
245 | int kvm_arch_hardware_setup(void); | ||
246 | void kvm_arch_hardware_unsetup(void); | ||
247 | void kvm_arch_check_processor_compat(void *rtn); | ||
248 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); | ||
249 | |||
250 | void kvm_free_physmem(struct kvm *kvm); | ||
251 | |||
252 | struct kvm *kvm_arch_create_vm(void); | ||
253 | void kvm_arch_destroy_vm(struct kvm *kvm); | ||
254 | |||
255 | int kvm_cpu_get_interrupt(struct kvm_vcpu *v); | ||
256 | int kvm_cpu_has_interrupt(struct kvm_vcpu *v); | ||
257 | void kvm_vcpu_kick(struct kvm_vcpu *vcpu); | ||
258 | |||
259 | static inline void kvm_guest_enter(void) | ||
260 | { | ||
261 | account_system_vtime(current); | ||
262 | current->flags |= PF_VCPU; | ||
263 | } | ||
264 | |||
265 | static inline void kvm_guest_exit(void) | ||
266 | { | ||
267 | account_system_vtime(current); | ||
268 | current->flags &= ~PF_VCPU; | ||
269 | } | ||
270 | |||
271 | static inline int memslot_id(struct kvm *kvm, struct kvm_memory_slot *slot) | ||
272 | { | ||
273 | return slot - kvm->memslots; | ||
274 | } | ||
275 | |||
276 | static inline gpa_t gfn_to_gpa(gfn_t gfn) | ||
277 | { | ||
278 | return (gpa_t)gfn << PAGE_SHIFT; | ||
279 | } | ||
280 | |||
281 | static inline void kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) | ||
282 | { | ||
283 | set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests); | ||
284 | } | ||
285 | |||
286 | enum kvm_stat_kind { | ||
287 | KVM_STAT_VM, | ||
288 | KVM_STAT_VCPU, | ||
289 | }; | ||
290 | |||
291 | struct kvm_stats_debugfs_item { | ||
292 | const char *name; | ||
293 | int offset; | ||
294 | enum kvm_stat_kind kind; | ||
295 | struct dentry *dentry; | ||
296 | }; | ||
297 | extern struct kvm_stats_debugfs_item debugfs_entries[]; | ||
298 | |||
299 | #endif | ||
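[Editor's note, not part of the header: vcpu->requests is a plain bitmask, so raising any KVM_REQ_* request follows the same pattern as kvm_migrate_apic_timer() above. A hypothetical helper:]

static inline void demo_request_tlb_flush(struct kvm_vcpu *vcpu)
{
	/* the vcpu's run loop is expected to notice the bit and flush its TLB */
	set_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests);
}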
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h
index 3b292565a693..5497aac0d2f8 100644
--- a/include/linux/kvm_para.h
+++ b/include/linux/kvm_para.h
@@ -2,72 +2,30 @@ | |||
2 | #define __LINUX_KVM_PARA_H | 2 | #define __LINUX_KVM_PARA_H |
3 | 3 | ||
4 | /* | 4 | /* |
5 | * Guest OS interface for KVM paravirtualization | 5 | * This header file provides a method for making a hypercall to the host |
6 | * | 6 | * Architectures should define: |
7 | * Note: this interface is totally experimental, and is certain to change | 7 | * - kvm_hypercall0, kvm_hypercall1... |
8 | * as we make progress. | 8 | * - kvm_arch_para_features |
9 | * - kvm_para_available | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | /* | 12 | /* Return values for hypercalls */ |
12 | * Per-VCPU descriptor area shared between guest and host. Writable to | 13 | #define KVM_ENOSYS 1000 |
13 | * both guest and host. Registered with the host by the guest when | ||
14 | * a guest acknowledges paravirtual mode. | ||
15 | * | ||
16 | * NOTE: all addresses are guest-physical addresses (gpa), to make it | ||
17 | * easier for the hypervisor to map between the various addresses. | ||
18 | */ | ||
19 | struct kvm_vcpu_para_state { | ||
20 | /* | ||
21 | * API version information for compatibility. If there's any support | ||
22 | * mismatch (too old host trying to execute too new guest) then | ||
23 | * the host will deny entry into paravirtual mode. Any other | ||
24 | * combination (new host + old guest and new host + new guest) | ||
25 | * is supposed to work - new host versions will support all old | ||
26 | * guest API versions. | ||
27 | */ | ||
28 | u32 guest_version; | ||
29 | u32 host_version; | ||
30 | u32 size; | ||
31 | u32 ret; | ||
32 | |||
33 | /* | ||
34 | * The address of the vm exit instruction (VMCALL or VMMCALL), | ||
35 | * which the host will patch according to the CPU model the | ||
36 | * VM runs on: | ||
37 | */ | ||
38 | u64 hypercall_gpa; | ||
39 | |||
40 | } __attribute__ ((aligned(PAGE_SIZE))); | ||
41 | |||
42 | #define KVM_PARA_API_VERSION 1 | ||
43 | |||
44 | /* | ||
45 | * This is used for an RDMSR's ECX parameter to probe for a KVM host. | ||
46 | * Hopefully no CPU vendor will use up this number. This is placed well | ||
47 | * out of way of the typical space occupied by CPU vendors' MSR indices, | ||
48 | * and we think (or at least hope) it wont be occupied in the future | ||
49 | * either. | ||
50 | */ | ||
51 | #define MSR_KVM_API_MAGIC 0x87655678 | ||
52 | 14 | ||
53 | #define KVM_EINVAL 1 | 15 | #define KVM_HC_VAPIC_POLL_IRQ 1 |
54 | 16 | ||
55 | /* | 17 | /* |
56 | * Hypercall calling convention: | 18 | * hypercalls use architecture specific |
57 | * | ||
58 | * Each hypercall may have 0-6 parameters. | ||
59 | * | ||
60 | * 64-bit hypercall index is in RAX, goes from 0 to __NR_hypercalls-1 | ||
61 | * | ||
62 | * 64-bit parameters 1-6 are in the standard gcc x86_64 calling convention | ||
63 | * order: RDI, RSI, RDX, RCX, R8, R9. | ||
64 | * | ||
65 | * 32-bit index is EBX, parameters are: EAX, ECX, EDX, ESI, EDI, EBP. | ||
66 | * (the first 3 are according to the gcc regparm calling convention) | ||
67 | * | ||
68 | * No registers are clobbered by the hypercall, except that the | ||
69 | * return value is in RAX. | ||
70 | */ | 19 | */ |
71 | #define __NR_hypercalls 0 | 20 | #include <asm/kvm_para.h> |
21 | |||
22 | #ifdef __KERNEL__ | ||
23 | static inline int kvm_para_has_feature(unsigned int feature) | ||
24 | { | ||
25 | if (kvm_arch_para_features() & (1UL << feature)) | ||
26 | return 1; | ||
27 | return 0; | ||
28 | } | ||
29 | #endif /* __KERNEL__ */ | ||
30 | #endif /* __LINUX_KVM_PARA_H */ | ||
72 | 31 | ||
73 | #endif | ||
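[Editor's note, not part of the header: per the new comment, kvm_para_available() and the feature bit numbers come from the arch's asm/kvm_para.h. A guest-side sketch; DEMO_KVM_FEATURE is a made-up bit used purely for illustration.]

#include <linux/kvm_para.h>

#define DEMO_KVM_FEATURE 0	/* hypothetical feature bit; real bits live in asm/kvm_para.h */

static int demo_running_on_kvm_with_feature(void)
{
	if (!kvm_para_available())
		return 0;
	return kvm_para_has_feature(DEMO_KVM_FEATURE);
}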
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
new file mode 100644
index 000000000000..1c4e46decb22
--- /dev/null
+++ b/include/linux/kvm_types.h
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __KVM_TYPES_H__ | ||
18 | #define __KVM_TYPES_H__ | ||
19 | |||
20 | #include <asm/types.h> | ||
21 | |||
22 | /* | ||
23 | * Address types: | ||
24 | * | ||
25 | * gva - guest virtual address | ||
26 | * gpa - guest physical address | ||
27 | * gfn - guest frame number | ||
28 | * hva - host virtual address | ||
29 | * hpa - host physical address | ||
30 | * hfn - host frame number | ||
31 | */ | ||
32 | |||
33 | typedef unsigned long gva_t; | ||
34 | typedef u64 gpa_t; | ||
35 | typedef unsigned long gfn_t; | ||
36 | |||
37 | typedef unsigned long hva_t; | ||
38 | typedef u64 hpa_t; | ||
39 | typedef unsigned long hfn_t; | ||
40 | |||
41 | struct kvm_pio_request { | ||
42 | unsigned long count; | ||
43 | int cur_count; | ||
44 | struct page *guest_pages[2]; | ||
45 | unsigned guest_page_offset; | ||
46 | int in; | ||
47 | int port; | ||
48 | int size; | ||
49 | int string; | ||
50 | int down; | ||
51 | int rep; | ||
52 | }; | ||
53 | |||
54 | #endif /* __KVM_TYPES_H__ */ | ||
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index ff203dd02919..3faf599ea58e 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -13,6 +13,10 @@ | |||
13 | #define asmlinkage CPP_ASMLINKAGE | 13 | #define asmlinkage CPP_ASMLINKAGE |
14 | #endif | 14 | #endif |
15 | 15 | ||
16 | #ifndef asmregparm | ||
17 | # define asmregparm | ||
18 | #endif | ||
19 | |||
16 | #ifndef prevent_tail_call | 20 | #ifndef prevent_tail_call |
17 | # define prevent_tail_call(ret) do { } while (0) | 21 | # define prevent_tail_call(ret) do { } while (0) |
18 | #endif | 22 | #endif |
@@ -53,6 +57,10 @@ | |||
53 | .size name, .-name | 57 | .size name, .-name |
54 | #endif | 58 | #endif |
55 | 59 | ||
60 | /* If symbol 'name' is treated as a subroutine (gets called, and returns) | ||
61 | * then please use ENDPROC to mark 'name' as STT_FUNC for the benefit of | ||
62 | * static analysis tools such as stack depth analyzer. | ||
63 | */ | ||
56 | #ifndef ENDPROC | 64 | #ifndef ENDPROC |
57 | #define ENDPROC(name) \ | 65 | #define ENDPROC(name) \ |
58 | .type name, @function; \ | 66 | .type name, @function; \ |
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 6f1637c61e10..3d25bcd139d1 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -33,9 +33,26 @@ struct nlmsvc_binding { | |||
33 | extern struct nlmsvc_binding * nlmsvc_ops; | 33 | extern struct nlmsvc_binding * nlmsvc_ops; |
34 | 34 | ||
35 | /* | 35 | /* |
36 | * Similar to nfs_client_initdata, but without the NFS-specific | ||
37 | * rpc_ops field. | ||
38 | */ | ||
39 | struct nlmclnt_initdata { | ||
40 | const char *hostname; | ||
41 | const struct sockaddr *address; | ||
42 | size_t addrlen; | ||
43 | unsigned short protocol; | ||
44 | u32 nfs_version; | ||
45 | }; | ||
46 | |||
47 | /* | ||
36 | * Functions exported by the lockd module | 48 | * Functions exported by the lockd module |
37 | */ | 49 | */ |
38 | extern int nlmclnt_proc(struct inode *, int, struct file_lock *); | 50 | |
51 | extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init); | ||
52 | extern void nlmclnt_done(struct nlm_host *host); | ||
53 | |||
54 | extern int nlmclnt_proc(struct nlm_host *host, int cmd, | ||
55 | struct file_lock *fl); | ||
39 | extern int lockd_up(int proto); | 56 | extern int lockd_up(int proto); |
40 | extern void lockd_down(void); | 57 | extern void lockd_down(void); |
41 | 58 | ||
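[Editor's note, not part of the commit: a hypothetical NFS-side caller filling the new nlmclnt_initdata and obtaining an nlm_host handle; the field values are illustrative, and real callers live in fs/nfs and fs/lockd.]

#include <linux/in.h>
#include <linux/lockd/bind.h>

static struct nlm_host *demo_bind_lockd(const struct sockaddr *addr,
					size_t addrlen, const char *hostname)
{
	struct nlmclnt_initdata nlm_init = {
		.hostname    = hostname,
		.address     = addr,
		.addrlen     = addrlen,
		.protocol    = IPPROTO_TCP,
		.nfs_version = 3,
	};

	/* pair with nlmclnt_done(host) when the mount goes away */
	return nlmclnt_init(&nlm_init);
}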
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1897ca223eca..1bba6789a50a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1118,9 +1118,21 @@ static inline void vm_stat_account(struct mm_struct *mm, | |||
1118 | } | 1118 | } |
1119 | #endif /* CONFIG_PROC_FS */ | 1119 | #endif /* CONFIG_PROC_FS */ |
1120 | 1120 | ||
1121 | #ifndef CONFIG_DEBUG_PAGEALLOC | 1121 | #ifdef CONFIG_DEBUG_PAGEALLOC |
1122 | extern int debug_pagealloc_enabled; | ||
1123 | |||
1124 | extern void kernel_map_pages(struct page *page, int numpages, int enable); | ||
1125 | |||
1126 | static inline void enable_debug_pagealloc(void) | ||
1127 | { | ||
1128 | debug_pagealloc_enabled = 1; | ||
1129 | } | ||
1130 | #else | ||
1122 | static inline void | 1131 | static inline void |
1123 | kernel_map_pages(struct page *page, int numpages, int enable) {} | 1132 | kernel_map_pages(struct page *page, int numpages, int enable) {} |
1133 | static inline void enable_debug_pagealloc(void) | ||
1134 | { | ||
1135 | } | ||
1124 | #endif | 1136 | #endif |
1125 | 1137 | ||
1126 | extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk); | 1138 | extern struct vm_area_struct *get_gate_vma(struct task_struct *tsk); |
@@ -1146,6 +1158,7 @@ extern int randomize_va_space; | |||
1146 | #endif | 1158 | #endif |
1147 | 1159 | ||
1148 | const char * arch_vma_name(struct vm_area_struct *vma); | 1160 | const char * arch_vma_name(struct vm_area_struct *vma); |
1161 | void print_vma_addr(char *prefix, unsigned long rip); | ||
1149 | 1162 | ||
1150 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); | 1163 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); |
1151 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); | 1164 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); |
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2d15d4aac094..099ddb4481c0 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -196,28 +196,67 @@ struct nfs_inode { | |||
196 | #define NFS_INO_STALE (2) /* possible stale inode */ | 196 | #define NFS_INO_STALE (2) /* possible stale inode */ |
197 | #define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */ | 197 | #define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */ |
198 | 198 | ||
199 | static inline struct nfs_inode *NFS_I(struct inode *inode) | 199 | static inline struct nfs_inode *NFS_I(const struct inode *inode) |
200 | { | 200 | { |
201 | return container_of(inode, struct nfs_inode, vfs_inode); | 201 | return container_of(inode, struct nfs_inode, vfs_inode); |
202 | } | 202 | } |
203 | #define NFS_SB(s) ((struct nfs_server *)(s->s_fs_info)) | ||
204 | 203 | ||
205 | #define NFS_FH(inode) (&NFS_I(inode)->fh) | 204 | static inline struct nfs_server *NFS_SB(const struct super_block *s) |
206 | #define NFS_SERVER(inode) (NFS_SB(inode->i_sb)) | 205 | { |
207 | #define NFS_CLIENT(inode) (NFS_SERVER(inode)->client) | 206 | return (struct nfs_server *)(s->s_fs_info); |
208 | #define NFS_PROTO(inode) (NFS_SERVER(inode)->nfs_client->rpc_ops) | 207 | } |
209 | #define NFS_COOKIEVERF(inode) (NFS_I(inode)->cookieverf) | 208 | |
210 | #define NFS_MINATTRTIMEO(inode) \ | 209 | static inline struct nfs_fh *NFS_FH(const struct inode *inode) |
211 | (S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmin \ | 210 | { |
212 | : NFS_SERVER(inode)->acregmin) | 211 | return &NFS_I(inode)->fh; |
213 | #define NFS_MAXATTRTIMEO(inode) \ | 212 | } |
214 | (S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmax \ | 213 | |
215 | : NFS_SERVER(inode)->acregmax) | 214 | static inline struct nfs_server *NFS_SERVER(const struct inode *inode) |
215 | { | ||
216 | return NFS_SB(inode->i_sb); | ||
217 | } | ||
218 | |||
219 | static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode) | ||
220 | { | ||
221 | return NFS_SERVER(inode)->client; | ||
222 | } | ||
223 | |||
224 | static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode) | ||
225 | { | ||
226 | return NFS_SERVER(inode)->nfs_client->rpc_ops; | ||
227 | } | ||
228 | |||
229 | static inline __be32 *NFS_COOKIEVERF(const struct inode *inode) | ||
230 | { | ||
231 | return NFS_I(inode)->cookieverf; | ||
232 | } | ||
233 | |||
234 | static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode) | ||
235 | { | ||
236 | struct nfs_server *nfss = NFS_SERVER(inode); | ||
237 | return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin; | ||
238 | } | ||
216 | 239 | ||
217 | #define NFS_FLAGS(inode) (NFS_I(inode)->flags) | 240 | static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode) |
218 | #define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode))) | 241 | { |
242 | struct nfs_server *nfss = NFS_SERVER(inode); | ||
243 | return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax; | ||
244 | } | ||
219 | 245 | ||
220 | #define NFS_FILEID(inode) (NFS_I(inode)->fileid) | 246 | static inline int NFS_STALE(const struct inode *inode) |
247 | { | ||
248 | return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); | ||
249 | } | ||
250 | |||
251 | static inline __u64 NFS_FILEID(const struct inode *inode) | ||
252 | { | ||
253 | return NFS_I(inode)->fileid; | ||
254 | } | ||
255 | |||
256 | static inline void set_nfs_fileid(struct inode *inode, __u64 fileid) | ||
257 | { | ||
258 | NFS_I(inode)->fileid = fileid; | ||
259 | } | ||
221 | 260 | ||
222 | static inline void nfs_mark_for_revalidate(struct inode *inode) | 261 | static inline void nfs_mark_for_revalidate(struct inode *inode) |
223 | { | 262 | { |
@@ -237,7 +276,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap) | |||
237 | 276 | ||
238 | static inline int NFS_USE_READDIRPLUS(struct inode *inode) | 277 | static inline int NFS_USE_READDIRPLUS(struct inode *inode) |
239 | { | 278 | { |
240 | return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode)); | 279 | return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags); |
241 | } | 280 | } |
242 | 281 | ||
243 | static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) | 282 | static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) |
@@ -366,6 +405,7 @@ extern const struct inode_operations nfs3_dir_inode_operations; | |||
366 | extern const struct file_operations nfs_dir_operations; | 405 | extern const struct file_operations nfs_dir_operations; |
367 | extern struct dentry_operations nfs_dentry_operations; | 406 | extern struct dentry_operations nfs_dentry_operations; |
368 | 407 | ||
408 | extern void nfs_force_lookup_revalidate(struct inode *dir); | ||
369 | extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); | 409 | extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr); |
370 | extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); | 410 | extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags); |
371 | extern void nfs_access_zap_cache(struct inode *inode); | 411 | extern void nfs_access_zap_cache(struct inode *inode); |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 0cac49bc0955..3423c6761bf7 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -3,8 +3,12 @@ | |||
3 | 3 | ||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/backing-dev.h> | 5 | #include <linux/backing-dev.h> |
6 | #include <linux/wait.h> | ||
7 | |||
8 | #include <asm/atomic.h> | ||
6 | 9 | ||
7 | struct nfs_iostats; | 10 | struct nfs_iostats; |
11 | struct nlm_host; | ||
8 | 12 | ||
9 | /* | 13 | /* |
10 | * The nfs_client identifies our client state to the server. | 14 | * The nfs_client identifies our client state to the server. |
@@ -14,20 +18,19 @@ struct nfs_client { | |||
14 | int cl_cons_state; /* current construction state (-ve: init error) */ | 18 | int cl_cons_state; /* current construction state (-ve: init error) */ |
15 | #define NFS_CS_READY 0 /* ready to be used */ | 19 | #define NFS_CS_READY 0 /* ready to be used */ |
16 | #define NFS_CS_INITING 1 /* busy initialising */ | 20 | #define NFS_CS_INITING 1 /* busy initialising */ |
17 | int cl_nfsversion; /* NFS protocol version */ | ||
18 | unsigned long cl_res_state; /* NFS resources state */ | 21 | unsigned long cl_res_state; /* NFS resources state */ |
19 | #define NFS_CS_CALLBACK 1 /* - callback started */ | 22 | #define NFS_CS_CALLBACK 1 /* - callback started */ |
20 | #define NFS_CS_IDMAP 2 /* - idmap started */ | 23 | #define NFS_CS_IDMAP 2 /* - idmap started */ |
21 | #define NFS_CS_RENEWD 3 /* - renewd started */ | 24 | #define NFS_CS_RENEWD 3 /* - renewd started */ |
22 | struct sockaddr_in cl_addr; /* server identifier */ | 25 | struct sockaddr_storage cl_addr; /* server identifier */ |
26 | size_t cl_addrlen; | ||
23 | char * cl_hostname; /* hostname of server */ | 27 | char * cl_hostname; /* hostname of server */ |
24 | struct list_head cl_share_link; /* link in global client list */ | 28 | struct list_head cl_share_link; /* link in global client list */ |
25 | struct list_head cl_superblocks; /* List of nfs_server structs */ | 29 | struct list_head cl_superblocks; /* List of nfs_server structs */ |
26 | 30 | ||
27 | struct rpc_clnt * cl_rpcclient; | 31 | struct rpc_clnt * cl_rpcclient; |
28 | const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ | 32 | const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */ |
29 | unsigned long retrans_timeo; /* retransmit timeout */ | 33 | int cl_proto; /* Network transport protocol */ |
30 | unsigned int retrans_count; /* number of retransmit tries */ | ||
31 | 34 | ||
32 | #ifdef CONFIG_NFS_V4 | 35 | #ifdef CONFIG_NFS_V4 |
33 | u64 cl_clientid; /* constant */ | 36 | u64 cl_clientid; /* constant */ |
@@ -62,7 +65,7 @@ struct nfs_client { | |||
62 | /* Our own IP address, as a null-terminated string. | 65 | /* Our own IP address, as a null-terminated string. |
63 | * This is used to generate the clientid, and the callback address. | 66 | * This is used to generate the clientid, and the callback address. |
64 | */ | 67 | */ |
65 | char cl_ipaddr[16]; | 68 | char cl_ipaddr[48]; |
66 | unsigned char cl_id_uniquifier; | 69 | unsigned char cl_id_uniquifier; |
67 | #endif | 70 | #endif |
68 | }; | 71 | }; |
@@ -78,6 +81,7 @@ struct nfs_server { | |||
78 | struct list_head master_link; /* link in master servers list */ | 81 | struct list_head master_link; /* link in master servers list */ |
79 | struct rpc_clnt * client; /* RPC client handle */ | 82 | struct rpc_clnt * client; /* RPC client handle */ |
80 | struct rpc_clnt * client_acl; /* ACL RPC client handle */ | 83 | struct rpc_clnt * client_acl; /* ACL RPC client handle */ |
84 | struct nlm_host *nlm_host; /* NLM client handle */ | ||
81 | struct nfs_iostats * io_stats; /* I/O statistics */ | 85 | struct nfs_iostats * io_stats; /* I/O statistics */ |
82 | struct backing_dev_info backing_dev_info; | 86 | struct backing_dev_info backing_dev_info; |
83 | atomic_long_t writeback; /* number of writeback pages */ | 87 | atomic_long_t writeback; /* number of writeback pages */ |
@@ -110,6 +114,9 @@ struct nfs_server { | |||
110 | filesystem */ | 114 | filesystem */ |
111 | #endif | 115 | #endif |
112 | void (*destroy)(struct nfs_server *); | 116 | void (*destroy)(struct nfs_server *); |
117 | |||
118 | 	atomic_t		active; /* Keep track of any activity to this server */ ||
119 | wait_queue_head_t active_wq; /* Wait for any activity to stop */ | ||
113 | }; | 120 | }; |
114 | 121 | ||
115 | /* Server capabilities */ | 122 | /* Server capabilities */ |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index 30dbcc185e69..a1676e19e491 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
@@ -83,6 +83,7 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc); | |||
83 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); | 83 | extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t); |
84 | extern int nfs_wait_on_request(struct nfs_page *); | 84 | extern int nfs_wait_on_request(struct nfs_page *); |
85 | extern void nfs_unlock_request(struct nfs_page *req); | 85 | extern void nfs_unlock_request(struct nfs_page *req); |
86 | extern int nfs_set_page_tag_locked(struct nfs_page *req); | ||
86 | extern void nfs_clear_page_tag_locked(struct nfs_page *req); | 87 | extern void nfs_clear_page_tag_locked(struct nfs_page *req); |
87 | 88 | ||
88 | 89 | ||
@@ -95,18 +96,6 @@ nfs_lock_request_dontget(struct nfs_page *req) | |||
95 | return !test_and_set_bit(PG_BUSY, &req->wb_flags); | 96 | return !test_and_set_bit(PG_BUSY, &req->wb_flags); |
96 | } | 97 | } |
97 | 98 | ||
98 | /* | ||
99 | * Lock the page of an asynchronous request and take a reference | ||
100 | */ | ||
101 | static inline int | ||
102 | nfs_lock_request(struct nfs_page *req) | ||
103 | { | ||
104 | if (test_and_set_bit(PG_BUSY, &req->wb_flags)) | ||
105 | return 0; | ||
106 | kref_get(&req->wb_kref); | ||
107 | return 1; | ||
108 | } | ||
109 | |||
110 | /** | 99 | /** |
111 | * nfs_list_add_request - Insert a request into a list | 100 | * nfs_list_add_request - Insert a request into a list |
112 | * @req: request | 101 | * @req: request |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index daab252f2e5c..f301d0b8babc 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
@@ -666,16 +666,17 @@ struct nfs4_rename_res { | |||
666 | struct nfs_fattr * new_fattr; | 666 | struct nfs_fattr * new_fattr; |
667 | }; | 667 | }; |
668 | 668 | ||
669 | #define NFS4_SETCLIENTID_NAMELEN (56) | ||
669 | struct nfs4_setclientid { | 670 | struct nfs4_setclientid { |
670 | const nfs4_verifier * sc_verifier; /* request */ | 671 | const nfs4_verifier * sc_verifier; |
671 | unsigned int sc_name_len; | 672 | unsigned int sc_name_len; |
672 | char sc_name[48]; /* request */ | 673 | char sc_name[NFS4_SETCLIENTID_NAMELEN]; |
673 | u32 sc_prog; /* request */ | 674 | u32 sc_prog; |
674 | unsigned int sc_netid_len; | 675 | unsigned int sc_netid_len; |
675 | char sc_netid[4]; /* request */ | 676 | char sc_netid[RPCBIND_MAXNETIDLEN]; |
676 | unsigned int sc_uaddr_len; | 677 | unsigned int sc_uaddr_len; |
677 | char sc_uaddr[24]; /* request */ | 678 | char sc_uaddr[RPCBIND_MAXUADDRLEN]; |
678 | u32 sc_cb_ident; /* request */ | 679 | u32 sc_cb_ident; |
679 | }; | 680 | }; |
680 | 681 | ||
681 | struct nfs4_statfs_arg { | 682 | struct nfs4_statfs_arg { |
@@ -773,7 +774,7 @@ struct nfs_access_entry; | |||
773 | * RPC procedure vector for NFSv2/NFSv3 demuxing | 774 | * RPC procedure vector for NFSv2/NFSv3 demuxing |
774 | */ | 775 | */ |
775 | struct nfs_rpc_ops { | 776 | struct nfs_rpc_ops { |
776 | int version; /* Protocol version */ | 777 | u32 version; /* Protocol version */ |
777 | struct dentry_operations *dentry_ops; | 778 | struct dentry_operations *dentry_ops; |
778 | const struct inode_operations *dir_inode_ops; | 779 | const struct inode_operations *dir_inode_ops; |
779 | const struct inode_operations *file_inode_ops; | 780 | const struct inode_operations *file_inode_ops; |
@@ -816,11 +817,11 @@ struct nfs_rpc_ops { | |||
816 | struct nfs_pathconf *); | 817 | struct nfs_pathconf *); |
817 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); | 818 | int (*set_capabilities)(struct nfs_server *, struct nfs_fh *); |
818 | __be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus); | 819 | __be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus); |
819 | void (*read_setup) (struct nfs_read_data *); | 820 | void (*read_setup) (struct nfs_read_data *, struct rpc_message *); |
820 | int (*read_done) (struct rpc_task *, struct nfs_read_data *); | 821 | int (*read_done) (struct rpc_task *, struct nfs_read_data *); |
821 | void (*write_setup) (struct nfs_write_data *, int how); | 822 | void (*write_setup) (struct nfs_write_data *, struct rpc_message *); |
822 | int (*write_done) (struct rpc_task *, struct nfs_write_data *); | 823 | int (*write_done) (struct rpc_task *, struct nfs_write_data *); |
823 | void (*commit_setup) (struct nfs_write_data *, int how); | 824 | void (*commit_setup) (struct nfs_write_data *, struct rpc_message *); |
824 | int (*commit_done) (struct rpc_task *, struct nfs_write_data *); | 825 | int (*commit_done) (struct rpc_task *, struct nfs_write_data *); |
825 | int (*file_open) (struct inode *, struct file *); | 826 | int (*file_open) (struct inode *, struct file *); |
826 | int (*file_release) (struct inode *, struct file *); | 827 | int (*file_release) (struct inode *, struct file *); |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index c69531348363..41f6f28690f6 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2085,6 +2085,13 @@ | |||
2085 | #define PCI_VENDOR_ID_BELKIN 0x1799 | 2085 | #define PCI_VENDOR_ID_BELKIN 0x1799 |
2086 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f | 2086 | #define PCI_DEVICE_ID_BELKIN_F5D7010V7 0x701f |
2087 | 2087 | ||
2088 | #define PCI_VENDOR_ID_RDC 0x17f3 | ||
2089 | #define PCI_DEVICE_ID_RDC_R6020 0x6020 | ||
2090 | #define PCI_DEVICE_ID_RDC_R6030 0x6030 | ||
2091 | #define PCI_DEVICE_ID_RDC_R6040 0x6040 | ||
2092 | #define PCI_DEVICE_ID_RDC_R6060 0x6060 | ||
2093 | #define PCI_DEVICE_ID_RDC_R6061 0x6061 | ||
2094 | |||
2088 | #define PCI_VENDOR_ID_LENOVO 0x17aa | 2095 | #define PCI_VENDOR_ID_LENOVO 0x17aa |
2089 | 2096 | ||
2090 | #define PCI_VENDOR_ID_ARECA 0x17d3 | 2097 | #define PCI_VENDOR_ID_ARECA 0x17d3 |
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 926adaae0f96..00412bb494c4 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -9,6 +9,30 @@ | |||
9 | 9 | ||
10 | #include <asm/percpu.h> | 10 | #include <asm/percpu.h> |
11 | 11 | ||
12 | #ifndef PER_CPU_ATTRIBUTES | ||
13 | #define PER_CPU_ATTRIBUTES | ||
14 | #endif | ||
15 | |||
16 | #ifdef CONFIG_SMP | ||
17 | #define DEFINE_PER_CPU(type, name) \ | ||
18 | __attribute__((__section__(".data.percpu"))) \ | ||
19 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
20 | |||
21 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
22 | __attribute__((__section__(".data.percpu.shared_aligned"))) \ | ||
23 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \ | ||
24 | ____cacheline_aligned_in_smp | ||
25 | #else | ||
26 | #define DEFINE_PER_CPU(type, name) \ | ||
27 | PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name | ||
28 | |||
29 | #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \ | ||
30 | DEFINE_PER_CPU(type, name) | ||
31 | #endif | ||
32 | |||
33 | #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var) | ||
34 | #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var) | ||
35 | |||
12 | /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ | 36 | /* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */ |
13 | #ifndef PERCPU_ENOUGH_ROOM | 37 | #ifndef PERCPU_ENOUGH_ROOM |
14 | #ifdef CONFIG_MODULES | 38 | #ifdef CONFIG_MODULES |
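Editor's note: as context for the DEFINE_PER_CPU/EXPORT_PER_CPU_SYMBOL macros moved into percpu.h above, here is a minimal sketch of how a per-CPU variable might be defined and touched; the counter name and the bump_count() helper are hypothetical, not part of this patch.

	/* Sketch only: a hypothetical per-CPU counter using the macros above. */
	#include <linux/percpu.h>
	#include <linux/smp.h>

	DEFINE_PER_CPU(unsigned long, my_hypothetical_count);
	EXPORT_PER_CPU_SYMBOL(my_hypothetical_count);

	static void bump_count(void)
	{
		/* get_cpu()/put_cpu() pin us to one CPU while touching its copy. */
		int cpu = get_cpu();
		per_cpu(my_hypothetical_count, cpu)++;
		put_cpu();
	}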
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index 3ea5750a0f7e..515bff053de8 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
@@ -129,6 +129,81 @@ int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data); | |||
129 | #define force_successful_syscall_return() do { } while (0) | 129 | #define force_successful_syscall_return() do { } while (0) |
130 | #endif | 130 | #endif |
131 | 131 | ||
132 | /* | ||
133 | * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__. | ||
134 | * | ||
135 | * These do-nothing inlines are used when the arch does not | ||
136 | * implement single-step. The kerneldoc comments are here | ||
137 | * to document the interface for all arch definitions. | ||
138 | */ | ||
139 | |||
140 | #ifndef arch_has_single_step | ||
141 | /** | ||
142 | * arch_has_single_step - does this CPU support user-mode single-step? | ||
143 | * | ||
144 | * If this is defined, then there must be function declarations or | ||
145 | * inlines for user_enable_single_step() and user_disable_single_step(). | ||
146 | * arch_has_single_step() should evaluate to nonzero iff the machine | ||
147 | * supports instruction single-step for user mode. | ||
148 | * It can be a constant or it can test a CPU feature bit. | ||
149 | */ | ||
150 | #define arch_has_single_step() (0) | ||
151 | |||
152 | /** | ||
153 | * user_enable_single_step - single-step in user-mode task | ||
154 | * @task: either current or a task stopped in %TASK_TRACED | ||
155 | * | ||
156 | * This can only be called when arch_has_single_step() has returned nonzero. | ||
157 | * Set @task so that when it returns to user mode, it will trap after the | ||
158 | * next single instruction executes. If arch_has_block_step() is defined, | ||
159 | * this must clear the effects of user_enable_block_step() too. | ||
160 | */ | ||
161 | static inline void user_enable_single_step(struct task_struct *task) | ||
162 | { | ||
163 | BUG(); /* This can never be called. */ | ||
164 | } | ||
165 | |||
166 | /** | ||
167 | * user_disable_single_step - cancel user-mode single-step | ||
168 | * @task: either current or a task stopped in %TASK_TRACED | ||
169 | * | ||
170 | * Clear @task of the effects of user_enable_single_step() and | ||
171 | * user_enable_block_step(). This can be called whether or not either | ||
172 | * of those was ever called on @task, and even if arch_has_single_step() | ||
173 | * returned zero. | ||
174 | */ | ||
175 | static inline void user_disable_single_step(struct task_struct *task) | ||
176 | { | ||
177 | } | ||
178 | #endif /* arch_has_single_step */ | ||
179 | |||
180 | #ifndef arch_has_block_step | ||
181 | /** | ||
182 | * arch_has_block_step - does this CPU support user-mode block-step? | ||
183 | * | ||
184 | * If this is defined, then there must be a function declaration or inline | ||
185 | * for user_enable_block_step(), and arch_has_single_step() must be defined | ||
186 | * too. arch_has_block_step() should evaluate to nonzero iff the machine | ||
187 | * supports step-until-branch for user mode. It can be a constant or it | ||
188 | * can test a CPU feature bit. | ||
189 | */ | ||
190 | #define arch_has_block_step() (0) | ||
191 | |||
192 | /** | ||
193 | * user_enable_block_step - step until branch in user-mode task | ||
194 | * @task: either current or a task stopped in %TASK_TRACED | ||
195 | * | ||
196 | * This can only be called when arch_has_block_step() has returned nonzero, | ||
197 | * and will never be called when single-instruction stepping is being used. | ||
198 | * Set @task so that when it returns to user mode, it will trap after the | ||
199 | * next branch or trap taken. | ||
200 | */ | ||
201 | static inline void user_enable_block_step(struct task_struct *task) | ||
202 | { | ||
203 | BUG(); /* This can never be called. */ | ||
204 | } | ||
205 | #endif /* arch_has_block_step */ | ||
206 | |||
132 | #endif | 207 | #endif |
133 | 208 | ||
134 | #endif | 209 | #endif |
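Editor's note: the fallbacks above only document the interface. A hedged sketch of what an architecture that does support single-step might supply instead, in its own ptrace headers; the TIF_SINGLESTEP flag name is illustrative and not mandated by this patch.

	/* Sketch: hypothetical arch-side counterparts to the generic fallbacks. */
	#define arch_has_single_step()	(1)

	static inline void user_enable_single_step(struct task_struct *task)
	{
		/* Arrange a trap after the next user instruction, e.g. via a
		 * thread flag checked on return to user mode (illustrative). */
		set_tsk_thread_flag(task, TIF_SINGLESTEP);
	}

	static inline void user_disable_single_step(struct task_struct *task)
	{
		clear_tsk_thread_flag(task, TIF_SINGLESTEP);
	}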
diff --git a/include/linux/regset.h b/include/linux/regset.h new file mode 100644 index 000000000000..8abee6556223 --- /dev/null +++ b/include/linux/regset.h | |||
@@ -0,0 +1,368 @@ | |||
1 | /* | ||
2 | * User-mode machine state access | ||
3 | * | ||
4 | * Copyright (C) 2007 Red Hat, Inc. All rights reserved. | ||
5 | * | ||
6 | * This copyrighted material is made available to anyone wishing to use, | ||
7 | * modify, copy, or redistribute it subject to the terms and conditions | ||
8 | * of the GNU General Public License v.2. | ||
9 | * | ||
10 | * Red Hat Author: Roland McGrath. | ||
11 | */ | ||
12 | |||
13 | #ifndef _LINUX_REGSET_H | ||
14 | #define _LINUX_REGSET_H 1 | ||
15 | |||
16 | #include <linux/compiler.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/uaccess.h> | ||
19 | struct task_struct; | ||
20 | struct user_regset; | ||
21 | |||
22 | |||
23 | /** | ||
24 | * user_regset_active_fn - type of @active function in &struct user_regset | ||
25 | * @target: thread being examined | ||
26 | * @regset: regset being examined | ||
27 | * | ||
28 | * Return -%ENODEV if not available on the hardware found. | ||
29 | * Return %0 if no interesting state in this thread. | ||
30 | * Return >%0 number of @size units of interesting state. | ||
31 | * Any get call fetching state beyond that number will | ||
32 | * see the default initialization state for this data, | ||
33 | * so a caller that knows what the default state is need | ||
34 | * not copy it all out. | ||
35 | * This call is optional; the pointer is %NULL if there | ||
36 | * is no inexpensive check to yield a value < @n. | ||
37 | */ | ||
38 | typedef int user_regset_active_fn(struct task_struct *target, | ||
39 | const struct user_regset *regset); | ||
40 | |||
41 | /** | ||
42 | * user_regset_get_fn - type of @get function in &struct user_regset | ||
43 | * @target: thread being examined | ||
44 | * @regset: regset being examined | ||
45 | * @pos: offset into the regset data to access, in bytes | ||
46 | * @count: amount of data to copy, in bytes | ||
47 | * @kbuf: if not %NULL, a kernel-space pointer to copy into | ||
48 | * @ubuf: if @kbuf is %NULL, a user-space pointer to copy into | ||
49 | * | ||
50 | * Fetch register values. Return %0 on success; -%EIO or -%ENODEV | ||
51 | * are usual failure returns. The @pos and @count values are in | ||
52 | * bytes, but must be properly aligned. If @kbuf is non-null, that | ||
53 | * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then | ||
54 | * ubuf gives a userland pointer to access directly, and an -%EFAULT | ||
55 | * return value is possible. | ||
56 | */ | ||
57 | typedef int user_regset_get_fn(struct task_struct *target, | ||
58 | const struct user_regset *regset, | ||
59 | unsigned int pos, unsigned int count, | ||
60 | void *kbuf, void __user *ubuf); | ||
61 | |||
62 | /** | ||
63 | * user_regset_set_fn - type of @set function in &struct user_regset | ||
64 | * @target: thread being examined | ||
65 | * @regset: regset being examined | ||
66 | * @pos: offset into the regset data to access, in bytes | ||
67 | * @count: amount of data to copy, in bytes | ||
68 | * @kbuf: if not %NULL, a kernel-space pointer to copy from | ||
69 | * @ubuf: if @kbuf is %NULL, a user-space pointer to copy from | ||
70 | * | ||
71 | * Store register values. Return %0 on success; -%EIO or -%ENODEV | ||
72 | * are usual failure returns. The @pos and @count values are in | ||
73 | * bytes, but must be properly aligned. If @kbuf is non-null, that | ||
74 | * buffer is used and @ubuf is ignored. If @kbuf is %NULL, then | ||
75 | * ubuf gives a userland pointer to access directly, and an -%EFAULT | ||
76 | * return value is possible. | ||
77 | */ | ||
78 | typedef int user_regset_set_fn(struct task_struct *target, | ||
79 | const struct user_regset *regset, | ||
80 | unsigned int pos, unsigned int count, | ||
81 | const void *kbuf, const void __user *ubuf); | ||
82 | |||
83 | /** | ||
84 | * user_regset_writeback_fn - type of @writeback function in &struct user_regset | ||
85 | * @target: thread being examined | ||
86 | * @regset: regset being examined | ||
87 | * @immediate: zero if writeback at completion of next context switch is OK | ||
88 | * | ||
89 | * This call is optional; usually the pointer is %NULL. When | ||
90 | * provided, there is some user memory associated with this regset's | ||
91 | * hardware, such as memory backing cached register data on register | ||
92 | * window machines; the regset's data controls what user memory is | ||
93 | * used (e.g. via the stack pointer value). | ||
94 | * | ||
95 | * Write register data back to user memory. If the @immediate flag | ||
96 | * is nonzero, it must be written to the user memory so uaccess or | ||
97 | * access_process_vm() can see it when this call returns; if zero, | ||
98 | * then it must be written back by the time the task completes a | ||
99 | * context switch (as synchronized with wait_task_inactive()). | ||
100 | * Return %0 on success or if there was nothing to do, -%EFAULT for | ||
101 | * a memory problem (bad stack pointer or whatever), or -%EIO for a | ||
102 | * hardware problem. | ||
103 | */ | ||
104 | typedef int user_regset_writeback_fn(struct task_struct *target, | ||
105 | const struct user_regset *regset, | ||
106 | int immediate); | ||
107 | |||
108 | /** | ||
109 | * struct user_regset - accessible thread CPU state | ||
110 | * @n: Number of slots (registers). | ||
111 | * @size: Size in bytes of a slot (register). | ||
112 | * @align: Required alignment, in bytes. | ||
113 | * @bias: Bias from natural indexing. | ||
114 | * @core_note_type: ELF note @n_type value used in core dumps. | ||
115 | * @get: Function to fetch values. | ||
116 | * @set: Function to store values. | ||
117 | * @active: Function to report if regset is active, or %NULL. | ||
118 | * @writeback: Function to write data back to user memory, or %NULL. | ||
119 | * | ||
120 | * This data structure describes a machine resource we call a register set. | ||
121 | * This is part of the state of an individual thread, not necessarily | ||
122 | * actual CPU registers per se. A register set consists of a number of | ||
123 | * similar slots, given by @n. Each slot is @size bytes, and aligned to | ||
124 | * @align bytes (which is at least @size). | ||
125 | * | ||
126 | * These functions must be called only on the current thread or on a | ||
127 | * thread that is in %TASK_STOPPED or %TASK_TRACED state, that we are | ||
128 | * guaranteed will not be woken up and return to user mode, and that we | ||
129 | * have called wait_task_inactive() on. (The target thread always might | ||
130 | * wake up for SIGKILL while these functions are working, in which case | ||
131 | * that thread's user_regset state might be scrambled.) | ||
132 | * | ||
133 | * The @pos argument must be aligned according to @align; the @count | ||
134 | * argument must be a multiple of @size. These functions are not | ||
135 | * responsible for checking for invalid arguments. | ||
136 | * | ||
137 | * When there is a natural value to use as an index, @bias gives the | ||
138 | * difference between the natural index and the slot index for the | ||
139 | * register set. For example, x86 GDT segment descriptors form a regset; | ||
140 | * the segment selector produces a natural index, but only a subset of | ||
141 | * that index space is available as a regset (the TLS slots); subtracting | ||
142 | * @bias from a segment selector index value computes the regset slot. | ||
143 | * | ||
144 | * If nonzero, @core_note_type gives the n_type field (NT_* value) | ||
145 | * of the core file note in which this regset's data appears. | ||
146 | * NT_PRSTATUS is a special case in that the regset data starts at | ||
147 | * offsetof(struct elf_prstatus, pr_reg) into the note data; that is | ||
148 | * part of the per-machine ELF formats userland knows about. In | ||
149 | * other cases, the core file note contains exactly the whole regset | ||
150 | * (@n * @size) and nothing else. The core file note is normally | ||
151 | * omitted when there is an @active function and it returns zero. | ||
152 | */ | ||
153 | struct user_regset { | ||
154 | user_regset_get_fn *get; | ||
155 | user_regset_set_fn *set; | ||
156 | user_regset_active_fn *active; | ||
157 | user_regset_writeback_fn *writeback; | ||
158 | unsigned int n; | ||
159 | unsigned int size; | ||
160 | unsigned int align; | ||
161 | unsigned int bias; | ||
162 | unsigned int core_note_type; | ||
163 | }; | ||
164 | |||
165 | /** | ||
166 | * struct user_regset_view - available regsets | ||
167 | * @name: Identifier, e.g. UTS_MACHINE string. | ||
168 | * @regsets: Array of @n regsets available in this view. | ||
169 | * @n: Number of elements in @regsets. | ||
170 | * @e_machine: ELF header @e_machine %EM_* value written in core dumps. | ||
171 | * @e_flags: ELF header @e_flags value written in core dumps. | ||
172 | * @ei_osabi: ELF header @e_ident[%EI_OSABI] value written in core dumps. | ||
173 | * | ||
174 | * A regset view is a collection of regsets (&struct user_regset, | ||
175 | * above). This describes all the state of a thread that can be seen | ||
176 | * from a given architecture/ABI environment. More than one view might | ||
177 | * refer to the same &struct user_regset, or more than one regset | ||
178 | * might refer to the same machine-specific state in the thread. For | ||
179 | * example, a 32-bit thread's state could be examined from the 32-bit | ||
180 | * view or from the 64-bit view. Either method reaches the same thread | ||
181 | * register state, doing appropriate widening or truncation. | ||
182 | */ | ||
183 | struct user_regset_view { | ||
184 | const char *name; | ||
185 | const struct user_regset *regsets; | ||
186 | unsigned int n; | ||
187 | u32 e_flags; | ||
188 | u16 e_machine; | ||
189 | u8 ei_osabi; | ||
190 | }; | ||
191 | |||
192 | /* | ||
193 | * This is documented here rather than at the definition sites because its | ||
194 | * implementation is machine-dependent but its interface is universal. | ||
195 | */ | ||
196 | /** | ||
197 | * task_user_regset_view - Return the process's native regset view. | ||
198 | * @tsk: a thread of the process in question | ||
199 | * | ||
200 | * Return the &struct user_regset_view that is native for the given process. | ||
201 | * For example, what it would access when it called ptrace(). | ||
202 | * Throughout the life of the process, this only changes at exec. | ||
203 | */ | ||
204 | const struct user_regset_view *task_user_regset_view(struct task_struct *tsk); | ||
205 | |||
206 | |||
207 | /* | ||
208 | * These are helpers for writing regset get/set functions in arch code. | ||
209 | * Because @start_pos and @end_pos are always compile-time constants, | ||
210 | * these are inlined into very little code though they look large. | ||
211 | * | ||
212 | * Use one or more calls sequentially for each chunk of regset data stored | ||
213 | * contiguously in memory. Call with constants for @start_pos and @end_pos, | ||
214 | * giving the range of byte positions in the regset that data corresponds | ||
215 | * to; @end_pos can be -1 if this chunk is at the end of the regset layout. | ||
216 | * Each call updates the arguments to point past its chunk. | ||
217 | */ | ||
218 | |||
219 | static inline int user_regset_copyout(unsigned int *pos, unsigned int *count, | ||
220 | void **kbuf, | ||
221 | void __user **ubuf, const void *data, | ||
222 | const int start_pos, const int end_pos) | ||
223 | { | ||
224 | if (*count == 0) | ||
225 | return 0; | ||
226 | BUG_ON(*pos < start_pos); | ||
227 | if (end_pos < 0 || *pos < end_pos) { | ||
228 | unsigned int copy = (end_pos < 0 ? *count | ||
229 | : min(*count, end_pos - *pos)); | ||
230 | data += *pos - start_pos; | ||
231 | if (*kbuf) { | ||
232 | memcpy(*kbuf, data, copy); | ||
233 | *kbuf += copy; | ||
234 | } else if (__copy_to_user(*ubuf, data, copy)) | ||
235 | return -EFAULT; | ||
236 | else | ||
237 | *ubuf += copy; | ||
238 | *pos += copy; | ||
239 | *count -= copy; | ||
240 | } | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static inline int user_regset_copyin(unsigned int *pos, unsigned int *count, | ||
245 | const void **kbuf, | ||
246 | const void __user **ubuf, void *data, | ||
247 | const int start_pos, const int end_pos) | ||
248 | { | ||
249 | if (*count == 0) | ||
250 | return 0; | ||
251 | BUG_ON(*pos < start_pos); | ||
252 | if (end_pos < 0 || *pos < end_pos) { | ||
253 | unsigned int copy = (end_pos < 0 ? *count | ||
254 | : min(*count, end_pos - *pos)); | ||
255 | data += *pos - start_pos; | ||
256 | if (*kbuf) { | ||
257 | memcpy(data, *kbuf, copy); | ||
258 | *kbuf += copy; | ||
259 | } else if (__copy_from_user(data, *ubuf, copy)) | ||
260 | return -EFAULT; | ||
261 | else | ||
262 | *ubuf += copy; | ||
263 | *pos += copy; | ||
264 | *count -= copy; | ||
265 | } | ||
266 | return 0; | ||
267 | } | ||
268 | |||
269 | /* | ||
270 | * These two parallel the two above, but for portions of a regset layout | ||
271 | * that always read as all-zero or for which writes are ignored. | ||
272 | */ | ||
273 | static inline int user_regset_copyout_zero(unsigned int *pos, | ||
274 | unsigned int *count, | ||
275 | void **kbuf, void __user **ubuf, | ||
276 | const int start_pos, | ||
277 | const int end_pos) | ||
278 | { | ||
279 | if (*count == 0) | ||
280 | return 0; | ||
281 | BUG_ON(*pos < start_pos); | ||
282 | if (end_pos < 0 || *pos < end_pos) { | ||
283 | unsigned int copy = (end_pos < 0 ? *count | ||
284 | : min(*count, end_pos - *pos)); | ||
285 | if (*kbuf) { | ||
286 | memset(*kbuf, 0, copy); | ||
287 | *kbuf += copy; | ||
288 | } else if (__clear_user(*ubuf, copy)) | ||
289 | return -EFAULT; | ||
290 | else | ||
291 | *ubuf += copy; | ||
292 | *pos += copy; | ||
293 | *count -= copy; | ||
294 | } | ||
295 | return 0; | ||
296 | } | ||
297 | |||
298 | static inline int user_regset_copyin_ignore(unsigned int *pos, | ||
299 | unsigned int *count, | ||
300 | const void **kbuf, | ||
301 | const void __user **ubuf, | ||
302 | const int start_pos, | ||
303 | const int end_pos) | ||
304 | { | ||
305 | if (*count == 0) | ||
306 | return 0; | ||
307 | BUG_ON(*pos < start_pos); | ||
308 | if (end_pos < 0 || *pos < end_pos) { | ||
309 | unsigned int copy = (end_pos < 0 ? *count | ||
310 | : min(*count, end_pos - *pos)); | ||
311 | if (*kbuf) | ||
312 | *kbuf += copy; | ||
313 | else | ||
314 | *ubuf += copy; | ||
315 | *pos += copy; | ||
316 | *count -= copy; | ||
317 | } | ||
318 | return 0; | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * copy_regset_to_user - fetch a thread's user_regset data into user memory | ||
323 | * @target: thread to be examined | ||
324 | * @view: &struct user_regset_view describing user thread machine state | ||
325 | * @setno: index in @view->regsets | ||
326 | * @offset: offset into the regset data, in bytes | ||
327 | * @size: amount of data to copy, in bytes | ||
328 | * @data: user-mode pointer to copy into | ||
329 | */ | ||
330 | static inline int copy_regset_to_user(struct task_struct *target, | ||
331 | const struct user_regset_view *view, | ||
332 | unsigned int setno, | ||
333 | unsigned int offset, unsigned int size, | ||
334 | void __user *data) | ||
335 | { | ||
336 | const struct user_regset *regset = &view->regsets[setno]; | ||
337 | |||
338 | if (!access_ok(VERIFY_WRITE, data, size)) | ||
339 | return -EIO; | ||
340 | |||
341 | return regset->get(target, regset, offset, size, NULL, data); | ||
342 | } | ||
343 | |||
344 | /** | ||
345 | * copy_regset_from_user - store into thread's user_regset data from user memory | ||
346 | * @target: thread to be examined | ||
347 | * @view: &struct user_regset_view describing user thread machine state | ||
348 | * @setno: index in @view->regsets | ||
349 | * @offset: offset into the regset data, in bytes | ||
350 | * @size: amount of data to copy, in bytes | ||
351 | * @data: user-mode pointer to copy from | ||
352 | */ | ||
353 | static inline int copy_regset_from_user(struct task_struct *target, | ||
354 | const struct user_regset_view *view, | ||
355 | unsigned int setno, | ||
356 | unsigned int offset, unsigned int size, | ||
357 | const void __user *data) | ||
358 | { | ||
359 | const struct user_regset *regset = &view->regsets[setno]; | ||
360 | |||
361 | if (!access_ok(VERIFY_READ, data, size)) | ||
362 | return -EIO; | ||
363 | |||
364 | return regset->set(target, regset, offset, size, NULL, data); | ||
365 | } | ||
366 | |||
367 | |||
368 | #endif /* <linux/regset.h> */ | ||
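Editor's note: as a usage sketch for the copyout helper documented above, an arch regset @get function walks the regset layout in contiguous chunks; the struct my_gpr_block type and fill_gpr_block() helper below are hypothetical placeholders for real per-arch thread state.

	/* Sketch: a hypothetical @get routine built on user_regset_copyout(). */
	static int myarch_gpr_get(struct task_struct *target,
				  const struct user_regset *regset,
				  unsigned int pos, unsigned int count,
				  void *kbuf, void __user *ubuf)
	{
		struct my_gpr_block regs;	/* hypothetical saved-register block */

		fill_gpr_block(target, &regs);	/* hypothetical: copy thread state */

		/* One contiguous chunk covering the whole regset (end_pos == -1). */
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &regs, 0, -1);
	}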
diff --git a/include/linux/sched.h b/include/linux/sched.h index 2d0546e884ea..9d4797609aa5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1922,23 +1922,16 @@ extern int cond_resched_softirq(void); | |||
1922 | 1922 | ||
1923 | /* | 1923 | /* |
1924 | * Does a critical section need to be broken due to another | 1924 | * Does a critical section need to be broken due to another |
1925 | * task waiting?: | 1925 | * task waiting?: (technically does not depend on CONFIG_PREEMPT, |
1926 | * but a general need for low latency) | ||
1926 | */ | 1927 | */ |
1927 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | 1928 | static inline int spin_needbreak(spinlock_t *lock) |
1928 | # define need_lockbreak(lock) ((lock)->break_lock) | ||
1929 | #else | ||
1930 | # define need_lockbreak(lock) 0 | ||
1931 | #endif | ||
1932 | |||
1933 | /* | ||
1934 | * Does a critical section need to be broken due to another | ||
1935 | * task waiting or preemption being signalled: | ||
1936 | */ | ||
1937 | static inline int lock_need_resched(spinlock_t *lock) | ||
1938 | { | 1929 | { |
1939 | if (need_lockbreak(lock) || need_resched()) | 1930 | #ifdef CONFIG_PREEMPT |
1940 | return 1; | 1931 | return spin_is_contended(lock); |
1932 | #else | ||
1941 | return 0; | 1933 | return 0; |
1934 | #endif | ||
1942 | } | 1935 | } |
1943 | 1936 | ||
1944 | /* | 1937 | /* |
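Editor's note: a hedged sketch of the lock-breaking pattern spin_needbreak() is meant for, replacing the old need_lockbreak()/lock_need_resched() pair; the queue and item types and handle_item() are made up for illustration.

	/* Sketch: break a long critical section when the lock is contended. */
	struct my_item  { struct list_head node; };
	struct my_queue { spinlock_t lock; struct list_head pending; };

	static void drain_queue(struct my_queue *q)
	{
		spin_lock(&q->lock);
		while (!list_empty(&q->pending)) {
			struct my_item *item =
				list_first_entry(&q->pending, struct my_item, node);
			list_del(&item->node);
			handle_item(item);	/* hypothetical per-item work */

			/* Drop the lock if someone is spinning on it (PREEMPT)
			 * or we ourselves should reschedule. */
			if (spin_needbreak(&q->lock) || need_resched()) {
				spin_unlock(&q->lock);
				cond_resched();
				spin_lock(&q->lock);
			}
		}
		spin_unlock(&q->lock);
	}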
diff --git a/include/linux/smp.h b/include/linux/smp.h index c25e66bcecf3..55232ccf9cfd 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -78,6 +78,8 @@ int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait); | |||
78 | */ | 78 | */ |
79 | void smp_prepare_boot_cpu(void); | 79 | void smp_prepare_boot_cpu(void); |
80 | 80 | ||
81 | extern unsigned int setup_max_cpus; | ||
82 | |||
81 | #else /* !SMP */ | 83 | #else /* !SMP */ |
82 | 84 | ||
83 | /* | 85 | /* |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index c376f3b36c89..124449733c55 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -120,6 +120,12 @@ do { \ | |||
120 | 120 | ||
121 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) | 121 | #define spin_is_locked(lock) __raw_spin_is_locked(&(lock)->raw_lock) |
122 | 122 | ||
123 | #ifdef CONFIG_GENERIC_LOCKBREAK | ||
124 | #define spin_is_contended(lock) ((lock)->break_lock) | ||
125 | #else | ||
126 | #define spin_is_contended(lock) __raw_spin_is_contended(&(lock)->raw_lock) | ||
127 | #endif | ||
128 | |||
123 | /** | 129 | /** |
124 | * spin_unlock_wait - wait until the spinlock gets unlocked | 130 | * spin_unlock_wait - wait until the spinlock gets unlocked |
125 | * @lock: the spinlock in question. | 131 | * @lock: the spinlock in question. |
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h index f6a3a951b79e..68d88f71f1a2 100644 --- a/include/linux/spinlock_types.h +++ b/include/linux/spinlock_types.h | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | typedef struct { | 20 | typedef struct { |
21 | raw_spinlock_t raw_lock; | 21 | raw_spinlock_t raw_lock; |
22 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | 22 | #ifdef CONFIG_GENERIC_LOCKBREAK |
23 | unsigned int break_lock; | 23 | unsigned int break_lock; |
24 | #endif | 24 | #endif |
25 | #ifdef CONFIG_DEBUG_SPINLOCK | 25 | #ifdef CONFIG_DEBUG_SPINLOCK |
@@ -35,7 +35,7 @@ typedef struct { | |||
35 | 35 | ||
36 | typedef struct { | 36 | typedef struct { |
37 | raw_rwlock_t raw_lock; | 37 | raw_rwlock_t raw_lock; |
38 | #if defined(CONFIG_PREEMPT) && defined(CONFIG_SMP) | 38 | #ifdef CONFIG_GENERIC_LOCKBREAK |
39 | unsigned int break_lock; | 39 | unsigned int break_lock; |
40 | #endif | 40 | #endif |
41 | #ifdef CONFIG_DEBUG_SPINLOCK | 41 | #ifdef CONFIG_DEBUG_SPINLOCK |
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index ea54c4c9a4ec..938234c4a996 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h | |||
@@ -64,6 +64,8 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) | |||
64 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) | 64 | # define __raw_spin_trylock(lock) ({ (void)(lock); 1; }) |
65 | #endif /* DEBUG_SPINLOCK */ | 65 | #endif /* DEBUG_SPINLOCK */ |
66 | 66 | ||
67 | #define __raw_spin_is_contended(lock) (((void)(lock), 0)) | ||
68 | |||
67 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) | 69 | #define __raw_read_can_lock(lock) (((void)(lock), 1)) |
68 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) | 70 | #define __raw_write_can_lock(lock) (((void)(lock), 1)) |
69 | 71 | ||
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index d9d5c5ad826c..3e9addc741c1 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -46,6 +46,7 @@ struct rpc_clnt { | |||
46 | cl_autobind : 1;/* use getport() */ | 46 | cl_autobind : 1;/* use getport() */ |
47 | 47 | ||
48 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ | 48 | struct rpc_rtt * cl_rtt; /* RTO estimator data */ |
49 | const struct rpc_timeout *cl_timeout; /* Timeout strategy */ | ||
49 | 50 | ||
50 | int cl_nodelen; /* nodename length */ | 51 | int cl_nodelen; /* nodename length */ |
51 | char cl_nodename[UNX_MAXNODENAME]; | 52 | char cl_nodename[UNX_MAXNODENAME]; |
@@ -54,6 +55,7 @@ struct rpc_clnt { | |||
54 | struct dentry * cl_dentry; /* inode */ | 55 | struct dentry * cl_dentry; /* inode */ |
55 | struct rpc_clnt * cl_parent; /* Points to parent of clones */ | 56 | struct rpc_clnt * cl_parent; /* Points to parent of clones */ |
56 | struct rpc_rtt cl_rtt_default; | 57 | struct rpc_rtt cl_rtt_default; |
58 | struct rpc_timeout cl_timeout_default; | ||
57 | struct rpc_program * cl_program; | 59 | struct rpc_program * cl_program; |
58 | char cl_inline_name[32]; | 60 | char cl_inline_name[32]; |
59 | }; | 61 | }; |
@@ -99,7 +101,7 @@ struct rpc_create_args { | |||
99 | struct sockaddr *address; | 101 | struct sockaddr *address; |
100 | size_t addrsize; | 102 | size_t addrsize; |
101 | struct sockaddr *saddress; | 103 | struct sockaddr *saddress; |
102 | struct rpc_timeout *timeout; | 104 | const struct rpc_timeout *timeout; |
103 | char *servername; | 105 | char *servername; |
104 | struct rpc_program *program; | 106 | struct rpc_program *program; |
105 | u32 version; | 107 | u32 version; |
@@ -123,11 +125,10 @@ void rpc_shutdown_client(struct rpc_clnt *); | |||
123 | void rpc_release_client(struct rpc_clnt *); | 125 | void rpc_release_client(struct rpc_clnt *); |
124 | 126 | ||
125 | int rpcb_register(u32, u32, int, unsigned short, int *); | 127 | int rpcb_register(u32, u32, int, unsigned short, int *); |
126 | int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int); | 128 | int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int); |
127 | void rpcb_getport_async(struct rpc_task *); | 129 | void rpcb_getport_async(struct rpc_task *); |
128 | 130 | ||
129 | void rpc_call_setup(struct rpc_task *, struct rpc_message *, int); | 131 | void rpc_call_start(struct rpc_task *); |
130 | |||
131 | int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, | 132 | int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, |
132 | int flags, const struct rpc_call_ops *tk_ops, | 133 | int flags, const struct rpc_call_ops *tk_ops, |
133 | void *calldata); | 134 | void *calldata); |
@@ -142,7 +143,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); | |||
142 | size_t rpc_max_payload(struct rpc_clnt *); | 143 | size_t rpc_max_payload(struct rpc_clnt *); |
143 | void rpc_force_rebind(struct rpc_clnt *); | 144 | void rpc_force_rebind(struct rpc_clnt *); |
144 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); | 145 | size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t); |
145 | char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); | 146 | const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t); |
146 | 147 | ||
147 | #endif /* __KERNEL__ */ | 148 | #endif /* __KERNEL__ */ |
148 | #endif /* _LINUX_SUNRPC_CLNT_H */ | 149 | #endif /* _LINUX_SUNRPC_CLNT_H */ |
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h index c4beb5775111..70df4f1d8847 100644 --- a/include/linux/sunrpc/msg_prot.h +++ b/include/linux/sunrpc/msg_prot.h | |||
@@ -152,5 +152,44 @@ typedef __be32 rpc_fraghdr; | |||
152 | */ | 152 | */ |
153 | #define RPCBIND_MAXNETIDLEN (4u) | 153 | #define RPCBIND_MAXNETIDLEN (4u) |
154 | 154 | ||
155 | /* | ||
156 | * Universal addresses are introduced in RFC 1833 and further spelled | ||
157 | * out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length | ||
158 | * of a universal address for use in allocating buffers and character | ||
159 | * arrays. | ||
160 | * | ||
161 | * Quoting RFC 3530, section 2.2: | ||
162 | * | ||
163 | * For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the | ||
164 | * US-ASCII string: | ||
165 | * | ||
166 | * h1.h2.h3.h4.p1.p2 | ||
167 | * | ||
168 | * The prefix, "h1.h2.h3.h4", is the standard textual form for | ||
169 | * representing an IPv4 address, which is always four octets long. | ||
170 | * Assuming big-endian ordering, h1, h2, h3, and h4, are respectively, | ||
171 | * the first through fourth octets each converted to ASCII-decimal. | ||
172 | * Assuming big-endian ordering, p1 and p2 are, respectively, the first | ||
173 | * and second octets each converted to ASCII-decimal. For example, if a | ||
174 | * host, in big-endian order, has an address of 0x0A010307 and there is | ||
175 | * a service listening on, in big endian order, port 0x020F (decimal | ||
176 | * 527), then the complete universal address is "10.1.3.7.2.15". | ||
177 | * | ||
178 | * ... | ||
179 | * | ||
180 | * For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the | ||
181 | * US-ASCII string: | ||
182 | * | ||
183 | * x1:x2:x3:x4:x5:x6:x7:x8.p1.p2 | ||
184 | * | ||
185 | * The suffix "p1.p2" is the service port, and is computed the same way | ||
186 | * as with universal addresses for TCP and UDP over IPv4. The prefix, | ||
187 | * "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for | ||
188 | * representing an IPv6 address as defined in Section 2.2 of [RFC2373]. | ||
189 | * Additionally, the two alternative forms specified in Section 2.2 of | ||
190 | * [RFC2373] are also acceptable. | ||
191 | */ | ||
192 | #define RPCBIND_MAXUADDRLEN (56u) | ||
193 | |||
155 | #endif /* __KERNEL__ */ | 194 | #endif /* __KERNEL__ */ |
156 | #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ | 195 | #endif /* _LINUX_SUNRPC_MSGPROT_H_ */ |
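Editor's note: to illustrate the "h1.h2.h3.h4.p1.p2" form quoted above and why RPCBIND_MAXUADDRLEN bounds the buffer, a small sketch that formats an IPv4 socket address as a universal address; the make_uaddr4() helper name is made up.

	/* Sketch: build the universal address string for an IPv4 peer. */
	static int make_uaddr4(const struct sockaddr_in *sin, char *buf, size_t len)
	{
		const u8 *a = (const u8 *)&sin->sin_addr.s_addr;
		u16 port = ntohs(sin->sin_port);

		/* len should be at least RPCBIND_MAXUADDRLEN. */
		return snprintf(buf, len, "%u.%u.%u.%u.%u.%u",
				a[0], a[1], a[2], a[3], port >> 8, port & 0xff);
	}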
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 8ea077db0099..ce3d1b132729 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
@@ -56,8 +56,6 @@ struct rpc_task { | |||
56 | __u8 tk_garb_retry; | 56 | __u8 tk_garb_retry; |
57 | __u8 tk_cred_retry; | 57 | __u8 tk_cred_retry; |
58 | 58 | ||
59 | unsigned long tk_cookie; /* Cookie for batching tasks */ | ||
60 | |||
61 | /* | 59 | /* |
62 | * timeout_fn to be executed by timer bottom half | 60 | * timeout_fn to be executed by timer bottom half |
63 | * callback to be executed after waking up | 61 | * callback to be executed after waking up |
@@ -78,7 +76,6 @@ struct rpc_task { | |||
78 | struct timer_list tk_timer; /* kernel timer */ | 76 | struct timer_list tk_timer; /* kernel timer */ |
79 | unsigned long tk_timeout; /* timeout for rpc_sleep() */ | 77 | unsigned long tk_timeout; /* timeout for rpc_sleep() */ |
80 | unsigned short tk_flags; /* misc flags */ | 78 | unsigned short tk_flags; /* misc flags */ |
81 | unsigned char tk_priority : 2;/* Task priority */ | ||
82 | unsigned long tk_runstate; /* Task run status */ | 79 | unsigned long tk_runstate; /* Task run status */ |
83 | struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could | 80 | struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could |
84 | * be any workqueue | 81 | * be any workqueue |
@@ -94,6 +91,9 @@ struct rpc_task { | |||
94 | unsigned long tk_start; /* RPC task init timestamp */ | 91 | unsigned long tk_start; /* RPC task init timestamp */ |
95 | long tk_rtt; /* round-trip time (jiffies) */ | 92 | long tk_rtt; /* round-trip time (jiffies) */ |
96 | 93 | ||
94 | pid_t tk_owner; /* Process id for batching tasks */ | ||
95 | unsigned char tk_priority : 2;/* Task priority */ | ||
96 | |||
97 | #ifdef RPC_DEBUG | 97 | #ifdef RPC_DEBUG |
98 | unsigned short tk_pid; /* debugging aid */ | 98 | unsigned short tk_pid; /* debugging aid */ |
99 | #endif | 99 | #endif |
@@ -117,6 +117,15 @@ struct rpc_call_ops { | |||
117 | void (*rpc_release)(void *); | 117 | void (*rpc_release)(void *); |
118 | }; | 118 | }; |
119 | 119 | ||
120 | struct rpc_task_setup { | ||
121 | struct rpc_task *task; | ||
122 | struct rpc_clnt *rpc_client; | ||
123 | const struct rpc_message *rpc_message; | ||
124 | const struct rpc_call_ops *callback_ops; | ||
125 | void *callback_data; | ||
126 | unsigned short flags; | ||
127 | signed char priority; | ||
128 | }; | ||
120 | 129 | ||
121 | /* | 130 | /* |
122 | * RPC task flags | 131 | * RPC task flags |
@@ -180,10 +189,10 @@ struct rpc_call_ops { | |||
180 | * Note: if you change these, you must also change | 189 | * Note: if you change these, you must also change |
181 | * the task initialization definitions below. | 190 | * the task initialization definitions below. |
182 | */ | 191 | */ |
183 | #define RPC_PRIORITY_LOW 0 | 192 | #define RPC_PRIORITY_LOW (-1) |
184 | #define RPC_PRIORITY_NORMAL 1 | 193 | #define RPC_PRIORITY_NORMAL (0) |
185 | #define RPC_PRIORITY_HIGH 2 | 194 | #define RPC_PRIORITY_HIGH (1) |
186 | #define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1) | 195 | #define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW) |
187 | 196 | ||
188 | /* | 197 | /* |
189 | * RPC synchronization objects | 198 | * RPC synchronization objects |
@@ -191,7 +200,7 @@ struct rpc_call_ops { | |||
191 | struct rpc_wait_queue { | 200 | struct rpc_wait_queue { |
192 | spinlock_t lock; | 201 | spinlock_t lock; |
193 | struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ | 202 | struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */ |
194 | unsigned long cookie; /* cookie of last task serviced */ | 203 | pid_t owner; /* process id of last task serviced */ |
195 | unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ | 204 | unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */ |
196 | unsigned char priority; /* current priority */ | 205 | unsigned char priority; /* current priority */ |
197 | unsigned char count; /* # task groups remaining serviced so far */ | 206 | unsigned char count; /* # task groups remaining serviced so far */ |
@@ -208,41 +217,13 @@ struct rpc_wait_queue { | |||
208 | * performance of NFS operations such as read/write. | 217 | * performance of NFS operations such as read/write. |
209 | */ | 218 | */ |
210 | #define RPC_BATCH_COUNT 16 | 219 | #define RPC_BATCH_COUNT 16 |
211 | |||
212 | #ifndef RPC_DEBUG | ||
213 | # define RPC_WAITQ_INIT(var,qname) { \ | ||
214 | .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ | ||
215 | .tasks = { \ | ||
216 | [0] = LIST_HEAD_INIT(var.tasks[0]), \ | ||
217 | [1] = LIST_HEAD_INIT(var.tasks[1]), \ | ||
218 | [2] = LIST_HEAD_INIT(var.tasks[2]), \ | ||
219 | }, \ | ||
220 | } | ||
221 | #else | ||
222 | # define RPC_WAITQ_INIT(var,qname) { \ | ||
223 | .lock = __SPIN_LOCK_UNLOCKED(var.lock), \ | ||
224 | .tasks = { \ | ||
225 | [0] = LIST_HEAD_INIT(var.tasks[0]), \ | ||
226 | [1] = LIST_HEAD_INIT(var.tasks[1]), \ | ||
227 | [2] = LIST_HEAD_INIT(var.tasks[2]), \ | ||
228 | }, \ | ||
229 | .name = qname, \ | ||
230 | } | ||
231 | #endif | ||
232 | # define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname) | ||
233 | |||
234 | #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) | 220 | #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) |
235 | 221 | ||
236 | /* | 222 | /* |
237 | * Function prototypes | 223 | * Function prototypes |
238 | */ | 224 | */ |
239 | struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags, | 225 | struct rpc_task *rpc_new_task(const struct rpc_task_setup *); |
240 | const struct rpc_call_ops *ops, void *data); | 226 | struct rpc_task *rpc_run_task(const struct rpc_task_setup *); |
241 | struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags, | ||
242 | const struct rpc_call_ops *ops, void *data); | ||
243 | void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, | ||
244 | int flags, const struct rpc_call_ops *ops, | ||
245 | void *data); | ||
246 | void rpc_put_task(struct rpc_task *); | 227 | void rpc_put_task(struct rpc_task *); |
247 | void rpc_exit_task(struct rpc_task *); | 228 | void rpc_exit_task(struct rpc_task *); |
248 | void rpc_release_calldata(const struct rpc_call_ops *, void *); | 229 | void rpc_release_calldata(const struct rpc_call_ops *, void *); |
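Editor's note: the new struct rpc_task_setup argument block replaces the long parameter lists of the old rpc_run_task()/rpc_new_task()/rpc_init_task(). A hedged sketch of how a caller might now start an asynchronous call; the start_my_call() wrapper and its message/ops/calldata arguments are placeholders, not code from this patch.

	/* Sketch: starting an asynchronous RPC with the new setup structure. */
	static int start_my_call(struct rpc_clnt *clnt, struct rpc_message *msg,
				 const struct rpc_call_ops *ops, void *calldata)
	{
		struct rpc_task_setup task_setup = {
			.rpc_client	= clnt,
			.rpc_message	= msg,
			.callback_ops	= ops,
			.callback_data	= calldata,
			.flags		= RPC_TASK_ASYNC,
		};
		struct rpc_task *task;

		task = rpc_run_task(&task_setup);
		if (IS_ERR(task))
			return PTR_ERR(task);
		rpc_put_task(task);	/* drop our reference; callbacks own the rest */
		return 0;
	}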
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index 30b17b3bc1a9..b3ff9a815e6f 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
@@ -120,7 +120,7 @@ struct rpc_xprt { | |||
120 | struct kref kref; /* Reference count */ | 120 | struct kref kref; /* Reference count */ |
121 | struct rpc_xprt_ops * ops; /* transport methods */ | 121 | struct rpc_xprt_ops * ops; /* transport methods */ |
122 | 122 | ||
123 | struct rpc_timeout timeout; /* timeout parms */ | 123 | const struct rpc_timeout *timeout; /* timeout parms */ |
124 | struct sockaddr_storage addr; /* server address */ | 124 | struct sockaddr_storage addr; /* server address */ |
125 | size_t addrlen; /* size of server address */ | 125 | size_t addrlen; /* size of server address */ |
126 | int prot; /* IP protocol */ | 126 | int prot; /* IP protocol */ |
@@ -183,7 +183,7 @@ struct rpc_xprt { | |||
183 | bklog_u; /* backlog queue utilization */ | 183 | bklog_u; /* backlog queue utilization */ |
184 | } stat; | 184 | } stat; |
185 | 185 | ||
186 | char * address_strings[RPC_DISPLAY_MAX]; | 186 | const char *address_strings[RPC_DISPLAY_MAX]; |
187 | }; | 187 | }; |
188 | 188 | ||
189 | struct xprt_create { | 189 | struct xprt_create { |
@@ -191,7 +191,6 @@ struct xprt_create { | |||
191 | struct sockaddr * srcaddr; /* optional local address */ | 191 | struct sockaddr * srcaddr; /* optional local address */ |
192 | struct sockaddr * dstaddr; /* remote peer address */ | 192 | struct sockaddr * dstaddr; /* remote peer address */ |
193 | size_t addrlen; | 193 | size_t addrlen; |
194 | struct rpc_timeout * timeout; /* optional timeout parameters */ | ||
195 | }; | 194 | }; |
196 | 195 | ||
197 | struct xprt_class { | 196 | struct xprt_class { |
@@ -203,11 +202,6 @@ struct xprt_class { | |||
203 | }; | 202 | }; |
204 | 203 | ||
205 | /* | 204 | /* |
206 | * Transport operations used by ULPs | ||
207 | */ | ||
208 | void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr); | ||
209 | |||
210 | /* | ||
211 | * Generic internal transport functions | 205 | * Generic internal transport functions |
212 | */ | 206 | */ |
213 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args); | 207 | struct rpc_xprt *xprt_create_transport(struct xprt_create *args); |
@@ -245,7 +239,8 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result); | |||
245 | struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); | 239 | struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid); |
246 | void xprt_complete_rqst(struct rpc_task *task, int copied); | 240 | void xprt_complete_rqst(struct rpc_task *task, int copied); |
247 | void xprt_release_rqst_cong(struct rpc_task *task); | 241 | void xprt_release_rqst_cong(struct rpc_task *task); |
248 | void xprt_disconnect(struct rpc_xprt *xprt); | 242 | void xprt_disconnect_done(struct rpc_xprt *xprt); |
243 | void xprt_force_disconnect(struct rpc_xprt *xprt); | ||
249 | 244 | ||
250 | /* | 245 | /* |
251 | * Reserved bit positions in xprt->state | 246 | * Reserved bit positions in xprt->state |
@@ -256,6 +251,7 @@ void xprt_disconnect(struct rpc_xprt *xprt); | |||
256 | #define XPRT_CLOSE_WAIT (3) | 251 | #define XPRT_CLOSE_WAIT (3) |
257 | #define XPRT_BOUND (4) | 252 | #define XPRT_BOUND (4) |
258 | #define XPRT_BINDING (5) | 253 | #define XPRT_BINDING (5) |
254 | #define XPRT_CLOSING (6) | ||
259 | 255 | ||
260 | static inline void xprt_set_connected(struct rpc_xprt *xprt) | 256 | static inline void xprt_set_connected(struct rpc_xprt *xprt) |
261 | { | 257 | { |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4360e0816956..40280df2a3db 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -211,9 +211,6 @@ static inline int hibernate(void) { return -ENOSYS; } | |||
211 | #ifdef CONFIG_PM_SLEEP | 211 | #ifdef CONFIG_PM_SLEEP |
212 | void save_processor_state(void); | 212 | void save_processor_state(void); |
213 | void restore_processor_state(void); | 213 | void restore_processor_state(void); |
214 | struct saved_context; | ||
215 | void __save_processor_state(struct saved_context *ctxt); | ||
216 | void __restore_processor_state(struct saved_context *ctxt); | ||
217 | 214 | ||
218 | /* kernel/power/main.c */ | 215 | /* kernel/power/main.c */ |
219 | extern struct blocking_notifier_head pm_chain_head; | 216 | extern struct blocking_notifier_head pm_chain_head; |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 4f3838adbb30..2c3ce4c69b25 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/mmzone.h> | 6 | #include <linux/mmzone.h> |
7 | #include <linux/list.h> | 7 | #include <linux/list.h> |
8 | #include <linux/sched.h> | 8 | #include <linux/sched.h> |
9 | #include <linux/pagemap.h> | ||
9 | 10 | ||
10 | #include <asm/atomic.h> | 11 | #include <asm/atomic.h> |
11 | #include <asm/page.h> | 12 | #include <asm/page.h> |
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 9c4ad755d7e5..dfbdfb9836f4 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -42,27 +42,27 @@ extern long do_no_restart_syscall(struct restart_block *parm); | |||
42 | 42 | ||
43 | static inline void set_ti_thread_flag(struct thread_info *ti, int flag) | 43 | static inline void set_ti_thread_flag(struct thread_info *ti, int flag) |
44 | { | 44 | { |
45 | set_bit(flag,&ti->flags); | 45 | set_bit(flag, (unsigned long *)&ti->flags); |
46 | } | 46 | } |
47 | 47 | ||
48 | static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) | 48 | static inline void clear_ti_thread_flag(struct thread_info *ti, int flag) |
49 | { | 49 | { |
50 | clear_bit(flag,&ti->flags); | 50 | clear_bit(flag, (unsigned long *)&ti->flags); |
51 | } | 51 | } |
52 | 52 | ||
53 | static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) | 53 | static inline int test_and_set_ti_thread_flag(struct thread_info *ti, int flag) |
54 | { | 54 | { |
55 | return test_and_set_bit(flag,&ti->flags); | 55 | return test_and_set_bit(flag, (unsigned long *)&ti->flags); |
56 | } | 56 | } |
57 | 57 | ||
58 | static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) | 58 | static inline int test_and_clear_ti_thread_flag(struct thread_info *ti, int flag) |
59 | { | 59 | { |
60 | return test_and_clear_bit(flag,&ti->flags); | 60 | return test_and_clear_bit(flag, (unsigned long *)&ti->flags); |
61 | } | 61 | } |
62 | 62 | ||
63 | static inline int test_ti_thread_flag(struct thread_info *ti, int flag) | 63 | static inline int test_ti_thread_flag(struct thread_info *ti, int flag) |
64 | { | 64 | { |
65 | return test_bit(flag,&ti->flags); | 65 | return test_bit(flag, (unsigned long *)&ti->flags); |
66 | } | 66 | } |
67 | 67 | ||
68 | #define set_thread_flag(flag) \ | 68 | #define set_thread_flag(flag) \ |
diff --git a/include/linux/tick.h b/include/linux/tick.h index f4a1395e05ff..0fadf95debe1 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
@@ -51,8 +51,10 @@ struct tick_sched { | |||
51 | unsigned long idle_jiffies; | 51 | unsigned long idle_jiffies; |
52 | unsigned long idle_calls; | 52 | unsigned long idle_calls; |
53 | unsigned long idle_sleeps; | 53 | unsigned long idle_sleeps; |
54 | int idle_active; | ||
54 | ktime_t idle_entrytime; | 55 | ktime_t idle_entrytime; |
55 | ktime_t idle_sleeptime; | 56 | ktime_t idle_sleeptime; |
57 | ktime_t idle_lastupdate; | ||
56 | ktime_t sleep_length; | 58 | ktime_t sleep_length; |
57 | unsigned long last_jiffies; | 59 | unsigned long last_jiffies; |
58 | unsigned long next_jiffies; | 60 | unsigned long next_jiffies; |
@@ -103,6 +105,8 @@ extern void tick_nohz_stop_sched_tick(void); | |||
103 | extern void tick_nohz_restart_sched_tick(void); | 105 | extern void tick_nohz_restart_sched_tick(void); |
104 | extern void tick_nohz_update_jiffies(void); | 106 | extern void tick_nohz_update_jiffies(void); |
105 | extern ktime_t tick_nohz_get_sleep_length(void); | 107 | extern ktime_t tick_nohz_get_sleep_length(void); |
108 | extern void tick_nohz_stop_idle(int cpu); | ||
109 | extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time); | ||
106 | # else | 110 | # else |
107 | static inline void tick_nohz_stop_sched_tick(void) { } | 111 | static inline void tick_nohz_stop_sched_tick(void) { } |
108 | static inline void tick_nohz_restart_sched_tick(void) { } | 112 | static inline void tick_nohz_restart_sched_tick(void) { } |
@@ -113,6 +117,8 @@ static inline ktime_t tick_nohz_get_sleep_length(void) | |||
113 | 117 | ||
114 | return len; | 118 | return len; |
115 | } | 119 | } |
120 | static inline void tick_nohz_stop_idle(int cpu) { } | ||
121 | static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; } | ||
116 | # endif /* !NO_HZ */ | 122 | # endif /* !NO_HZ */ |
117 | 123 | ||
118 | #endif | 124 | #endif |
diff --git a/include/linux/timer.h b/include/linux/timer.h index 78cf899b4409..de0e71359ede 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <linux/ktime.h> | 5 | #include <linux/ktime.h> |
6 | #include <linux/stddef.h> | 6 | #include <linux/stddef.h> |
7 | 7 | ||
8 | struct tvec_t_base_s; | 8 | struct tvec_base; |
9 | 9 | ||
10 | struct timer_list { | 10 | struct timer_list { |
11 | struct list_head entry; | 11 | struct list_head entry; |
@@ -14,7 +14,7 @@ struct timer_list { | |||
14 | void (*function)(unsigned long); | 14 | void (*function)(unsigned long); |
15 | unsigned long data; | 15 | unsigned long data; |
16 | 16 | ||
17 | struct tvec_t_base_s *base; | 17 | struct tvec_base *base; |
18 | #ifdef CONFIG_TIMER_STATS | 18 | #ifdef CONFIG_TIMER_STATS |
19 | void *start_site; | 19 | void *start_site; |
20 | char start_comm[16]; | 20 | char start_comm[16]; |
@@ -22,7 +22,7 @@ struct timer_list { | |||
22 | #endif | 22 | #endif |
23 | }; | 23 | }; |
24 | 24 | ||
25 | extern struct tvec_t_base_s boot_tvec_bases; | 25 | extern struct tvec_base boot_tvec_bases; |
26 | 26 | ||
27 | #define TIMER_INITIALIZER(_function, _expires, _data) { \ | 27 | #define TIMER_INITIALIZER(_function, _expires, _data) { \ |
28 | .function = (_function), \ | 28 | .function = (_function), \ |